/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
				   u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  u64 bytenr, u64 num_bytes,
			  int is_data, int reserved,
			  struct extent_buffer **must_clean);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_super += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* scale num by factor tenths, i.e. num * factor / 10 */
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back ref is optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back ref is for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic, and
 * can be used in all cases the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required.  This information is stored in
 * the tree block info structure.
 */
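
/*
 * Editorial example (not part of the original file): composing the
 * btrfs_key for the two flavours of data back ref described above.  The
 * helper name is hypothetical; hash_extent_data_ref() is the real hash
 * defined further down in this file.
 */
#if 0	/* illustration only */
static void example_compose_data_ref_key(struct btrfs_key *key, u64 bytenr,
					 u64 parent, u64 root_objectid,
					 u64 owner, u64 offset)
{
	key->objectid = bytenr;	/* always the first byte of the extent */
	if (parent) {
		/* full back ref: offset is the first byte of the parent */
		key->type = BTRFS_SHARED_DATA_REF_KEY;
		key->offset = parent;
	} else {
		/* implicit back ref: hash of (root, inode, file offset) */
		key->type = BTRFS_EXTENT_DATA_REF_KEY;
		key->offset = hash_extent_data_ref(root_objectid, owner,
						   offset);
	}
}
#endif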
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
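
/*
 * Editorial note: the root objectid crc occupies the high bits of the
 * resulting 64-bit key offset and the (owner, offset) crc the low bits
 * (shifted by 31, so they overlap by one bit).  This keeps implicit refs
 * belonging to the same root roughly sorted together among an extent's
 * back ref items.
 */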
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
			     DISCARD_FL_BARRIER);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			int mark_free = 0;
			struct extent_buffer *must_clean = NULL;

			ret = pin_down_bytes(trans, root, NULL,
					     node->bytenr, node->num_bytes,
					     head->is_data, 1, &must_clean);
			if (ret > 0)
				mark_free = 1;

			if (must_clean) {
				clean_tree_block(NULL, root, must_clean);
				btrfs_tree_unlock(must_clean);
				free_extent_buffer(must_clean);
			}
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
			if (mark_free) {
				ret = btrfs_free_reserved_extent(root,
							node->bytenr,
							node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents ref count from going down to zero when
	 * there still are pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
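
/*
 * Editorial usage sketch (callers live outside this excerpt): the
 * transaction commit path typically flushes the whole backlog with
 *
 *	ret = btrfs_run_delayed_refs(trans, root, 0);
 *
 * while a caller that only needs to bound the backlog passes a target
 * number of refs.  Passing (unsigned long)-1 additionally waits out ref
 * heads held locked by other tasks, per the run_all loop above.
 */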
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
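/*
 * Illustrative sketch, not from the original file: a caller builds an
 * array of refsorts, sorts it with sort() from linux/sort.h using
 * refsort_cmp, and then walks the refs in bytenr order:
 *
 *	struct refsort *sorted;
 *	u32 refi = 0;
 *
 *	sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
 *	... fill sorted[refi].bytenr and sorted[refi].slot per ref ...
 *	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * afterwards sorted[i].slot visits the block's slots in on-disk order,
 * which keeps the extent tree updates mostly sequential.
 */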
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	if (ret)
		return ret;
	return 0;
}
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;
	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	INIT_LIST_HEAD(&found->block_groups);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_delalloc = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	atomic_set(&found->caching_threads, 0);
	return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	if (!cache->ro) {
		cache->space_info->bytes_readonly += cache->key.offset -
					btrfs_block_group_used(&cache->item);
		cache->ro = 1;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
}
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
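/*
 * Worked example, illustrative rather than from the original file: on a
 * one-device filesystem a request for
 * BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1 has the RAID1
 * bit stripped by the num_devices == 1 check, while on a two-device
 * filesystem a profile carrying both DUP and RAID1 keeps RAID1 and
 * loses DUP.  Either way the returned flags always describe a layout
 * the current device count can actually host.
 */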
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
{
	struct btrfs_fs_info *info = root->fs_info;
	u64 alloc_profile;

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
			info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
			info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
			info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}

	return btrfs_reduce_alloc_profile(root, data);
}
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	u64 alloc_target;

	alloc_target = btrfs_get_alloc_profile(root, 1);
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       alloc_target);
}
static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
{
	u64 num_bytes;
	int level;

	level = BTRFS_MAX_LEVEL - 2;
	/*
	 * NOTE: these calculations are absolutely the worst possible case.
	 * This assumes that _every_ item we insert will require a new leaf,
	 * and that the tree has grown to its maximum level size.
	 */

	/*
	 * for every item we insert we could insert both an extent item and an
	 * extent ref item.  Then for every item we insert, we will need to cow
	 * both the original leaf, plus the leaf to the left and right of it.
	 *
	 * Unless we are talking about the extent root, then we just want the
	 * number of items * 2, since we just need the extent item plus its
	 * ref.
	 */
	if (root == root->fs_info->extent_root)
		num_bytes = num_items * 2;
	else
		num_bytes = (num_items + (2 * num_items)) * 3;

	/*
	 * num_bytes is total number of leaves we could need times the leaf
	 * size, and then for every leaf we could end up cow'ing 2 nodes per
	 * level, down to the leaf level.
	 */
	num_bytes = (num_bytes * root->leafsize) +
		(num_bytes * (level * 2)) * root->nodesize;

	return num_bytes;
}
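/*
 * Worked example, illustrative rather than from the original file: for
 * a non-extent root with num_items = 1, leafsize = nodesize = 4096 and
 * BTRFS_MAX_LEVEL = 8 (so level = 6):
 *
 *	num_bytes = (1 + 2 * 1) * 3 = 9 leaves
 *	num_bytes = 9 * 4096 + (9 * (6 * 2)) * 4096 = 479232 bytes
 *
 * i.e. a single-item modification reserves roughly 468K under this
 * worst-case model.
 */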
/*
 * Unreserve metadata space for delalloc.  If we have less reserved credits
 * than we have extents, this function does nothing.
 */
int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
					  struct inode *inode, int num_items)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *meta_sinfo;
	u64 num_bytes;
	u64 alloc_target;
	bool bug = false;

	/* get the space info for where the metadata will live */
	alloc_target = btrfs_get_alloc_profile(root, 0);
	meta_sinfo = __find_space_info(info, alloc_target);

	num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
					   num_items);

	spin_lock(&meta_sinfo->lock);
	spin_lock(&BTRFS_I(inode)->accounting_lock);
	if (BTRFS_I(inode)->reserved_extents <=
	    BTRFS_I(inode)->outstanding_extents) {
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		spin_unlock(&meta_sinfo->lock);
		return 0;
	}
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	BTRFS_I(inode)->reserved_extents--;
	BUG_ON(BTRFS_I(inode)->reserved_extents < 0);

	if (meta_sinfo->bytes_delalloc < num_bytes) {
		bug = true;
		meta_sinfo->bytes_delalloc = 0;
	} else {
		meta_sinfo->bytes_delalloc -= num_bytes;
	}
	spin_unlock(&meta_sinfo->lock);

	BUG_ON(bug);

	return 0;
}
static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
{
	u64 thresh;

	thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
		meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
		meta_sinfo->bytes_super + meta_sinfo->bytes_root +
		meta_sinfo->bytes_may_use;

	thresh = meta_sinfo->total_bytes - thresh;
	thresh *= 80;
	do_div(thresh, 100);
	if (thresh <= meta_sinfo->bytes_delalloc)
		meta_sinfo->force_delalloc = 1;
	else
		meta_sinfo->force_delalloc = 0;
}
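/*
 * Worked example, illustrative and assuming the 80% scale factor
 * reconstructed above: with total_bytes = 1G and 900M already accounted
 * to the categories summed above, 100M remains, thresh becomes 80M, and
 * force_delalloc flips on once delalloc reservations alone consume 80M
 * of that remaining space.
 */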
struct async_flush {
	struct btrfs_root *root;
	struct btrfs_space_info *info;
	struct btrfs_work work;
};
static noinline void flush_delalloc_async(struct btrfs_work *work)
{
	struct async_flush *async;
	struct btrfs_root *root;
	struct btrfs_space_info *info;

	async = container_of(work, struct async_flush, work);
	root = async->root;
	info = async->info;

	btrfs_start_delalloc_inodes(root, 0);
	wake_up(&info->flush_wait);
	btrfs_wait_ordered_extents(root, 0, 0);

	spin_lock(&info->lock);
	info->flushing = 0;
	spin_unlock(&info->lock);
	wake_up(&info->flush_wait);

	kfree(async);
}
static void wait_on_flush(struct btrfs_space_info *info)
{
	DEFINE_WAIT(wait);
	u64 used;

	while (1) {
		prepare_to_wait(&info->flush_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		spin_lock(&info->lock);
		if (!info->flushing) {
			spin_unlock(&info->lock);
			break;
		}

		used = info->bytes_used + info->bytes_reserved +
			info->bytes_pinned + info->bytes_readonly +
			info->bytes_super + info->bytes_root +
			info->bytes_may_use + info->bytes_delalloc;
		if (used < info->total_bytes) {
			spin_unlock(&info->lock);
			break;
		}
		spin_unlock(&info->lock);
		schedule();
	}
	finish_wait(&info->flush_wait, &wait);
}
static void flush_delalloc(struct btrfs_root *root,
			   struct btrfs_space_info *info)
{
	struct async_flush *async;
	bool wait = false;

	spin_lock(&info->lock);

	if (!info->flushing) {
		info->flushing = 1;
		init_waitqueue_head(&info->flush_wait);
	} else {
		wait = true;
	}

	spin_unlock(&info->lock);

	if (wait) {
		wait_on_flush(info);
		return;
	}

	async = kzalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		goto flush;

	async->root = root;
	async->info = info;
	async->work.func = flush_delalloc_async;

	btrfs_queue_worker(&root->fs_info->enospc_workers,
			   &async->work);
	wait_on_flush(info);
	return;

flush:
	btrfs_start_delalloc_inodes(root, 0);
	btrfs_wait_ordered_extents(root, 0, 0);

	spin_lock(&info->lock);
	info->flushing = 0;
	spin_unlock(&info->lock);
	wake_up(&info->flush_wait);
}
static int maybe_allocate_chunk(struct btrfs_root *root,
				struct btrfs_space_info *info)
{
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	bool wait = false;
	int ret = 0;
	u64 min_metadata;
	u64 free_space;

	free_space = btrfs_super_total_bytes(disk_super);
	/*
	 * we allow the metadata to grow to a max of either 10gb or 5% of the
	 * space in the volume.
	 */
	min_metadata = min((u64)10 * 1024 * 1024 * 1024,
			   div64_u64(free_space * 5, 100));
	if (info->total_bytes >= min_metadata) {
		spin_unlock(&info->lock);
		return 0;
	}

	if (info->full) {
		spin_unlock(&info->lock);
		return 0;
	}

	if (!info->allocating_chunk) {
		info->force_alloc = 1;
		info->allocating_chunk = 1;
		init_waitqueue_head(&info->allocate_wait);
	} else {
		wait = true;
	}

	spin_unlock(&info->lock);

	if (wait) {
		wait_event(info->allocate_wait,
			   !info->allocating_chunk);
		return 1;
	}

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
			     4096 + 2 * 1024 * 1024,
			     info->flags, 0);
	btrfs_end_transaction(trans, root);
	if (ret)
		goto out;
out:
	spin_lock(&info->lock);
	info->allocating_chunk = 0;
	spin_unlock(&info->lock);
	wake_up(&info->allocate_wait);

	if (ret)
		return 0;
	return 1;
}
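/*
 * Worked example, illustrative rather than from the original file: on a
 * 100G volume, 5% is 5G, which is below the 10G cap, so metadata chunk
 * allocation stops being forced here once the metadata space info
 * reaches 5G; on a 1T volume the 10G cap is the smaller value and wins.
 */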
/*
 * Reserve metadata space for delalloc.
 */
int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
					struct inode *inode, int num_items)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *meta_sinfo;
	u64 num_bytes;
	u64 used;
	u64 alloc_target;
	int flushed = 0;
	int force_delalloc;

	/* get the space info for where the metadata will live */
	alloc_target = btrfs_get_alloc_profile(root, 0);
	meta_sinfo = __find_space_info(info, alloc_target);

	num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
					   num_items);
again:
	spin_lock(&meta_sinfo->lock);

	force_delalloc = meta_sinfo->force_delalloc;

	if (unlikely(!meta_sinfo->bytes_root))
		meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);

	if (!flushed)
		meta_sinfo->bytes_delalloc += num_bytes;

	used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
		meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
		meta_sinfo->bytes_super + meta_sinfo->bytes_root +
		meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;

	if (used > meta_sinfo->total_bytes) {
		flushed++;

		if (flushed == 1) {
			if (maybe_allocate_chunk(root, meta_sinfo))
				goto again;
			flushed++;
		} else {
			spin_unlock(&meta_sinfo->lock);
		}

		if (flushed == 2) {
			filemap_flush(inode->i_mapping);
			goto again;
		} else if (flushed == 3) {
			flush_delalloc(root, meta_sinfo);
			goto again;
		}
		spin_lock(&meta_sinfo->lock);
		meta_sinfo->bytes_delalloc -= num_bytes;
		spin_unlock(&meta_sinfo->lock);
		printk(KERN_ERR "enospc, has %d, reserved %d\n",
		       BTRFS_I(inode)->outstanding_extents,
		       BTRFS_I(inode)->reserved_extents);
		dump_space_info(meta_sinfo, 0, 0);
		return -ENOSPC;
	}

	BTRFS_I(inode)->reserved_extents++;
	check_force_delalloc(meta_sinfo);
	spin_unlock(&meta_sinfo->lock);

	if (!flushed && force_delalloc)
		filemap_flush(inode->i_mapping);

	return 0;
}
/*
 * unreserve num_items number of items worth of metadata space.  This needs to
 * be paired with btrfs_reserve_metadata_space.
 *
 * NOTE: if you have the option, run this _AFTER_ you do a
 * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
 * operations which will result in more used metadata, so we want to make sure
 * we can do that without issue.
 */
int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *meta_sinfo;
	u64 num_bytes;
	u64 alloc_target;
	bool bug = false;

	/* get the space info for where the metadata will live */
	alloc_target = btrfs_get_alloc_profile(root, 0);
	meta_sinfo = __find_space_info(info, alloc_target);

	num_bytes = calculate_bytes_needed(root, num_items);

	spin_lock(&meta_sinfo->lock);
	if (meta_sinfo->bytes_may_use < num_bytes) {
		bug = true;
		meta_sinfo->bytes_may_use = 0;
	} else {
		meta_sinfo->bytes_may_use -= num_bytes;
	}
	spin_unlock(&meta_sinfo->lock);

	BUG_ON(bug);

	return 0;
}
/*
 * Reserve some metadata space for use.  We'll calculate the worst case number
 * of bytes that would be needed to modify num_items number of items.  If we
 * have space, fantastic, if not, you get -ENOSPC.  Please call
 * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
 * items you reserved, since whatever metadata you needed should have already
 * been allocated.
 *
 * This will commit the transaction to make more space if we don't have enough
 * metadata space.  The only time we don't do this is if we're reserving space
 * inside of a transaction, then we will just return -ENOSPC and it is the
 * caller's responsibility to handle it properly.
 */
int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *meta_sinfo;
	u64 num_bytes;
	u64 used;
	u64 alloc_target;
	int retries = 0;

	/* get the space info for where the metadata will live */
	alloc_target = btrfs_get_alloc_profile(root, 0);
	meta_sinfo = __find_space_info(info, alloc_target);

	num_bytes = calculate_bytes_needed(root, num_items);
again:
	spin_lock(&meta_sinfo->lock);

	if (unlikely(!meta_sinfo->bytes_root))
		meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);

	if (!retries)
		meta_sinfo->bytes_may_use += num_bytes;

	used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
		meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
		meta_sinfo->bytes_super + meta_sinfo->bytes_root +
		meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;

	if (used > meta_sinfo->total_bytes) {
		retries++;
		if (retries == 1) {
			if (maybe_allocate_chunk(root, meta_sinfo))
				goto again;
			retries++;
		} else {
			spin_unlock(&meta_sinfo->lock);
		}

		if (retries == 2) {
			flush_delalloc(root, meta_sinfo);
			goto again;
		}
		spin_lock(&meta_sinfo->lock);
		meta_sinfo->bytes_may_use -= num_bytes;
		spin_unlock(&meta_sinfo->lock);

		dump_space_info(meta_sinfo, 0, 0);
		return -ENOSPC;
	}

	check_force_delalloc(meta_sinfo);
	spin_unlock(&meta_sinfo->lock);

	return 0;
}
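/*
 * Illustrative pairing sketch, not from the original file: a caller
 * modifying one item reserves before starting the transaction and
 * unreserves with the same item count after btrfs_end_transaction, per
 * the NOTE above btrfs_unreserve_metadata_space:
 *
 *	ret = btrfs_reserve_metadata_space(root, 1);
 *	if (ret)
 *		return ret;
 *	trans = btrfs_start_transaction(root, 1);
 *	... modify the item ...
 *	btrfs_end_transaction(trans, root);
 *	btrfs_unreserve_metadata_space(root, 1);
 */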
/*
 * This will check the space that the inode allocates from to make sure we
 * have enough space for bytes.
 */
int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
				u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	int ret = 0, committed = 0;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	if (data_sinfo->total_bytes - data_sinfo->bytes_used -
	    data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
	    data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
	    data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we
		 * need to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = 1;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_start_transaction(root, 1);
			if (!trans)
				return -ENOMEM;

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target, 0);
			btrfs_end_transaction(trans, root);
			if (ret)
				return ret;

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
		if (!committed && !root->fs_info->open_ioctl_trans) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (!trans)
				return -ENOMEM;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
		       ", %llu bytes_used, %llu bytes_reserved, "
		       "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
		       "%llu total\n", (unsigned long long)bytes,
		       (unsigned long long)data_sinfo->bytes_delalloc,
		       (unsigned long long)data_sinfo->bytes_used,
		       (unsigned long long)data_sinfo->bytes_reserved,
		       (unsigned long long)data_sinfo->bytes_pinned,
		       (unsigned long long)data_sinfo->bytes_readonly,
		       (unsigned long long)data_sinfo->bytes_may_use,
		       (unsigned long long)data_sinfo->total_bytes);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}
/*
 * if there was an error for whatever reason after calling
 * btrfs_check_data_free_space, call this so we can cleanup the counters.
 */
void btrfs_free_reserved_data_space(struct btrfs_root *root,
				    struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}
/* called when we are adding a delalloc extent to the inode's io_tree */
void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
				  u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* get the space info for where this inode will be storing its data */
	data_sinfo = BTRFS_I(inode)->space_info;

	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_delalloc += bytes;

	/*
	 * we are adding a delalloc extent without calling
	 * btrfs_check_data_free_space first.  This happens on a weird
	 * writepage condition, but shouldn't hurt our accounting
	 */
	if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
		data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
		BTRFS_I(inode)->reserved_bytes = 0;
	} else {
		data_sinfo->bytes_may_use -= bytes;
		BTRFS_I(inode)->reserved_bytes -= bytes;
	}

	spin_unlock(&data_sinfo->lock);
}
/* called when we are clearing a delalloc extent from the inode's io_tree */
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
			       u64 bytes)
{
	struct btrfs_space_info *info;

	info = BTRFS_I(inode)->space_info;

	spin_lock(&info->lock);
	info->bytes_delalloc -= bytes;
	spin_unlock(&info->lock);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
	}
	rcu_read_unlock();
}
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	u64 thresh;
	int ret = 0;

	mutex_lock(&fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = 1;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	thresh = space_info->total_bytes - space_info->bytes_readonly;
	thresh = div_factor(thresh, 8);
	if (!force &&
	    (space_info->bytes_used + space_info->bytes_pinned +
	     space_info->bytes_reserved + alloc_bytes) < thresh) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	space_info->force_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(&info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(&info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			if (cache->ro)
				cache->space_info->bytes_readonly -= num_bytes;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			if (cache->ro)
				cache->space_info->bytes_readonly += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			if (mark_free) {
				int ret;

				ret = btrfs_discard_extent(root, bytenr,
							   num_bytes);
				WARN_ON(ret);

				ret = btrfs_add_free_space(cache, bytenr,
							   num_bytes);
				WARN_ON(ret);
			}
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	BUG_ON(!cache);

	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	btrfs_put_block_group(cache);

	set_extent_dirty(fs_info->pinned_extents,
			 bytenr, bytenr + num_bytes - 1, GFP_NOFS);
	return 0;
}
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
				   u64 num_bytes, int reserve)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	if (reserve) {
		cache->reserved += num_bytes;
		cache->space_info->bytes_reserved += num_bytes;
	} else {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
	return 0;
}
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);
	return 0;
}
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache);
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);

		start += len;
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		ret = btrfs_discard_extent(root, start, end + 1 - start);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return ret;
}
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  u64 bytenr, u64 num_bytes,
			  int is_data, int reserved,
			  struct extent_buffer **must_clean)
{
	int err = 0;
	struct extent_buffer *buf;

	if (is_data)
		goto pinit;

	/*
	 * discard is sloooow, and so triggering discards on
	 * individual btree blocks isn't a good plan.  Just
	 * pin everything in discard mode.
	 */
	if (btrfs_test_opt(root, DISCARD))
		goto pinit;

	buf = btrfs_find_tree_block(root, bytenr, num_bytes);
	if (!buf)
		goto pinit;

	/* we can reuse a block if it hasn't been written
	 * and it is from this transaction.  We can't
	 * reuse anything from the tree log root because
	 * it has tiny sub-transactions.
	 */
	if (btrfs_buffer_uptodate(buf, 0) &&
	    btrfs_try_tree_lock(buf)) {
		u64 header_owner = btrfs_header_owner(buf);
		u64 header_transid = btrfs_header_generation(buf);
		if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
		    header_transid == trans->transid &&
		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			*must_clean = buf;
			return 1;
		}
		btrfs_tree_unlock(buf);
	}
	free_extent_buffer(buf);
pinit:
	btrfs_set_path_blocking(path);
	/* unlocks the pinned mutex */
	btrfs_pin_extent(root, bytenr, num_bytes, reserved);

	BUG_ON(err < 0);
	return 0;
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			BUG_ON(ret);
			btrfs_release_path(extent_root, path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		BUG_ON(ret < 0);

		btrfs_release_path(extent_root, path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		BUG_ON(ret);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			BUG_ON(ret);
		}
	} else {
		int mark_free = 0;
		struct extent_buffer *must_clean = NULL;

		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = pin_down_bytes(trans, root, path, bytenr,
				     num_bytes, is_data, 0, &must_clean);
		if (ret > 0)
			mark_free = 1;
		BUG_ON(ret < 0);
		/*
		 * it is going to be very rare for someone to be waiting
		 * on the block we're freeing.  del_items might need to
		 * schedule, so rather than get fancy, just force it
		 * to blocking here
		 */
		if (must_clean)
			btrfs_set_lock_blocking(must_clean);

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);

		if (must_clean) {
			clean_tree_block(NULL, root, must_clean);
			btrfs_tree_unlock(must_clean);
			free_extent_buffer(must_clean);
		}

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		} else {
			invalidate_mapping_pages(info->btree_inode->i_mapping,
			     bytenr >> PAGE_CACHE_SHIFT,
			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
					 mark_free);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free an extent, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
				  &head->node, head->extent_op,
				  head->must_insert_reserved);
	BUG_ON(ret);
	btrfs_put_delayed_ref(&head->node);
	return 0;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
		ret = check_ref_cleanup(trans, root, bytenr);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner,
					offset, BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}
	return ret;
}
int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u32 blocksize,
			  u64 parent, u64 root_objectid, int level)
{
	u64 used;

	spin_lock(&root->node_lock);
	used = btrfs_root_used(&root->root_item) - blocksize;
	btrfs_set_root_used(&root->root_item, used);
	spin_unlock(&root->node_lock);

	return btrfs_free_extent(trans, root, bytenr, blocksize,
				 parent, root_objectid, level, 0);
}
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
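/*
 * Worked example, illustrative rather than from the original file: with
 * a 64K stripesize, mask is 0xffff, so stripe_align rounds val = 65537
 * up to 131072 (the next 64K boundary) and leaves an already-aligned
 * 131072 unchanged.
 */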
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}
enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int last_ptr_loop = 0;
	int loop = 0;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	space_info = __find_space_info(root->fs_info, data);

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

ideal_cache:
	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if its not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups, list) {
		u64 offset;
		int cached;

		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

have_block_group:
		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
			u64 free_percent;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * We only want to start kthread caching if we are at
			 * the point where we will wait for caching to make
			 * progress, or if our ideal search is over and we've
			 * found somebody to start caching.
			 */
			if (loop > LOOP_CACHING_NOWAIT ||
			    (loop > LOOP_FIND_IDEAL &&
			     atomic_read(&space_info->caching_threads) < 2)) {
				ret = cache_block_group(block_group);
				BUG_ON(ret);
			}
			found_uncached_bg = true;

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

		cached = block_group_cache_done(block_group);
		if (unlikely(!cached))
			found_uncached_bg = true;

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so lets look
		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
		 * have tried the cluster allocator plenty of times at this
		 * point and not have found anything, so we are likely way too
		 * fragmented for the clustering stuff to find anything, so lets
		 * just skip it and let the allocator find whatever block it can
		 * find
		 */
		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			if (last_ptr->block_group &&
			    (last_ptr->block_group->ro ||
			    !block_group_bits(last_ptr->block_group, data))) {
				offset = 0;
				goto refill_cluster;
			}

			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
						 num_bytes, search_start);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				goto checks;
			}

			spin_lock(&last_ptr->lock);
			/*
			 * whoops, this cluster doesn't actually point to
			 * this block group.  Get a ref on the block
			 * group it does point to and try again
			 */
			if (!last_ptr_loop && last_ptr->block_group &&
			    last_ptr->block_group != block_group) {

				btrfs_put_block_group(block_group);
				block_group = last_ptr->block_group;
				btrfs_get_block_group(block_group);
				spin_unlock(&last_ptr->lock);
				spin_unlock(&last_ptr->refill_lock);

				last_ptr_loop = 1;
				search_start = block_group->key.objectid;
				/*
				 * we know this block group is properly
				 * in the list because
				 * btrfs_remove_block_group, drops the
				 * cluster before it removes the block
				 * group from the list
				 */
				goto have_block_group;
			}
			spin_unlock(&last_ptr->lock);
refill_cluster:
			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			last_ptr_loop = 0;

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       offset, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		if (exclude_nr > 0 &&
		    (search_start + num_bytes > exclude_start &&
		     search_start < exclude_start + exclude_nr)) {
			search_start = exclude_start + exclude_nr;

			btrfs_add_free_space(block_group, offset, num_bytes);
			/*
			 * if search_start is still in this block group
			 * then we just re-search this block group
			 */
			if (search_start >= block_group->key.objectid &&
			    search_start < (block_group->key.objectid +
					    block_group->key.offset))
				goto have_block_group;
			goto loop;
		}

		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		update_reserved_extents(block_group, num_bytes, 1);

		/* we are all good, lets return */
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
	    (found_uncached_bg || empty_size || empty_cluster ||
	     allowed_chunk_alloc)) {
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent &&
			    atomic_read(&space_info->caching_threads))
				goto search;

			/*
			 * 1 of the following 2 things have happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find a uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		if (loop < LOOP_CACHING_WAIT) {
			loop++;
			goto search;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			empty_size = 0;
			empty_cluster = 0;
		}

		if (allowed_chunk_alloc) {
			ret = do_chunk_alloc(trans, root, num_bytes +
					     2 * 1024 * 1024, data, 1);
			allowed_chunk_alloc = 0;
			done_chunk_alloc = 1;
		} else if (!done_chunk_alloc) {
			space_info->force_alloc = 1;
		}

		if (loop < LOOP_NO_EMPTY_SIZE) {
			loop++;
			goto search;
		}
		ret = -ENOSPC;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	}

	/* we found what we needed */
	if (ins->objectid) {
		if (!(data & BTRFS_BLOCK_GROUP_DATA))
			trans->block_group = block_group->key.objectid;

		btrfs_put_block_group(block_group);
		ret = 0;
	}

	return ret;
}
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_super),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
	       " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
	       "\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_delalloc,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_root,
	       (unsigned long long)info->bytes_super,
	       (unsigned long long)info->bytes_reserved);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
	list_for_each_entry(cache, &info->block_groups, list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	up_read(&info->groups_sem);
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret == -ENOSPC) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	return ret;
}
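/*
 * Note on the retry path above: on -ENOSPC the request is halved, rounded
 * down to a sectorsize boundary and clamped to min_alloc_size, and a chunk
 * allocation is forced before retrying.  Callers that can tolerate a
 * smaller extent should therefore pass min_alloc_size < num_bytes.
 */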
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	update_reserved_extents(cache, len, 0);
	btrfs_put_block_group(cache);

	return ret;
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset,
				 1, 0);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset,
				 1, 0);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
					 0, root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL);
	return ret;
}
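/*
 * Note: the extent item itself is not inserted here.  The allocation is
 * only recorded as a delayed ref (BTRFS_ADD_DELAYED_EXTENT); the actual
 * extent tree update happens later when the delayed refs are run.
 */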
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	update_reserved_extents(block_group, ins->offset, 1);
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}
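/*
 * The caching_ctl cases above fall into three buckets: the logged range
 * is entirely behind ->progress (already scanned, so remove it from the
 * free space cache), entirely ahead of it (exclude it so the caching
 * thread will skip it), or straddling it (split the range and do both).
 */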
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
static int alloc_tree_block(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 num_bytes, u64 parent, u64 root_objectid,
			    struct btrfs_disk_key *key, int level,
			    u64 empty_size, u64 hint_byte, u64 search_end,
			    struct btrfs_key *ins)
{
	int ret;
	u64 flags = 0;

	ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
				   empty_size, hint_byte, search_end,
				   ins, 0);
	if (ret)
		return ret;

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins->objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
					ins->offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op);
		BUG_ON(ret);
	}

	if (root_objectid == root->root_key.objectid) {
		u64 used;
		spin_lock(&root->node_lock);
		used = btrfs_root_used(&root->root_item) + num_bytes;
		btrfs_set_root_used(&root->root_item, used);
		spin_unlock(&root->node_lock);
	}
	return ret;
}
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct extent_buffer *buf;

	ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
			       key, level, empty_size, hint, (u64)-1, &ins);
	if (ret) {
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	return buf;
}
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
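/*
 * The tree walk below runs in two stages.  DROP_REFERENCE walks the tree
 * dropping one reference per block.  When it hits a shared block whose
 * backrefs may need conversion, do_walk_down() flips wc->stage to
 * UPDATE_BACKREF for the subtree rooted there, and walk_up_proc() flips
 * it back to DROP_REFERENCE once that subtree has been processed.
 */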
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u64 last = 0;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		last = bytenr + blocksize;
		nread++;
	}
	wc->reada_slot = slot;
}
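/*
 * Roughly, the readahead window above is adaptive: if the walker is still
 * inside the previously readahead range (slot < reada_slot) the window
 * shrinks by a third, otherwise it grows by half, capped at one full
 * node's worth of pointers.
 */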
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock(eb);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret);
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock(eb);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	BUG_ON(ret);
	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = 1;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
					root->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}
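/*
 * Note the 'skip' path above: when the child block does not need to be
 * descended into (still shared, or already fully backref'd), the walk
 * only drops its own reference on the child with btrfs_free_extent()
 * (under DROP_REFERENCE) and moves on to the next slot.
 */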
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock(eb);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
				root->root_key.objectid, level, 0);
	BUG_ON(ret);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		}
		level = wc->level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock(path->nodes[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
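/*
 * walk_down_tree() and walk_up_tree() are meant to be called in a loop:
 * walking down descends while blocks are exclusively owned, walking up
 * frees the blocks whose last reference was dropped and advances to the
 * next sibling.  walk_up_tree() returning 1 means the root of the walk
 * was reached and the loop is done.
 */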
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	BUG_ON(!wc);

	trans = btrfs_start_transaction(tree_root, 1);

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = 1;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (trans->transaction->in_commit ||
		    trans->transaction->delayed_refs.flushing) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			BUG_ON(ret);

			btrfs_end_transaction(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 1);
		} else {
			unsigned long update;
			update = trans->delayed_ref_updates;
			trans->delayed_ref_updates = 0;
			if (update)
				btrfs_run_delayed_refs(trans, tree_root,
						       update);
		}
	}
	btrfs_release_path(root, path);
	BUG_ON(err);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		BUG_ON(ret < 0);
		if (ret > 0) {
			ret = btrfs_del_orphan_item(trans, tree_root,
						    root->root_key.objectid);
			BUG_ON(ret);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out:
	btrfs_end_transaction(trans, tree_root);
	kfree(wc);
	btrfs_free_path(path);
	return err;
}
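/*
 * Note: the drop_progress/drop_level fields saved into the root item
 * above are what make the drop restartable.  If the transaction has to
 * be committed part way through, a later btrfs_drop_snapshot() call
 * re-searches to the saved key and continues from there.
 */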
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	BUG_ON(!wc);

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = 1;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
static noinline int relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
	u64 page_end;
	unsigned long first_index;
	unsigned long last_index;
	unsigned long i;
	struct page *page;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;
	struct btrfs_ordered_extent *ordered;
	unsigned int total_read = 0;
	unsigned int total_dirty = 0;
	int ret = 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	mutex_lock(&inode->i_mutex);
	first_index = start >> PAGE_CACHE_SHIFT;
	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;

	/* make sure the dirty trick played by the caller works */
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    first_index, last_index);
	if (ret)
		goto out_unlock;

	file_ra_state_init(ra, inode->i_mapping);

	for (i = first_index ; i <= last_index; i++) {
		if (total_read % ra->ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
				       calc_ra(i, last_index, ra->ra_pages));
		}
		total_read++;
again:
		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
			BUG_ON(1);
		page = grab_cache_page(inode->i_mapping, i);
		if (!page) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				goto out_unlock;
			}
		}
		wait_on_page_writeback(page);

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		if (i == first_index)
			set_extent_bits(io_tree, page_start, page_end,
					EXTENT_BOUNDARY, GFP_NOFS);
		btrfs_set_extent_delalloc(inode, page_start, page_end);

		set_page_dirty(page);
		total_dirty++;

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
	}

out_unlock:
	kfree(ra);
	mutex_unlock(&inode->i_mutex);
	balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
	return ret;
}
static noinline int relocate_data_extent(struct inode *reloc_inode,
					 struct btrfs_key *extent_key,
					 u64 offset)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
	struct extent_map *em;
	u64 start = extent_key->objectid - offset;
	u64 end = start + extent_key->offset - 1;

	em = alloc_extent_map(GFP_NOFS);
	BUG_ON(!em || IS_ERR(em));

	em->start = start;
	em->len = extent_key->offset;
	em->block_len = extent_key->offset;
	em->block_start = extent_key->objectid;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* setup extent map to cheat btrfs_readpage */
	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
	while (1) {
		int ret;
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(reloc_inode, start, end, 0);
	}
	unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);

	return relocate_inode_pages(reloc_inode, start, extent_key->offset);
}
struct btrfs_ref_path {
	u64 extent_start;
	u64 nodes[BTRFS_MAX_LEVEL];
	u64 root_objectid;
	u64 root_generation;
	u64 owner_objectid;
	u32 num_refs;
	int lowest_level;
	int current_level;
	int shared_level;

	struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
	u64 new_nodes[BTRFS_MAX_LEVEL];
};

struct disk_extent {
	u64 ram_bytes;
	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 offset;
	u64 num_bytes;
	u8 compression;
	u8 encryption;
	u16 other_encoding;
};
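/*
 * A btrfs_ref_path records one path of back references from an extent up
 * to a tree root: nodes[] holds the bytenr of the referencing block at
 * each level, lowest_level/current_level track the walk, and
 * root_objectid/root_generation identify the root once one is reached.
 */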
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}
static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_ref_path *ref_path,
				    int first_time)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_extent_ref *ref;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 bytenr;
	u32 nritems;
	int level;
	int ret = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (first_time) {
		ref_path->lowest_level = -1;
		ref_path->current_level = -1;
		ref_path->shared_level = -1;
		goto walk_up;
	}
walk_down:
	level = ref_path->current_level - 1;
	while (level >= -1) {
		u64 parent;
		if (level < ref_path->lowest_level)
			break;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;
		BUG_ON(bytenr == 0);

		parent = ref_path->nodes[level + 1];
		ref_path->nodes[level + 1] = 0;
		ref_path->current_level = level;
		BUG_ON(parent == 0);

		key.objectid = bytenr;
		key.offset = parent + 1;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		BUG_ON(ret == 0);

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				goto next;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid == bytenr &&
		    found_key.type == BTRFS_EXTENT_REF_KEY) {
			if (level < ref_path->shared_level)
				ref_path->shared_level = level;
			goto found;
		}
next:
		level--;
		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached lowest level */
	ret = 1;
	goto out;
walk_up:
	level = ref_path->current_level;
	while (level < BTRFS_MAX_LEVEL - 1) {
		u64 ref_objectid;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;

		BUG_ON(bytenr == 0);

		key.objectid = bytenr;
		key.offset = 0;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* the extent was freed by someone */
				if (ref_path->lowest_level == level)
					goto out;
				btrfs_release_path(extent_root, path);
				goto walk_down;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr ||
		    found_key.type != BTRFS_EXTENT_REF_KEY) {
			/* the extent was freed by someone */
			if (ref_path->lowest_level == level) {
				ret = 1;
				goto out;
			}
			btrfs_release_path(extent_root, path);
			goto walk_down;
		}
found:
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		ref_objectid = btrfs_ref_objectid(leaf, ref);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
			if (first_time) {
				level = (int)ref_objectid;
				BUG_ON(level >= BTRFS_MAX_LEVEL);
				ref_path->lowest_level = level;
				ref_path->current_level = level;
				ref_path->nodes[level] = bytenr;
			} else {
				WARN_ON(ref_objectid != level);
			}
		} else {
			WARN_ON(level != -1);
		}
		first_time = 0;

		if (ref_path->lowest_level == level) {
			ref_path->owner_objectid = ref_objectid;
			ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
		}

		/*
		 * the block is tree root or the block isn't in reference
		 * counted tree.
		 */
		if (found_key.objectid == found_key.offset ||
		    is_cowonly_root(btrfs_ref_root(leaf, ref))) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			if (level < 0) {
				/* special reference from the tree log */
				ref_path->nodes[0] = found_key.offset;
				ref_path->current_level = 0;
			}
			ret = 0;
			goto out;
		}

		level++;
		BUG_ON(ref_path->nodes[level] != 0);
		ref_path->nodes[level] = found_key.offset;
		ref_path->current_level = level;

		/*
		 * the reference was created in the running transaction,
		 * no need to continue walking up.
		 */
		if (btrfs_ref_generation(leaf, ref) == trans->transid) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			ret = 0;
			goto out;
		}

		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached max tree level, but no tree root found. */
	BUG();
out:
	btrfs_free_path(path);
	return ret;
}
*trans
,
6059 struct btrfs_root
*extent_root
,
6060 struct btrfs_ref_path
*ref_path
,
6063 memset(ref_path
, 0, sizeof(*ref_path
));
6064 ref_path
->extent_start
= extent_start
;
6066 return __next_ref_path(trans
, extent_root
, ref_path
, 1);
6069 static int btrfs_next_ref_path(struct btrfs_trans_handle
*trans
,
6070 struct btrfs_root
*extent_root
,
6071 struct btrfs_ref_path
*ref_path
)
6073 return __next_ref_path(trans
, extent_root
, ref_path
, 0);
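/*
 * Typical iteration over all reference paths of an extent (this is the
 * pattern relocate_one_extent() uses further below):
 *
 *	ret = btrfs_first_ref_path(trans, extent_root, ref_path, bytenr);
 *	while (ret == 0) {
 *		... process one path rooted at ref_path->root_objectid ...
 *		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
 *	}
 *
 * ret > 0 means no more paths, ret < 0 is an error.
 */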
static noinline int get_new_locations(struct inode *reloc_inode,
				      struct btrfs_key *extent_key,
				      u64 offset, int no_fragment,
				      struct disk_extent **extents,
				      int *nr_extents)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct disk_extent *exts = *extents;
	struct btrfs_key found_key;
	u64 cur_pos;
	u64 last_byte;
	u32 nritems;
	int nr = 0;
	int max = *nr_extents;
	int ret;

	WARN_ON(!no_fragment && *extents);
	if (!exts) {
		max = 1;
		exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
		if (!exts)
			return -ENOMEM;
	}

	path = btrfs_alloc_path();
	BUG_ON(!path);

	cur_pos = extent_key->objectid - offset;
	last_byte = extent_key->objectid + extent_key->offset;
	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
				       cur_pos, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.offset != cur_pos ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY ||
		    found_key.objectid != reloc_inode->i_ino)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG ||
		    btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			break;

		if (nr == max) {
			struct disk_extent *old = exts;
			max *= 2;
			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			memcpy(exts, old, sizeof(*exts) * nr);
			if (old != *extents)
				kfree(old);
		}

		exts[nr].disk_bytenr =
			btrfs_file_extent_disk_bytenr(leaf, fi);
		exts[nr].disk_num_bytes =
			btrfs_file_extent_disk_num_bytes(leaf, fi);
		exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
		exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
		exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
		exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
									   fi);
		BUG_ON(exts[nr].offset > 0);
		BUG_ON(exts[nr].compression || exts[nr].encryption);
		BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);

		cur_pos += exts[nr].num_bytes;
		nr++;

		if (cur_pos + offset >= last_byte)
			break;

		if (no_fragment) {
			ret = 1;
			goto out;
		}
		path->slots[0]++;
	}

	BUG_ON(cur_pos + offset > last_byte);
	if (cur_pos + offset < last_byte) {
		ret = -ENOENT;
		goto out;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (ret) {
		if (exts != *extents)
			kfree(exts);
	} else {
		*extents = exts;
		*nr_extents = nr;
	}
	return ret;
}
static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_key *leaf_key,
					struct btrfs_ref_path *ref_path,
					struct disk_extent *new_extents,
					int nr_extents)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	struct btrfs_key key;
	u64 lock_start = 0;
	u64 lock_end = 0;
	u64 num_bytes;
	u64 ext_offset;
	u64 search_end = (u64)-1;
	u32 nritems;
	int nr_scaned = 0;
	int extent_locked = 0;
	int extent_type;
	int ret;

	memcpy(&key, leaf_key, sizeof(key));
	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
		if (key.objectid < ref_path->owner_objectid ||
		    (key.objectid == ref_path->owner_objectid &&
		     key.type < BTRFS_EXTENT_DATA_KEY)) {
			key.objectid = ref_path->owner_objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = 0;
		}
	}

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
next:
		if (extent_locked && ret > 0) {
			/*
			 * the file extent item was modified by someone
			 * before the extent got locked.
			 */
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}

		if (path->slots[0] >= nritems) {
			if (++nr_scaned > 2)
				break;

			BUG_ON(extent_locked);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
			if ((key.objectid > ref_path->owner_objectid) ||
			    (key.objectid == ref_path->owner_objectid &&
			     key.type > BTRFS_EXTENT_DATA_KEY) ||
			    key.offset >= search_end)
				break;
		}

		if (inode && key.objectid != inode->i_ino) {
			BUG_ON(extent_locked);
			btrfs_release_path(root, path);
			mutex_unlock(&inode->i_mutex);
			iput(inode);
			inode = NULL;
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		     extent_key->objectid)) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}

		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		ext_offset = btrfs_file_extent_offset(leaf, fi);

		if (search_end == (u64)-1) {
			search_end = key.offset - ext_offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		}

		if (!extent_locked) {
			lock_start = key.offset;
			lock_end = lock_start + num_bytes - 1;
		} else {
			if (lock_start > key.offset ||
			    lock_end + 1 < key.offset + num_bytes) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				extent_locked = 0;
			}
		}

		if (!inode) {
			btrfs_release_path(root, path);

			inode = btrfs_iget_locked(root->fs_info->sb,
						  key.objectid, root);
			if (inode->i_state & I_NEW) {
				BTRFS_I(inode)->root = root;
				BTRFS_I(inode)->location.objectid =
					key.objectid;
				BTRFS_I(inode)->location.type =
					BTRFS_INODE_ITEM_KEY;
				BTRFS_I(inode)->location.offset = 0;
				btrfs_read_locked_inode(inode);
				unlock_new_inode(inode);
			}
			/*
			 * some code call btrfs_commit_transaction while
			 * holding the i_mutex, so we can't use mutex_lock
			 * here.
			 */
			if (is_bad_inode(inode) ||
			    !mutex_trylock(&inode->i_mutex)) {
				iput(inode);
				inode = NULL;
				key.offset = (u64)-1;
				goto skip;
			}
		}

		if (!extent_locked) {
			struct btrfs_ordered_extent *ordered;

			btrfs_release_path(root, path);

			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				    lock_end, GFP_NOFS);
			ordered = btrfs_lookup_first_ordered_extent(inode,
								    lock_end);
			if (ordered &&
			    ordered->file_offset <= lock_end &&
			    ordered->file_offset + ordered->len > lock_start) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				btrfs_start_ordered_extent(inode, ordered, 1);
				btrfs_put_ordered_extent(ordered);
				key.offset += num_bytes;
				goto skip;
			}
			if (ordered)
				btrfs_put_ordered_extent(ordered);

			extent_locked = 1;
			continue;
		}

		if (nr_extents == 1) {
			/* update extent pointer in place */
			btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[0].disk_bytenr);
			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[0].disk_num_bytes);
			btrfs_mark_buffer_dirty(leaf);

			btrfs_drop_extent_cache(inode, key.offset,
						key.offset + num_bytes - 1, 0);

			ret = btrfs_inc_extent_ref(trans, root,
						new_extents[0].disk_bytenr,
						new_extents[0].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid,
						key.objectid);
			BUG_ON(ret);

			ret = btrfs_free_extent(trans, root,
						extent_key->objectid,
						extent_key->offset,
						leaf->start,
						btrfs_header_owner(leaf),
						btrfs_header_generation(leaf),
						key.objectid, 0);
			BUG_ON(ret);

			btrfs_release_path(root, path);
			key.offset += num_bytes;
		} else {
			u64 alloc_hint;
			u64 extent_len;
			int i;
			/*
			 * drop old extent pointer at first, then insert the
			 * new pointers one by one
			 */
			btrfs_release_path(root, path);
			ret = btrfs_drop_extents(trans, root, inode, key.offset,
						 key.offset + num_bytes,
						 key.offset, &alloc_hint);
			BUG_ON(ret);

			for (i = 0; i < nr_extents; i++) {
				if (ext_offset >= new_extents[i].num_bytes) {
					ext_offset -= new_extents[i].num_bytes;
					continue;
				}
				extent_len = min(new_extents[i].num_bytes -
						 ext_offset, num_bytes);

				ret = btrfs_insert_empty_item(trans, root,
							      path, &key,
							      sizeof(*fi));
				BUG_ON(ret);

				leaf = path->nodes[0];
				fi = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
				btrfs_set_file_extent_generation(leaf, fi,
							trans->transid);
				btrfs_set_file_extent_type(leaf, fi,
							BTRFS_FILE_EXTENT_REG);
				btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[i].disk_bytenr);
				btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[i].disk_num_bytes);
				btrfs_set_file_extent_ram_bytes(leaf, fi,
						new_extents[i].ram_bytes);

				btrfs_set_file_extent_compression(leaf, fi,
						new_extents[i].compression);
				btrfs_set_file_extent_encryption(leaf, fi,
						new_extents[i].encryption);
				btrfs_set_file_extent_other_encoding(leaf, fi,
						new_extents[i].other_encoding);

				btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len);
				ext_offset += new_extents[i].offset;
				btrfs_set_file_extent_offset(leaf, fi,
							ext_offset);
				btrfs_mark_buffer_dirty(leaf);

				btrfs_drop_extent_cache(inode, key.offset,
						key.offset + extent_len - 1, 0);

				ret = btrfs_inc_extent_ref(trans, root,
						new_extents[i].disk_bytenr,
						new_extents[i].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid, key.objectid);
				BUG_ON(ret);
				btrfs_release_path(root, path);

				inode_add_bytes(inode, extent_len);

				ext_offset = 0;
				num_bytes -= extent_len;
				key.offset += extent_len;

				if (num_bytes == 0)
					break;
			}
			BUG_ON(i >= nr_extents);
		}

		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}
skip:
		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
		    key.offset >= search_end)
			break;

		cond_resched();
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	if (inode) {
		mutex_unlock(&inode->i_mutex);
		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
		}
		iput(inode);
	}
	return ret;
}
int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf, u64 orig_start)
{
	int level;
	int ret;

	BUG_ON(btrfs_header_generation(buf) != trans->transid);
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	level = btrfs_header_level(buf);
	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_leaf_ref *orig_ref;

		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
		if (!orig_ref)
			return -ENOENT;

		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
		if (!ref) {
			btrfs_free_leaf_ref(root, orig_ref);
			return -ENOMEM;
		}

		ref->nritems = orig_ref->nritems;
		memcpy(ref->extents, orig_ref->extents,
		       sizeof(ref->extents[0]) * ref->nritems);

		btrfs_free_leaf_ref(root, orig_ref);

		ref->root_gen = trans->transid;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);

		ret = btrfs_add_leaf_ref(root, ref, 0);
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
	return 0;
}
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct btrfs_root *target_root)
{
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;
	struct extent_state *cached_state = NULL;
	u64 num_bytes;
	u64 skip_objectid = 0;
	u32 nritems;
	u32 i;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			continue;
		if (!inode || inode->i_ino != key.objectid) {
			iput(inode);
			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);
		}
		if (!inode) {
			skip_objectid = key.objectid;
			continue;
		}
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
				 key.offset + num_bytes - 1, 0, &cached_state,
				 GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
				     key.offset + num_bytes - 1, &cached_state,
				     GFP_NOFS);
		cond_resched();
	}
	iput(inode);
	return 0;
}
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;
	u64 bytenr;
	u64 num_bytes;
	u32 nritems;
	u32 i;
	int ext_index;
	int nr_extent;
	int ret;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	BUG_ON(!ref);

	ext_index = -1;
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;

		ext_index++;
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)
			continue;

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		nr_extent = 1;
		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);
		if (ret > 0)
			continue;
		BUG_ON(ret < 0);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					new_extent->disk_bytenr,
					new_extent->disk_num_bytes,
					leaf->start,
					root->root_key.objectid,
					trans->transid, key.objectid);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
					key.objectid, 0);
		BUG_ON(ret);
		cond_resched();
	}
	kfree(new_extent);
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
	return 0;
}
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
		BUG_ON(ret);
	}
	return 0;
}
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;
	int ret;
	unsigned long nr;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);
		while (1) {
			trans = btrfs_join_transaction(root, 1);
			BUG_ON(!trans);

			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, reloc_root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, root);
			BUG_ON(ret);
			btrfs_btree_balance_dirty(root, nr);
		}

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
		btrfs_btree_balance_dirty(root, nr);

		kfree(prev_root);
		prev_root = reloc_root;
	}
	if (prev_root) {
		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
		kfree(prev_root);
	}
	return 0;
}
int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
	return 0;
}
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;
	int found;
	int ret;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	BUG_ON(ret);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	if (found) {
		trans = btrfs_start_transaction(root, 1);
		BUG_ON(!trans);
		ret = btrfs_commit_transaction(trans, root);
		BUG_ON(ret);
	}

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
	return 0;
}
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)
		return 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(ret);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
	return 0;
}
/*
 * Core function of space balance.
 *
 * The idea is using reloc trees to relocate tree blocks in reference
 * counted roots. There is one reloc tree for each subvol, and all
 * reloc trees share same root key objectid. Reloc trees are snapshots
 * of the latest committed roots of subvols (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps.
 * COW the block through subvol's reloc tree, then update block pointer
 * in the subvol to point to the new block. Since all reloc trees share
 * same root key objectid, doing special handling for tree blocks owned
 * by them is easy. Once a tree block has been COWed in one reloc tree,
 * we can use the resulting new block directly when the same block is
 * required to COW again through other reloc trees. In this way, relocated
 * tree blocks are shared between reloc trees, so they are also shared
 * between subvols.
 */
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	u64 *nodes;
	int level;
	int shared_level;
	int lowest_level = 0;
	int ret;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		BUG_ON(ret < 0);
		path->lowest_level = 0;
		btrfs_release_path(root, path);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	BUG_ON(ret);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
				break;
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		}
		if (nodes[0] &&
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
			BUG_ON(ret);
		}
		btrfs_release_path(reloc_root, path);
	} else {
		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
				       lowest_level);
		BUG_ON(ret);
	}

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
	BUG_ON(ret < 0);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 0);
		BUG_ON(ret);
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		BUG_ON(ret);
		free_extent_buffer(eb);
	}

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
	return 0;
}
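
/*
 * Illustrative sketch, not part of the original source: the two-step
 * scheme described in the comment above relocate_one_path(), expressed
 * with helpers from this file.  Kept under #if 0 so it is never built;
 * locking, error handling and the merge arguments are simplified
 * assumptions.
 */
#if 0
static int relocate_block_sketch(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_key *first_key,
				 struct btrfs_key *keys, u64 *nodes)
{
	struct btrfs_root *reloc_root;
	int ret;

	/* make sure the subvol has a reloc tree (snapshot of commit_root) */
	ret = init_reloc_tree(trans, root);
	if (ret)
		return ret;
	reloc_root = root->reloc_root;

	/* step one: COW the block through the subvol's reloc tree */
	ret = btrfs_search_slot(trans, reloc_root, first_key, path, 0, 1);
	if (ret < 0)
		return ret;
	btrfs_release_path(reloc_root, path);

	/* step two: point the subvol at the relocated (now shared) blocks */
	return btrfs_merge_path(trans, root, keys, nodes, 0);
}
#endif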
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	int ret;

	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
	BUG_ON(ret);

	return 0;
}
static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;

	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_release_path(extent_root, path);
	return ret;
}
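
/*
 * Look up the root a reference path starts from.  COW-only trees have a
 * single root item with key offset 0; reference counted subvol roots are
 * looked up with offset (u64)-1, which finds their latest root item.
 */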
static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	int nr_extents = 0;
	int loops;
	int ret;
	int level;
	struct btrfs_key first_key;
	u64 prev_block = 0;

	trans = btrfs_start_transaction(extent_root, 1);
	BUG_ON(!trans);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
		goto out;
	}

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
		ret = -ENOMEM;
		goto out;
	}

	for (loops = 0; ; loops++) {
		if (loops == 0) {
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		} else {
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
		}
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
			continue;

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)
			continue;

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			if (pass == 0) {
				/*
				 * copy data extents to new locations
				 */
				u64 group_start = group->key.objectid;
				ret = relocate_data_extent(reloc_inode,
							   extent_key,
							   group_start);
				if (ret < 0)
					goto out;
				break;
			}
			level = 0;
		} else {
			level = ref_path->owner_objectid;
		}

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
					     block_size, 0);
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;
		}

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			if (pass == 1) {
				ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
				if (ret < 0)
					goto out;
				continue;
			}
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			if (!new_extents) {
				u64 group_start = group->key.objectid;
				new_extents = kmalloc(sizeof(*new_extents),
						      GFP_NOFS);
				nr_extents = 1;
				ret = get_new_locations(reloc_inode,
							extent_key,
							group_start, 1,
							&new_extents,
							&nr_extents);
				if (ret)
					goto out;
			}
			ret = replace_one_extent(trans, found_root,
						path, extent_key,
						&first_key, ref_path,
						new_extents, nr_extents);
		} else {
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, extent_root);
	kfree(new_extents);
	kfree(ref_path);
	return ret;
}
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
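
/*
 * Worked example (illustration only, not from the original source): with
 * a single rw device, mirrored chunks become duplicated ones, i.e.
 *
 *	update_block_group_flags(root, BTRFS_BLOCK_GROUP_DATA |
 *				       BTRFS_BLOCK_GROUP_RAID1)
 *		== BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_DUP
 *
 * while with several rw devices DUP goes the other way and becomes RAID1.
 */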
static int __alloc_chunk_for_shrink(struct btrfs_root *root,
				    struct btrfs_block_group_cache *shrink_block_group,
				    int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) +
	    shrink_block_group->reserved > 0) {
		spin_unlock(&shrink_block_group->lock);

		trans = btrfs_start_transaction(root, 1);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		btrfs_end_transaction(trans, root);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}
int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
					 struct btrfs_block_group_cache *group)
{
	__alloc_chunk_for_shrink(root, group, 1);
	set_block_group_readonly(group);
	return 0;
}
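
/*
 * The order above matters: a replacement chunk is force-allocated first,
 * then the group is marked read-only so no new allocations land in it
 * while its extents are being relocated.
 */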
/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	/* no bytes used, we're good */
	if (!btrfs_block_group_used(&block_group->item))
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     btrfs_block_group_used(&block_group->item) <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  However, if we
	 * were marked as full, then we know there aren't enough chunks, and we
	 * can just return.
	 */
	ret = -1;
	if (full)
		goto out;

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 min_free = btrfs_block_group_used(&block_group->item);
		u64 dev_offset, max_avail;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, &max_avail);
			if (!ret)
				break;
			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
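
/*
 * Worked example (numbers are illustrative only): if the space_info spans
 * 10GB in total and used + reserved + pinned + readonly comes to 8GB, a
 * 1GB block group with 512MB used passes the space check above (8GB +
 * 512MB < 10GB) and can be relocated without a new chunk; otherwise we
 * fall back to probing each device for room for a replacement chunk.
 */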
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		/*
		 * we only want to have 32k of ram per block group for keeping
		 * track of free space, and if we pass 1/2 of that we want to
		 * start converting things over to using bitmaps
		 */
		cache->extents_thresh = ((1024 * 32) / 2) /
			sizeof(struct btrfs_free_space);
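		/*
		 * illustration (entry size is an assumption, not from the
		 * source): at roughly 64 bytes per struct btrfs_free_space
		 * on a 64-bit build, this threshold works out to about
		 * 16k / 64 = 256 cached extent entries before the free
		 * space cache starts converting to bitmaps.
		 */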

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us _a lot_ of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_super += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
		sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_super += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
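
/*
 * Illustrative call only (not from this file): the chunk allocation path
 * creates the block group right after mapping a new chunk, along the
 * lines of
 *
 *	btrfs_make_block_group(trans, extent_root, 0, type,
 *			       BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *			       chunk_offset, chunk_size);
 *
 * with bytes_used passed as 0 because a freshly mapped chunk is empty.
 */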
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_key key;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);

	btrfs_clear_space_info_full(root->fs_info);
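
	/*
	 * two puts: one drops the reference taken by the lookup above, the
	 * other drops the reference that was held for the rbtree entry we
	 * erased from the block group cache earlier.
	 */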
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}