/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "print-tree.h"
#include "transaction.h"
#include "free-space-cache.h"
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
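
/*
 * Example of the two lookup modes above (numbers invented for this
 * comment): with block groups covering [0, 8M) and [16M, 24M), a search
 * for bytenr 12M returns the [16M, 24M) group when contains == 0 (first
 * group at or after bytenr) but NULL when contains == 1, since no group
 * actually contains 12M.
 */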
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}
static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}
static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}
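
/*
 * Worked example for the carve-out above (hypothetical numbers): caching
 * the range [0, 10M) while [4M, 6M) is still pinned adds [0, 4M) inside
 * the loop, skips the pinned region, and the tail check adds [6M, 10M),
 * so total_added is 8M.  The pinned 2M only becomes usable free space
 * after the transaction that freed it commits.
 */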
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_readonly += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since its read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;
		}

		if (total_found > (1024 * 1024 * 2)) {
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.
	 */
	if (!trans->transaction->in_commit) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1)
			return 0;
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}
/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}
/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
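
/*
 * Example of the combination done above (illustrative counts): if the
 * committed extent item records 2 refs and the delayed ref head carries
 * ref_mod == -1, the caller sees *refs == 1, i.e. the value the extent
 * tree would hold once all queued delayed refs have been run.
 */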
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back ref is optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back ref is for pointers in tree blocks not
 * referenced by their owner trees; the location of the tree block is
 * recorded in the back ref.  The full back ref is actually generic, and
 * can be used in all cases the implicit back ref is used.  Its major
 * shortcoming is overhead: every time a tree block gets COWed, we have to
 * update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs carry over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back ref is used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required.  This information is stored in
 * the tree block info structure.
 */
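
/*
 * Illustrative example (all values invented for this comment): suppose
 * subvolume root 5 holds inode 257, whose file data at file offset 0
 * lives in a 1MiB extent at bytenr 136708096.  While the reference stays
 * implicit, the extent tree contains:
 *
 *   extent item:  (136708096, BTRFS_EXTENT_ITEM_KEY, 1048576)
 *   implicit ref: root = 5, objectid = 257, offset = 0, count = 1
 *
 * If the leaf holding that file extent pointer later loses its owner
 * tree's reference (e.g. it becomes shared via a snapshot and is then
 * COWed), the implicit ref is replaced by a full back ref keyed by the
 * bytenr of the parent leaf instead of the (root, inode, offset) triple.
 */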
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
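
/*
 * A quick sketch of how the implicit back ref key offset is composed
 * (this just restates the function above, step by step):
 *
 *   high_crc = crc32c(~0, le64(root_objectid))    seeds the top bits
 *   low_crc  = crc32c(~0, le64(owner))            then folds in the
 *   low_crc  = crc32c(low_crc, le64(offset))      inode and file offset
 *   hash     = ((u64)high_crc << 31) ^ (u64)low_crc
 *
 * Collisions are possible, so callers such as insert_extent_data_ref()
 * probe forward (key.offset++) until the actually-matching ref is found.
 */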
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
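
/*
 * The mapping implemented above, restated as a table:
 *
 *   tree block (owner < BTRFS_FIRST_FREE_OBJECTID), parent set:
 *						BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block, no parent:			BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, parent set:			BTRFS_SHARED_DATA_REF_KEY
 *   data extent, no parent:			BTRFS_EXTENT_DATA_REF_KEY
 *
 * A non-zero parent always selects the full (shared) back ref variant.
 */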
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
			     BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents ref count from going down to zero when
	 * there still are pending delayed ref.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
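
/*
 * Example of the selection order above (hypothetical queue): if a head
 * has pending refs DROP, ADD, DROP for the same bytenr, the first scan
 * returns the ADD and the DROPs are only handed out afterwards, so the
 * extent's reference count never dips to zero while updates for it are
 * still queued.
 */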
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
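
/*
 * Usage sketch (hedged -- callers and counts here are examples, not an
 * exhaustive list): passing 0 processes everything queued at the start
 * of the run, passing (unsigned long)-1 keeps going until the delayed
 * ref tree is completely drained, and a throttled caller can pass a
 * finite target, e.g.
 *
 *	btrfs_run_delayed_refs(trans, root, num_items * 3);
 *
 * leaving any remaining work for the next caller.
 */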
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
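
/*
 * look in the delayed refs of the running transaction for a reference
 * to the data extent at 'bytenr' held by somebody other than this
 * root/inode/offset.  returns 0 if the only pending refs are our own,
 * 1 if a foreign ref exists, -ENOENT if there are no delayed refs for
 * this extent, and -EAGAIN if the ref head was locked and the caller
 * should retry.
 */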
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
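
/*
 * check the committed extent tree for references to the data extent at
 * 'bytenr' beyond the single inline backref for this root, inode
 * objectid and file offset.  returns 0 if we provably hold the only
 * reference, 1 if the extent may be shared, and -ENOENT if no matching
 * extent item was found.
 */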
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
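
/*
 * decide whether the data extent at 'bytenr' is referenced only by
 * this root at this objectid/offset, consulting both the committed
 * extent tree and any delayed refs still queued in the running
 * transaction.  a return of 0 means the extent is exclusively ours;
 * any other return means it may be shared and the caller must cow.
 */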
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
#if 0
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
#endif
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
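
/*
 * write the on-disk btrfs_block_group_item for 'cache' back into the
 * extent tree.  the caller supplies the path, which is released again
 * before returning.
 */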
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	if (ret)
		return ret;
	return 0;
}
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;
	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}
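
/*
 * prepare the free space cache inode for a block group before its
 * cache is written: look up (or create) the inode, zero its generation
 * so a crash invalidates the cache, truncate any stale contents and
 * preallocate room for the cache file.  on exit the block group is
 * marked BTRFS_DC_SETUP on success or BTRFS_DC_ERROR on failure.
 */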
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(root, path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path,
						      inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED) {
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	/*
	 * Just to make absolutely sure we have enough space, we're going to
	 * preallocate 12 pages worth of space for each block group.  In
	 * practice we ought to use at most 8, but we need extra space so we
	 * can add our header and have a terminator between the extents and
	 * the bitmaps.
	 */
	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	btrfs_free_reserved_data_space(inode, num_pages);
out_put:
	iput(inode);
out_free:
	btrfs_release_path(root, path);
out:
	spin_lock(&block_group->lock);
	if (ret)
		block_group->disk_cache_state = BTRFS_DC_ERROR;
	else
		block_group->disk_cache_state = BTRFS_DC_SETUP;
	spin_unlock(&block_group->lock);

	return ret;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
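
/*
 * account 'total_bytes'/'bytes_used' to the space info matching
 * 'flags', allocating and initializing a new btrfs_space_info if this
 * is the first block group of that type.  'factor' doubles the on-disk
 * usage for DUP/RAID1/RAID10 profiles since they store two copies.
 */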
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
				BTRFS_BLOCK_GROUP_SYSTEM |
				BTRFS_BLOCK_GROUP_METADATA);
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	atomic_set(&found->caching_threads, 0);
	return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;

	return flags;
}
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits &
			 root->fs_info->data_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits &
			 root->fs_info->system_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits &
			 root->fs_info->metadata_alloc_profile;
	return btrfs_reduce_alloc_profile(root, flags);
}
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}
/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	if (root == root->fs_info->tree_root) {
		alloc_chunk = 0;
		committed = 1;
	}

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = 1;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target, 0);
			btrfs_end_transaction(trans, root);
			if (ret < 0)
				return ret;

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
		if (!committed && !root->fs_info->open_ioctl_trans) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

#if 0 /* I hope we never need this code again, just in case */
		printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
		       "%llu bytes_reserved, " "%llu bytes_pinned, "
		       "%llu bytes_readonly, %llu may use %llu total\n",
		       (unsigned long long)bytes,
		       (unsigned long long)data_sinfo->bytes_used,
		       (unsigned long long)data_sinfo->bytes_reserved,
		       (unsigned long long)data_sinfo->bytes_pinned,
		       (unsigned long long)data_sinfo->bytes_readonly,
		       (unsigned long long)data_sinfo->bytes_may_use,
		       (unsigned long long)data_sinfo->total_bytes);
#endif
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}
/*
 * called when we are clearing a delalloc extent from the
 * inode's io_tree or there was an error for whatever reason
 * after calling btrfs_check_data_free_space
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
	}
	rcu_read_unlock();
}
static int should_alloc_chunk(struct btrfs_space_info *sinfo,
			      u64 alloc_bytes)
{
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;

	if (sinfo->bytes_used + sinfo->bytes_reserved +
	    alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;

	if (sinfo->bytes_used + sinfo->bytes_reserved +
	    alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	return 1;
}
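
/*
 * allocate a new chunk for the given profile if the space info is
 * getting full (or 'force' is set).  returns 1 if a chunk was
 * allocated, 0 if allocation was skipped, and a negative errno
 * (after marking the space info full) if chunk allocation failed.
 */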
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = 1;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	else
		ret = 1;
	space_info->force_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}
static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_space_info *sinfo, u64 num_bytes)
{
	int ret;
	int end_trans = 0;

	if (sinfo->full)
		return 0;

	spin_lock(&sinfo->lock);
	ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
	spin_unlock(&sinfo->lock);
	if (!ret)
		return 0;

	if (!trans) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		end_trans = 1;
	}

	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
			     num_bytes + 2 * 1024 * 1024,
			     get_alloc_profile(root, sinfo->flags), 0);

	if (end_trans)
		btrfs_end_transaction(trans, root);

	return ret == 1 ? 1 : 0;
}
/*
 * shrink metadata reservation for delalloc
 */
static int shrink_delalloc(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 to_reclaim)
{
	struct btrfs_block_rsv *block_rsv;
	u64 reserved;
	u64 max_reclaim;
	u64 reclaimed = 0;
	int pause = 1;
	int ret;

	block_rsv = &root->fs_info->delalloc_block_rsv;
	spin_lock(&block_rsv->lock);
	reserved = block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (reserved == 0)
		return 0;

	max_reclaim = min(reserved, to_reclaim);

	while (1) {
		ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
		if (!ret) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(pause);
			pause <<= 1;
			if (pause > HZ / 10)
				pause = HZ / 10;
		} else {
			pause = 1;
		}

		spin_lock(&block_rsv->lock);
		if (reserved > block_rsv->reserved)
			reclaimed = reserved - block_rsv->reserved;
		reserved = block_rsv->reserved;
		spin_unlock(&block_rsv->lock);

		if (reserved == 0 || reclaimed >= max_reclaim)
			break;

		if (trans && trans->transaction->blocked)
			return -EAGAIN;
	}
	return reclaimed >= to_reclaim;
}
static int should_retry_reserve(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int *retries)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	int ret;

	if ((*retries) > 2)
		return -ENOSPC;

	ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
	if (ret)
		return 1;

	if (trans && trans->transaction->in_commit)
		return -ENOSPC;

	ret = shrink_delalloc(trans, root, num_bytes);
	if (ret)
		return ret;

	spin_lock(&space_info->lock);
	if (space_info->bytes_pinned < num_bytes)
		ret = -ENOSPC;
	spin_unlock(&space_info->lock);
	if (ret)
		return ret;

	(*retries)++;

	if (trans)
		return -EAGAIN;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);

	return 1;
}
static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
				  u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 unused;
	int ret = -ENOSPC;

	spin_lock(&space_info->lock);
	unused = space_info->bytes_used + space_info->bytes_reserved +
		 space_info->bytes_pinned + space_info->bytes_readonly;

	if (unused < space_info->total_bytes)
		unused = space_info->total_bytes - unused;
	else
		unused = 0;

	if (unused >= num_bytes) {
		if (block_rsv->priority >= 10) {
			space_info->bytes_reserved += num_bytes;
			ret = 0;
		} else {
			if ((unused + block_rsv->reserved) *
			    block_rsv->priority >=
			    (num_bytes + block_rsv->reserved) * 10) {
				space_info->bytes_reserved += num_bytes;
				ret = 0;
			}
		}
	}
	spin_unlock(&space_info->lock);

	return ret;
}
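
/*
 * pick the block reservation to charge for a tree operation: the
 * transaction's rsv for cowable roots, the root's own rsv otherwise,
 * falling back to the empty_block_rsv when neither is set.
 */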
static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;

	if (root->ref_cows)
		block_rsv = trans->block_rsv;
	else
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;
	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}
void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
			     struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest)
			block_rsv_add_bytes(dest, num_bytes, 0);
		else {
			spin_lock(&space_info->lock);
			space_info->bytes_reserved -= num_bytes;
			spin_unlock(&space_info->lock);
		}
	}
}
static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	atomic_set(&rsv->usage, 1);
	rsv->priority = 3;
	INIT_LIST_HEAD(&rsv->list);
}
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_target;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv);

	alloc_target = btrfs_get_alloc_profile(root, 0);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);

	return block_rsv;
}
void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (rsv && atomic_dec_and_test(&rsv->usage)) {
		btrfs_block_rsv_release(root, rsv, (u64)-1);
		if (!rsv->durable)
			kfree(rsv);
	}
}
/*
 * make the block_rsv struct be able to capture freed space.
 * the captured space will re-add to the block_rsv struct
 * after transaction commit
 */
void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *block_rsv)
{
	block_rsv->durable = 1;
	mutex_lock(&fs_info->durable_block_rsv_mutex);
	list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
	mutex_unlock(&fs_info->durable_block_rsv_mutex);
}
int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes, int *retries)
{
	int ret;

	if (num_bytes == 0)
		return 0;
again:
	ret = reserve_metadata_bytes(block_rsv, num_bytes);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries);
	if (ret > 0)
		goto again;

	return ret;
}
int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv,
			  u64 min_reserved, int min_factor)
{
	u64 num_bytes = 0;
	int commit_trans = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (min_factor > 0)
		num_bytes = div_factor(block_rsv->size, min_factor);
	if (min_reserved > num_bytes)
		num_bytes = min_reserved;

	if (block_rsv->reserved >= num_bytes) {
		ret = 0;
	} else {
		num_bytes -= block_rsv->reserved;
		if (block_rsv->durable &&
		    block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
			commit_trans = 1;
	}
	spin_unlock(&block_rsv->lock);
	if (!ret)
		return 0;

	if (block_rsv->refill_used) {
		ret = reserve_metadata_bytes(block_rsv, num_bytes);
		if (!ret) {
			block_rsv_add_bytes(block_rsv, num_bytes, 0);
			return 0;
		}
	}

	if (commit_trans) {
		if (trans)
			return -EAGAIN;

		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		ret = btrfs_commit_transaction(trans, root);
		return 0;
	}

	printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
		block_rsv->size, block_rsv->reserved,
		block_rsv->freed[0], block_rsv->freed[1]);

	return -ENOSPC;
}
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
}
/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
#if 0
	/*
	 * per tree used space accounting can be inaccurate, so we
	 * can't rely on it.
	 */
	spin_lock(&fs_info->extent_root->accounting_lock);
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
	spin_unlock(&fs_info->extent_root->accounting_lock);

	spin_lock(&fs_info->csum_root->accounting_lock);
	num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
	spin_unlock(&fs_info->csum_root->accounting_lock);

	spin_lock(&fs_info->tree_root->accounting_lock);
	num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
	spin_unlock(&fs_info->tree_root->accounting_lock);
#endif
	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&block_rsv->lock);
	spin_lock(&sinfo->lock);

	block_rsv->size = num_bytes;

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_reserved += num_bytes;
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_reserved -= num_bytes;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}
#if 0
	printk(KERN_INFO "global block rsv size %llu reserved %llu\n",
		block_rsv->size, block_rsv->reserved);
#endif
	spin_unlock(&sinfo->lock);
	spin_unlock(&block_rsv->lock);
}
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;
	fs_info->chunk_block_rsv.priority = 10;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->global_block_rsv.priority = 10;
	fs_info->global_block_rsv.refill_used = 1;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.priority = 10;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);

	btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);

	update_global_block_rsv(fs_info);
}
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
}
static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
{
	return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
		3 * num_items;
}
int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 int num_items, int *retries)
{
	u64 num_bytes;
	int ret;

	if (num_items == 0 || root->fs_info->chunk_root == root)
		return 0;

	num_bytes = calc_trans_metadata_size(root, num_items);
	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
				  num_bytes, retries);
	if (!ret) {
		trans->bytes_reserved += num_bytes;
		trans->block_rsv = &root->fs_info->trans_block_rsv;
	}
	return ret;
}
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->bytes_reserved)
		return;

	BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
	btrfs_block_rsv_release(root, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * one for deleting orphan item, one for updating inode and
	 * two for calling btrfs_truncate_inode_items.
	 *
	 * btrfs_truncate_inode_items is a delete operation, it frees
	 * more space than it uses in most cases. So two units of
	 * metadata space should be enough for calling it many times.
	 * If all of the metadata space is used, we can commit
	 * transaction and use space it freed.
	 */
	u64 num_bytes = calc_trans_metadata_size(root, 4);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = calc_trans_metadata_size(root, 4);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
	/*
	 * two for root back/forward refs, two for directory entries
	 * and one for root of the snapshot.
	 */
	u64 num_bytes = calc_trans_metadata_size(root, 5);
	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
{
	return num_bytes >> 3;
}
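
/*
 * reserve metadata space for 'num_bytes' of delalloc: one unit of
 * tree space per new outstanding extent plus room for the checksum
 * items (roughly num_bytes / 8, see calc_csum_metadata_size above).
 * once the delalloc reservation grows past 512M we start flushing
 * delalloc inodes to shrink it back.
 */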
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve;
	int nr_extents;
	int retries = 0;
	int ret;

	if (btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	num_bytes = ALIGN(num_bytes, root->sectorsize);
again:
	spin_lock(&BTRFS_I(inode)->accounting_lock);
	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
	if (nr_extents > BTRFS_I(inode)->reserved_extents) {
		nr_extents -= BTRFS_I(inode)->reserved_extents;
		to_reserve = calc_trans_metadata_size(root, nr_extents);
	} else {
		nr_extents = 0;
		to_reserve = 0;
	}

	to_reserve += calc_csum_metadata_size(inode, num_bytes);
	ret = reserve_metadata_bytes(block_rsv, to_reserve);
	if (ret) {
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
					   &retries);
		if (ret > 0)
			goto again;
		return ret;
	}

	BTRFS_I(inode)->reserved_extents += nr_extents;
	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	if (block_rsv->size > 512 * 1024 * 1024)
		shrink_delalloc(NULL, root, to_reserve);

	return 0;
}
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free;
	int nr_extents;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	atomic_dec(&BTRFS_I(inode)->outstanding_extents);

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
	if (nr_extents < BTRFS_I(inode)->reserved_extents) {
		nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
		BTRFS_I(inode)->reserved_extents -= nr_extents;
	} else {
		nr_extents = 0;
	}
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	to_free = calc_csum_metadata_size(inode, num_bytes);
	if (nr_extents > 0)
		to_free += calc_trans_metadata_size(root, nr_extents);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}
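
/*
 * adjust the used-byte counters when 'num_bytes' starting at 'bytenr'
 * are allocated (alloc != 0) or freed.  the super block total, the
 * per-block-group item and the owning space info are all updated;
 * freed ranges are marked dirty in the pinned extent tree so the space
 * only returns to the free space cache after the transaction commits.
 */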
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(&info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(&info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, trans, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache);

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}
/*
 * update size of reserved extents. this function may return -EAGAIN
 * if 'reserve' is true or 'sinfo' is false.
 */
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo)
{
	int ret = 0;
	if (sinfo) {
		struct btrfs_space_info *space_info = cache->space_info;
		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		if (reserve) {
			if (cache->ro) {
				ret = -EAGAIN;
			} else {
				cache->reserved += num_bytes;
				space_info->bytes_reserved += num_bytes;
			}
		} else {
			if (cache->ro)
				space_info->bytes_readonly += num_bytes;
			cache->reserved -= num_bytes;
			space_info->bytes_reserved -= num_bytes;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);
	} else {
		spin_lock(&cache->lock);
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			if (reserve)
				cache->reserved += num_bytes;
			else
				cache->reserved -= num_bytes;
		}
		spin_unlock(&cache->lock);
	}
	return ret;
}
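
/*
 * called at commit time: record how far each caching thread has
 * progressed, so unpin_extent_range only returns fully cached ranges
 * to the free space cache, and swap which freed_extents tree new pins
 * will land in for the next transaction.
 */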
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
	return 0;
}
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache);
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		if (cache->ro) {
			cache->space_info->bytes_readonly += len;
		} else if (cache->reserved_pinned > 0) {
			len = min(len, cache->reserved_pinned);
			cache->reserved_pinned -= len;
			cache->space_info->bytes_reserved += len;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *next_rsv;
	u64 start;
	u64 end;
	int idx;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		ret = btrfs_discard_extent(root, start, end + 1 - start);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	mutex_lock(&fs_info->durable_block_rsv_mutex);
	list_for_each_entry_safe(block_rsv, next_rsv,
				 &fs_info->durable_block_rsv_list, list) {

		idx = trans->transid & 0x1;
		if (block_rsv->freed[idx] > 0) {
			block_rsv_add_bytes(block_rsv,
					    block_rsv->freed[idx], 0);
			block_rsv->freed[idx] = 0;
		}
		if (atomic_read(&block_rsv->usage) == 0) {
			btrfs_block_rsv_release(root, block_rsv, (u64)-1);

			if (block_rsv->freed[0] == 0 &&
			    block_rsv->freed[1] == 0) {
				list_del_init(&block_rsv->list);
				kfree(block_rsv);
			}
		} else {
			btrfs_block_rsv_release(root, block_rsv, 0);
		}
	}
	mutex_unlock(&fs_info->durable_block_rsv_mutex);

	return 0;
}
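
/*
 * drop 'refs_to_drop' references to the extent at 'bytenr'.  the
 * matching backref is removed, and once the reference count hits zero
 * the extent item itself is deleted, checksums are dropped for data
 * extents, and the space is returned via update_block_group.
 */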
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			BUG_ON(ret);
			btrfs_release_path(extent_root, path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		BUG_ON(ret < 0);

		btrfs_release_path(extent_root, path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		BUG_ON(ret);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			BUG_ON(ret);
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		} else {
			invalidate_mapping_pages(info->btree_inode->i_mapping,
			     bytenr >> PAGE_CACHE_SHIFT,
			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
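
/*
 * free a tree block we cowed away.  blocks from the current
 * transaction that were never written to disk can be discarded
 * immediately; anything else stays pinned until the transaction
 * commits, with the freed space credited back to a durable block
 * reservation when possible.
 */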
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
						 parent, root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}

	if (!last_ref)
		return;

	block_rsv = get_block_rsv(trans, root);
	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
	if (block_rsv->space_info != cache->space_info)
		goto out;

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto pin;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		ret = update_reserved_bytes(cache, buf->len, 0, 0);
		if (ret == -EAGAIN) {
			/* block group became read-only */
			update_reserved_bytes(cache, buf->len, 0, 1);
			goto out;
		}

		ret = 1;
		spin_lock(&block_rsv->lock);
		if (block_rsv->reserved < block_rsv->size) {
			block_rsv->reserved += buf->len;
			ret = 0;
		}
		spin_unlock(&block_rsv->lock);

		if (ret) {
			spin_lock(&cache->space_info->lock);
			cache->space_info->bytes_reserved -= buf->len;
			spin_unlock(&cache->space_info->lock);
		}
		goto out;
	}
pin:
	if (block_rsv->durable && !cache->ro) {
		ret = 0;
		spin_lock(&cache->lock);
		if (!cache->ro) {
			cache->reserved_pinned += buf->len;
			ret = 1;
		}
		spin_unlock(&cache->lock);

		if (ret) {
			spin_lock(&block_rsv->lock);
			block_rsv->freed[trans->transid & 0x1] += buf->len;
			spin_unlock(&block_rsv->lock);
		}
	}
out:
	btrfs_put_block_group(cache);
}
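
/*
 * Accounting sketch for the durable block_rsv path above: a block freed in
 * transaction N is added to block_rsv->freed[N & 0x1], so two in-flight
 * transactions can track their freed bytes independently; e.g. blocks
 * freed in transid 7 and transid 8 land in freed[1] and freed[0]
 * respectively.
 */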
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner,
					offset, BTRFS_DROP_DELAYED_REF, NULL);
	}
	return ret;
}
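
/*
 * Dispatch summary for the function above: freeing a tree-log block only
 * adjusts pinned bytes, a free with owner < BTRFS_FIRST_FREE_OBJECTID
 * (the owner field holds a tree level) queues a delayed tree ref drop,
 * and anything else queues a delayed data ref drop.
 */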
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
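
/*
 * Worked example: with root->stripesize == 65536, mask is 0xffff and
 * stripe_align(root, 0x12345) returns (0x12345 + 0xffff) & ~0xffff ==
 * 0x20000, the next stripe boundary at or above the input.
 */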
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}
static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	int index;

	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
		index = 0;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
		index = 1;
	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
		index = 2;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
		index = 3;
	else
		index = 4;
	return index;
}
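
/*
 * The index returned here selects one of the space_info->block_groups[]
 * lists, so e.g. a RAID1 block group is only ever visited while
 * find_free_extent() below is scanning the RAID1 list for that space_info.
 */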
enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     int data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int last_ptr_loop = 0;
	int loop = 0;
	int index = 0;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %d\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

have_block_group:
		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
			u64 free_percent;

			ret = cache_block_group(block_group, trans, 1);
			if (block_group->cached == BTRFS_CACHE_FINISHED)
				goto have_block_group;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * We only want to start kthread caching if we are at
			 * the point where we will wait for caching to make
			 * progress, or if our ideal search is over and we've
			 * found somebody to start caching.
			 */
			if (loop > LOOP_CACHING_NOWAIT ||
			    (loop > LOOP_FIND_IDEAL &&
			     atomic_read(&space_info->caching_threads) < 2)) {
				ret = cache_block_group(block_group, trans, 0);
				BUG_ON(ret);
			}
			found_uncached_bg = true;

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

		cached = block_group_cache_done(block_group);
		if (unlikely(!cached))
			found_uncached_bg = true;

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so lets look
		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
		 * have tried the cluster allocator plenty of times at this
		 * point and not have found anything, so we are likely way too
		 * fragmented for the clustering stuff to find anything, so lets
		 * just skip it and let the allocator find whatever block it can
		 * find
		 */
		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			if (last_ptr->block_group &&
			    (last_ptr->block_group->ro ||
			    !block_group_bits(last_ptr->block_group, data))) {
				offset = 0;
				goto refill_cluster;
			}

			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
						 num_bytes, search_start);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				goto checks;
			}

			spin_lock(&last_ptr->lock);
			/*
			 * whoops, this cluster doesn't actually point to
			 * this block group.  Get a ref on the block
			 * group it does point to and try again
			 */
			if (!last_ptr_loop && last_ptr->block_group &&
			    last_ptr->block_group != block_group) {
				btrfs_put_block_group(block_group);
				block_group = last_ptr->block_group;
				btrfs_get_block_group(block_group);
				spin_unlock(&last_ptr->lock);
				spin_unlock(&last_ptr->refill_lock);

				last_ptr_loop = 1;
				search_start = block_group->key.objectid;
				/*
				 * we know this block group is properly
				 * in the list because
				 * btrfs_remove_block_group, drops the
				 * cluster before it removes the block
				 * group from the list
				 */
				goto have_block_group;
			}
			spin_unlock(&last_ptr->lock);
refill_cluster:
			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			last_ptr_loop = 0;

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       offset, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = update_reserved_bytes(block_group, num_bytes, 1,
					    (data & BTRFS_BLOCK_GROUP_DATA));
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, lets return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
	    (found_uncached_bg || empty_size || empty_cluster ||
	     allowed_chunk_alloc)) {
		index = 0;
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent &&
			    atomic_read(&space_info->caching_threads))
				goto search;

			/*
			 * 1 of the following 2 things have happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find a uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		if (loop < LOOP_CACHING_WAIT) {
			loop++;
			goto search;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			if (allowed_chunk_alloc) {
				ret = do_chunk_alloc(trans, root, num_bytes +
						     2 * 1024 * 1024, data, 1);
				allowed_chunk_alloc = 0;
				done_chunk_alloc = 1;
			} else if (!done_chunk_alloc) {
				space_info->force_alloc = 1;
			}
		}

		if (loop < LOOP_NO_EMPTY_SIZE) {
			loop++;
			goto search;
		}
		ret = -ENOSPC;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	}

	/* we found what we needed */
	if (ins->objectid) {
		if (!(data & BTRFS_BLOCK_GROUP_DATA))
			trans->block_group = block_group->key.objectid;

		btrfs_put_block_group(block_group);
		ret = 0;
	}

	return ret;
}
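
/*
 * Rough shape of the search above: for each RAID index, walk the block
 * group list and try (1) the per-fs allocation cluster and (2) a plain
 * free-space lookup; on total failure escalate loop from LOOP_FIND_IDEAL
 * toward LOOP_NO_EMPTY_SIZE, forcing a chunk allocation along the way,
 * before giving up with -ENOSPC.
 */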
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
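
/*
 * The "free" figure printed above is total_bytes minus bytes_used,
 * bytes_pinned, bytes_reserved and bytes_readonly; e.g. a 10GB space_info
 * with 6GB used, 1GB pinned and 1GB reserved reports 2GB free.
 */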
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte,
			       ins, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret == -ENOSPC) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	return ret;
}
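
/*
 * Retry example: a 1MB request that hits -ENOSPC with min_alloc_size of
 * 256K is halved to 512K (rounded down to a sectorsize multiple and
 * clamped to at least 256K), a chunk allocation is forced, and the
 * search runs again.
 */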
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	update_reserved_bytes(cache, len, 0, 1);
	btrfs_put_block_group(cache);

	return ret;
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
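
/*
 * On-disk layout produced above: the btrfs_extent_item is immediately
 * followed by one inline ref, either a shared_data_ref keyed on the
 * parent block or an extent_data_ref keyed on (root, owner inode,
 * offset), so a single leaf item fully describes the new extent.
 */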
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
					 0, root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL);
	return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, trans, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(block_rsv, blocksize);
		if (ret)
			return ERR_PTR(ret);
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	WARN_ON(1);
	printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
		block_rsv->size, block_rsv->reserved,
		block_rsv->freed[0], block_rsv->freed[1]);

	return ERR_PTR(-ENOSPC);
}

static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(block_rsv, NULL, 0);
}
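
/*
 * Pairing note: use_block_rsv() charges one tree block's worth of
 * metadata to the transaction's block_rsv, and unuse_block_rsv() gives
 * those bytes straight back when btrfs_reserve_extent() fails in
 * btrfs_alloc_free_block() below, so a failed allocation leaves the
 * reservation unchanged.
 */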
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		unuse_block_rsv(block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf));

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op);
		BUG_ON(ret);
	}
	return buf;
}
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u64 last = 0;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		last = bytenr + blocksize;
		nread++;
	}
	wc->reada_slot = slot;
}
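
/*
 * reada_count adapts to how the walk is progressing: stepping backwards
 * shrinks it to 2/3 (never below 2), stepping forwards grows it to 3/2,
 * capped at the pointers per block; e.g. 12 becomes 8 on a backward step
 * and 18 on a forward one.
 */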
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock(eb);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret);
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock(eb);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the
 * reference to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	BUG_ON(ret);
	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = 1;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
					root->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock(eb);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock(path->nodes[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	BUG_ON(!wc);

	trans = btrfs_start_transaction(tree_root, 0);
	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = 1;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root)) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			BUG_ON(ret);

			btrfs_end_transaction_throttle(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 0);
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(root, path);
	BUG_ON(err);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		BUG_ON(ret < 0);
		if (ret > 0) {
			ret = btrfs_del_orphan_item(trans, tree_root,
						    root->root_key.objectid);
			BUG_ON(ret);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out:
	btrfs_end_transaction_throttle(trans, tree_root);
	kfree(wc);
	btrfs_free_path(path);
	return err;
}
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	BUG_ON(!wc);

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = 1;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
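
/*
 * Worked example: calc_ra(100, 110, 32) returns min(110, 131) == 110, so
 * readahead near the end of the range is clipped to the last page index.
 */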
static noinline int relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
	u64 page_end;
	unsigned long first_index;
	unsigned long last_index;
	unsigned long i;
	struct page *page;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;
	struct btrfs_ordered_extent *ordered;
	unsigned int total_read = 0;
	unsigned int total_dirty = 0;
	int ret = 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	mutex_lock(&inode->i_mutex);
	first_index = start >> PAGE_CACHE_SHIFT;
	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;

	/* make sure the dirty trick played by the caller works */
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    first_index, last_index);
	if (ret)
		goto out_unlock;

	file_ra_state_init(ra, inode->i_mapping);

	for (i = first_index ; i <= last_index; i++) {
		if (total_read % ra->ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
				       calc_ra(i, last_index, ra->ra_pages));
		}
		total_read++;
again:
		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
			BUG_ON(1);
		page = grab_cache_page(inode->i_mapping, i);
		if (!page) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				goto out_unlock;
			}
		}
		wait_on_page_writeback(page);

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		if (i == first_index)
			set_extent_bits(io_tree, page_start, page_end,
					EXTENT_BOUNDARY, GFP_NOFS);
		btrfs_set_extent_delalloc(inode, page_start, page_end);

		set_page_dirty(page);
		total_dirty++;

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
	}
out_unlock:
	kfree(ra);
	mutex_unlock(&inode->i_mutex);
	balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
	return ret;
}
static noinline int relocate_data_extent(struct inode *reloc_inode,
					 struct btrfs_key *extent_key,
					 u64 offset)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
	struct extent_map *em;
	u64 start = extent_key->objectid - offset;
	u64 end = start + extent_key->offset - 1;

	em = alloc_extent_map(GFP_NOFS);
	BUG_ON(!em || IS_ERR(em));

	em->start = start;
	em->len = extent_key->offset;
	em->block_len = extent_key->offset;
	em->block_start = extent_key->objectid;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* setup extent map to cheat btrfs_readpage */
	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
	while (1) {
		int ret;
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(reloc_inode, start, end, 0);
	}
	unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);

	return relocate_inode_pages(reloc_inode, start, extent_key->offset);
}
struct btrfs_ref_path {
	u64 extent_start;
	u64 nodes[BTRFS_MAX_LEVEL];
	u64 root_objectid;
	u64 root_generation;
	u64 owner_objectid;
	u32 num_refs;
	int lowest_level;
	int current_level;
	int shared_level;

	struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
	u64 new_nodes[BTRFS_MAX_LEVEL];
};

struct disk_extent {
	u64 ram_bytes;
	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 offset;
	u64 num_bytes;
	u8 compression;
	u8 encryption;
	u16 other_encoding;
};

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}
static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_ref_path *ref_path,
				    int first_time)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_extent_ref *ref;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 bytenr;
	u32 nritems;
	int level;
	int ret = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (first_time) {
		ref_path->lowest_level = -1;
		ref_path->current_level = -1;
		ref_path->shared_level = -1;
		goto walk_up;
	}
walk_down:
	level = ref_path->current_level - 1;
	while (level >= -1) {
		u64 parent;
		if (level < ref_path->lowest_level)
			break;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;
		BUG_ON(bytenr == 0);

		parent = ref_path->nodes[level + 1];
		ref_path->nodes[level + 1] = 0;
		ref_path->current_level = level;
		BUG_ON(parent == 0);

		key.objectid = bytenr;
		key.offset = parent + 1;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		BUG_ON(ret == 0);

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				goto next;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid == bytenr &&
		    found_key.type == BTRFS_EXTENT_REF_KEY) {
			if (level < ref_path->shared_level)
				ref_path->shared_level = level;
			goto found;
		}
next:
		level--;
		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached lowest level */
	ret = 1;
	goto out;
walk_up:
	level = ref_path->current_level;
	while (level < BTRFS_MAX_LEVEL - 1) {
		u64 ref_objectid;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;

		BUG_ON(bytenr == 0);

		key.objectid = bytenr;
		key.offset = 0;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* the extent was freed by someone */
				if (ref_path->lowest_level == level)
					goto out;
				btrfs_release_path(extent_root, path);
				goto walk_down;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr ||
		    found_key.type != BTRFS_EXTENT_REF_KEY) {
			/* the extent was freed by someone */
			if (ref_path->lowest_level == level) {
				ret = 1;
				goto out;
			}
			btrfs_release_path(extent_root, path);
			goto walk_down;
		}
found:
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		ref_objectid = btrfs_ref_objectid(leaf, ref);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
			if (first_time) {
				level = (int)ref_objectid;
				BUG_ON(level >= BTRFS_MAX_LEVEL);
				ref_path->lowest_level = level;
				ref_path->current_level = level;
				ref_path->nodes[level] = bytenr;
			} else {
				WARN_ON(ref_objectid != level);
			}
		} else {
			WARN_ON(level != -1);
		}
		first_time = 0;

		if (ref_path->lowest_level == level) {
			ref_path->owner_objectid = ref_objectid;
			ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
		}

		/*
		 * the block is tree root or the block isn't in reference
		 * counted tree.
		 */
		if (found_key.objectid == found_key.offset ||
		    is_cowonly_root(btrfs_ref_root(leaf, ref))) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			if (level < 0) {
				/* special reference from the tree log */
				ref_path->nodes[0] = found_key.offset;
				ref_path->current_level = 0;
			}
			ret = 0;
			goto out;
		}

		level++;
		BUG_ON(ref_path->nodes[level] != 0);
		ref_path->nodes[level] = found_key.offset;
		ref_path->current_level = level;

		/*
		 * the reference was created in the running transaction,
		 * no need to continue walking up.
		 */
		if (btrfs_ref_generation(leaf, ref) == trans->transid) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			ret = 0;
			goto out;
		}

		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached max tree level, but no tree root found. */
	BUG();
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct btrfs_ref_path *ref_path,
				u64 extent_start)
{
	memset(ref_path, 0, sizeof(*ref_path));
	ref_path->extent_start = extent_start;

	return __next_ref_path(trans, extent_root, ref_path, 1);
}

static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct btrfs_ref_path *ref_path)
{
	return __next_ref_path(trans, extent_root, ref_path, 0);
}
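
/*
 * Usage pattern: callers start an iteration with btrfs_first_ref_path()
 * on an extent start and keep calling btrfs_next_ref_path() until it
 * returns nonzero; each successful step fills ref_path with one chain of
 * backrefs from the extent up to a tree root.
 */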
static noinline int get_new_locations(struct inode *reloc_inode,
				      struct btrfs_key *extent_key,
				      u64 offset, int no_fragment,
				      struct disk_extent **extents,
				      int *nr_extents)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct disk_extent *exts = *extents;
	struct btrfs_key found_key;
	u64 cur_pos;
	u64 last_byte;
	u32 nritems;
	int nr = 0;
	int max = *nr_extents;
	int ret;

	WARN_ON(!no_fragment && *extents);
	if (!exts) {
		max = 1;
		exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
		if (!exts)
			return -ENOMEM;
	}

	path = btrfs_alloc_path();
	BUG_ON(!path);

	cur_pos = extent_key->objectid - offset;
	last_byte = extent_key->objectid + extent_key->offset;
	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
				       cur_pos, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.offset != cur_pos ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY ||
		    found_key.objectid != reloc_inode->i_ino)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG ||
		    btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			break;

		if (nr == max) {
			struct disk_extent *old = exts;
			max *= 2;
			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			memcpy(exts, old, sizeof(*exts) * nr);
			if (old != *extents)
				kfree(old);
		}

		exts[nr].disk_bytenr =
			btrfs_file_extent_disk_bytenr(leaf, fi);
		exts[nr].disk_num_bytes =
			btrfs_file_extent_disk_num_bytes(leaf, fi);
		exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
		exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
		exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
		exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
									   fi);
		BUG_ON(exts[nr].offset > 0);
		BUG_ON(exts[nr].compression || exts[nr].encryption);
		BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);

		cur_pos += exts[nr].num_bytes;
		nr++;

		if (cur_pos + offset >= last_byte)
			break;

		if (no_fragment) {
			ret = 1;
			goto out;
		}
		path->slots[0]++;
	}

	BUG_ON(cur_pos + offset > last_byte);
	if (cur_pos + offset < last_byte) {
		ret = -ENOENT;
		goto out;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (ret) {
		if (exts != *extents)
			kfree(exts);
	} else {
		*extents = exts;
		*nr_extents = nr;
	}
	return ret;
}
static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_key *extent_key,
				struct btrfs_key *leaf_key,
				struct btrfs_ref_path *ref_path,
				struct disk_extent *new_extents,
				int nr_extents)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	struct btrfs_key key;
	u64 lock_start = 0;
	u64 lock_end = 0;
	u64 num_bytes;
	u64 ext_offset;
	u64 search_end = (u64)-1;
	u64 alloc_hint;
	u64 extent_len;
	u32 nritems;
	int nr_scaned = 0;
	int extent_locked = 0;
	int extent_type;
	int i;
	int ret;

	memcpy(&key, leaf_key, sizeof(key));
	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
		if (key.objectid < ref_path->owner_objectid ||
		    (key.objectid == ref_path->owner_objectid &&
		     key.type < BTRFS_EXTENT_DATA_KEY)) {
			key.objectid = ref_path->owner_objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = 0;
		}
	}

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
next:
		if (extent_locked && ret > 0) {
			/*
			 * the file extent item was modified by someone
			 * before the extent got locked.
			 */
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}

		if (path->slots[0] >= nritems) {
			if (++nr_scaned > 2)
				break;

			BUG_ON(extent_locked);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
			if ((key.objectid > ref_path->owner_objectid) ||
			    (key.objectid == ref_path->owner_objectid &&
			     key.type > BTRFS_EXTENT_DATA_KEY) ||
			    key.offset >= search_end)
				break;
		}

		if (inode && key.objectid != inode->i_ino) {
			BUG_ON(extent_locked);
			btrfs_release_path(root, path);
			mutex_unlock(&inode->i_mutex);
			iput(inode);
			inode = NULL;
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		     extent_key->objectid)) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}

		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		ext_offset = btrfs_file_extent_offset(leaf, fi);

		if (search_end == (u64)-1) {
			search_end = key.offset - ext_offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		}

		if (!extent_locked) {
			lock_start = key.offset;
			lock_end = lock_start + num_bytes - 1;
		} else {
			if (lock_start > key.offset ||
			    lock_end + 1 < key.offset + num_bytes) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				extent_locked = 0;
			}
		}

		if (!inode) {
			btrfs_release_path(root, path);

			inode = btrfs_iget_locked(root->fs_info->sb,
						  key.objectid, root);
			if (inode->i_state & I_NEW) {
				BTRFS_I(inode)->root = root;
				BTRFS_I(inode)->location.objectid =
					key.objectid;
				BTRFS_I(inode)->location.type =
					BTRFS_INODE_ITEM_KEY;
				BTRFS_I(inode)->location.offset = 0;
				btrfs_read_locked_inode(inode);
				unlock_new_inode(inode);
			}
			/*
			 * some code calls btrfs_commit_transaction while
			 * holding the i_mutex, so we can't use mutex_lock
			 * here.
			 */
			if (is_bad_inode(inode) ||
			    !mutex_trylock(&inode->i_mutex)) {
				iput(inode);
				inode = NULL;
				key.offset = (u64)-1;
				goto skip;
			}
		}

		if (!extent_locked) {
			struct btrfs_ordered_extent *ordered;

			btrfs_release_path(root, path);

			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				    lock_end, GFP_NOFS);
			ordered = btrfs_lookup_first_ordered_extent(inode,
								    lock_end);
			if (ordered &&
			    ordered->file_offset <= lock_end &&
			    ordered->file_offset + ordered->len > lock_start) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				btrfs_start_ordered_extent(inode, ordered, 1);
				btrfs_put_ordered_extent(ordered);
				key.offset += num_bytes;
				goto skip;
			}
			if (ordered)
				btrfs_put_ordered_extent(ordered);

			extent_locked = 1;
			continue;
		}

		if (nr_extents == 1) {
			/* update extent pointer in place */
			btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[0].disk_bytenr);
			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[0].disk_num_bytes);
			btrfs_mark_buffer_dirty(leaf);

			btrfs_drop_extent_cache(inode, key.offset,
						key.offset + num_bytes - 1, 0);

			ret = btrfs_inc_extent_ref(trans, root,
						new_extents[0].disk_bytenr,
						new_extents[0].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid,
						key.objectid);
			BUG_ON(ret);

			ret = btrfs_free_extent(trans, root,
						extent_key->objectid,
						extent_key->offset,
						leaf->start,
						btrfs_header_owner(leaf),
						btrfs_header_generation(leaf),
						key.objectid, 0);
			BUG_ON(ret);

			btrfs_release_path(root, path);
			key.offset += num_bytes;
		} else {
			/*
			 * drop old extent pointer at first, then insert the
			 * new pointers one by one
			 */
			btrfs_release_path(root, path);
			ret = btrfs_drop_extents(trans, root, inode, key.offset,
						 key.offset + num_bytes,
						 key.offset, &alloc_hint);
			BUG_ON(ret);

			for (i = 0; i < nr_extents; i++) {
				if (ext_offset >= new_extents[i].num_bytes) {
					ext_offset -= new_extents[i].num_bytes;
					continue;
				}
				extent_len = min(new_extents[i].num_bytes -
						 ext_offset, num_bytes);

				ret = btrfs_insert_empty_item(trans, root,
							      path, &key,
							      sizeof(*fi));
				BUG_ON(ret);

				leaf = path->nodes[0];
				fi = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
				btrfs_set_file_extent_generation(leaf, fi,
							trans->transid);
				btrfs_set_file_extent_type(leaf, fi,
							BTRFS_FILE_EXTENT_REG);
7101 BTRFS_FILE_EXTENT_REG
);
7102 btrfs_set_file_extent_disk_bytenr(leaf
, fi
,
7103 new_extents
[i
].disk_bytenr
);
7104 btrfs_set_file_extent_disk_num_bytes(leaf
, fi
,
7105 new_extents
[i
].disk_num_bytes
);
7106 btrfs_set_file_extent_ram_bytes(leaf
, fi
,
7107 new_extents
[i
].ram_bytes
);
7109 btrfs_set_file_extent_compression(leaf
, fi
,
7110 new_extents
[i
].compression
);
7111 btrfs_set_file_extent_encryption(leaf
, fi
,
7112 new_extents
[i
].encryption
);
7113 btrfs_set_file_extent_other_encoding(leaf
, fi
,
7114 new_extents
[i
].other_encoding
);
7116 btrfs_set_file_extent_num_bytes(leaf
, fi
,
7118 ext_offset
+= new_extents
[i
].offset
;
7119 btrfs_set_file_extent_offset(leaf
, fi
,
7121 btrfs_mark_buffer_dirty(leaf
);
7123 btrfs_drop_extent_cache(inode
, key
.offset
,
7124 key
.offset
+ extent_len
- 1, 0);
7126 ret
= btrfs_inc_extent_ref(trans
, root
,
7127 new_extents
[i
].disk_bytenr
,
7128 new_extents
[i
].disk_num_bytes
,
7130 root
->root_key
.objectid
,
7131 trans
->transid
, key
.objectid
);
7133 btrfs_release_path(root
, path
);
7135 inode_add_bytes(inode
, extent_len
);
7138 num_bytes
-= extent_len
;
7139 key
.offset
+= extent_len
;
7144 BUG_ON(i
>= nr_extents
);
7148 if (extent_locked
) {
7149 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
7150 lock_end
, GFP_NOFS
);
7154 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
&&
7155 key
.offset
>= search_end
)
7162 btrfs_release_path(root
, path
);
7164 mutex_unlock(&inode
->i_mutex
);
7165 if (extent_locked
) {
7166 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
7167 lock_end
, GFP_NOFS
);
int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf, u64 orig_start)
{
	int level;
	int ret;

	BUG_ON(btrfs_header_generation(buf) != trans->transid);
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	level = btrfs_header_level(buf);
	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_leaf_ref *orig_ref;

		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
		if (!orig_ref)
			return -ENOENT;

		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
		if (!ref) {
			btrfs_free_leaf_ref(root, orig_ref);
			return -ENOMEM;
		}

		ref->nritems = orig_ref->nritems;
		memcpy(ref->extents, orig_ref->extents,
		       sizeof(ref->extents[0]) * ref->nritems);
		btrfs_free_leaf_ref(root, orig_ref);

		ref->root_gen = trans->transid;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);

		ret = btrfs_add_leaf_ref(root, ref, 0);
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
	return 0;
}
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct btrfs_root *target_root)
{
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;
	struct extent_state *cached_state = NULL;
	u64 num_bytes;
	u64 skip_objectid = 0;
	u32 nritems;
	u32 i;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			continue;
		if (!inode || inode->i_ino != key.objectid) {
			iput(inode);
			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);
		}
		if (!inode) {
			skip_objectid = key.objectid;
			continue;
		}
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
				 key.offset + num_bytes - 1, 0, &cached_state,
				 GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
				     key.offset + num_bytes - 1, &cached_state,
				     GFP_NOFS);
		cond_resched();
	}
	iput(inode);
	return 0;
}
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;
	u64 bytenr;
	u64 num_bytes;
	u32 nritems;
	u32 i;
	int ext_index;
	int nr_extent;
	int ret;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	BUG_ON(!ref);

	ext_index = -1;
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;

		ext_index++;
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)
			continue;

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		nr_extent = 1;
		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);
		if (ret > 0)
			continue;
		BUG_ON(ret < 0);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					new_extent->disk_bytenr,
					new_extent->disk_num_bytes,
					leaf->start,
					root->root_key.objectid,
					trans->transid, key.objectid);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
					key.objectid, 0);
		BUG_ON(ret);
		cond_resched();
	}
	kfree(new_extent);
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
	return 0;
}
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&reloc_root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
		BUG_ON(ret);
	}
	return 0;
}
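/*
 * Tear down every reloc root queued on fs_info->dead_reloc_roots: each
 * snapshot is dropped a transaction at a time, then its root item is
 * deleted from the tree of tree roots.
 */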
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;
	int ret;
	unsigned long nr;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);
		while (1) {
			trans = btrfs_join_transaction(root, 1);
			BUG_ON(!trans);

			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, reloc_root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, root);
			BUG_ON(ret);
			btrfs_btree_balance_dirty(root, nr);
		}

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
		btrfs_btree_balance_dirty(root, nr);

		kfree(prev_root);
		prev_root = reloc_root;
	}
	if (prev_root) {
		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
		kfree(prev_root);
	}
	return 0;
}
int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
	return 0;
}
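/*
 * Called during mount: if dead reloc roots were found, a transaction
 * commit flushes them out, then orphan cleanup on the data reloc tree
 * finishes whatever an interrupted balance left behind.
 */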
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;
	int found;
	int ret;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	BUG_ON(ret);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	if (found) {
		trans = btrfs_start_transaction(root, 1);
		BUG_ON(!trans);
		ret = btrfs_commit_transaction(trans, root);
		BUG_ON(ret);
	}

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
	return 0;
}
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)
		return 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(ret);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
	return 0;
}
/*
 * Core function of space balance.
 *
 * The idea is using reloc trees to relocate tree blocks in reference
 * counted roots.  There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid.  Reloc trees are
 * snapshots of the latest committed roots of subvols (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps:
 * COW the block through the subvol's reloc tree, then update the block
 * pointer in the subvol to point to the new block.  Since all reloc trees
 * share the same root key objectid, special handling for tree blocks
 * owned by them is easy.  Once a tree block has been COWed in one reloc
 * tree, we can use the resulting new block directly when the same block
 * is required to COW again through other reloc trees.  In this way,
 * relocated tree blocks are shared between reloc trees, so they are also
 * shared between subvols.
 */
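/*
 * An illustration with made-up blocks: if subvols A and B both reference
 * tree block X and X must be moved, relocating through A's reloc tree
 * COWs X into a new block X'.  When B's reloc tree later needs to COW X,
 * the shared root key objectid lets it reuse X' directly, so A and B end
 * up sharing the relocated block just as they shared the original.
 */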
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	u64 *nodes;
	int level;
	int shared_level;
	int lowest_level = 0;
	int ret;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		BUG_ON(ret < 0);
		path->lowest_level = 0;
		btrfs_release_path(root, path);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	BUG_ON(ret);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
				break;
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		}
		if (nodes[0] &&
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
			BUG_ON(ret);
		}
		btrfs_release_path(reloc_root, path);
	} else {
		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
				       lowest_level);
		BUG_ON(ret);
	}

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
	BUG_ON(ret < 0);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		BUG_ON(ret);
		free_extent_buffer(eb);
	}

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
	return 0;
}
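/*
 * Metadata-only wrapper around relocate_one_path(): reference paths that
 * end in a tree block need no block group or reloc inode, hence the two
 * NULL arguments.
 */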
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	int ret;

	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
	BUG_ON(ret);

	return 0;
}
static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;

	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_release_path(extent_root, path);
	return ret;
}
static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}
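/*
 * Relocate a single extent item: walk every reference path leading to it
 * and, depending on the pass and on who owns the reference, copy the
 * data out, rewrite file extent pointers, or relocate the referencing
 * tree blocks.
 */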
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	int nr_extents = 0;
	int loops;
	int ret;
	int level;
	struct btrfs_key first_key;
	u64 prev_block = 0;

	trans = btrfs_start_transaction(extent_root, 1);
	BUG_ON(!trans);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
		goto out;
	}

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
		ret = -ENOMEM;
		goto out;
	}

	for (loops = 0; ; loops++) {
		if (loops == 0) {
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		} else {
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
		}
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
			continue;

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)
			continue;

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			if (pass == 0) {
				/*
				 * copy data extents to new locations
				 */
				u64 group_start = group->key.objectid;
				ret = relocate_data_extent(reloc_inode,
							   extent_key,
							   group_start);
				if (ret < 0)
					goto out;
				break;
			}
			level = 0;
		} else {
			level = ref_path->owner_objectid;
		}

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
					     block_size, 0);
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;
		}

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			if (pass == 1) {
				ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
				if (ret < 0)
					goto out;
				continue;
			}
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			if (!new_extents) {
				u64 group_start = group->key.objectid;
				new_extents = kmalloc(sizeof(*new_extents),
						      GFP_NOFS);
				nr_extents = 1;
				ret = get_new_locations(reloc_inode,
							extent_key,
							group_start, 1,
							&new_extents,
							&nr_extents);
				if (ret)
					goto out;
			}
			ret = replace_one_extent(trans, found_root,
						path, extent_key,
						&first_key, ref_path,
						new_extents, nr_extents);
		} else {
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, extent_root);
	kfree(new_extents);
	kfree(ref_path);
	return ret;
}
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
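/*
 * Concretely (an illustration, not an exhaustive table): with a single
 * rw device, RAID0 degrades to single-device chunks and RAID1/RAID10 to
 * DUP; with multiple devices, DUP is promoted to RAID1 and single-device
 * chunks to RAID0, while anything already striped or mirrored comes back
 * unchanged.
 */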
static int set_block_group_ro(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	if (cache->ro)
		return 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly +
	    cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		sinfo->bytes_reserved += cache->reserved_pinned;
		cache->reserved_pinned = 0;
		cache->ro = 1;
		ret = 0;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
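/*
 * In other words: num_bytes above is the unallocated space left in this
 * group, and the group may go read-only only when used + reserved +
 * pinned + may_use + readonly + that free space still fits below the
 * space_info's total_bytes.
 */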
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);

	ret = set_block_group_ro(cache);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}
int btrfs_set_block_group_rw(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return 0;
}
/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	/* no bytes used, we're good */
	if (!btrfs_block_group_used(&block_group->item))
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     btrfs_block_group_used(&block_group->item) <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  However, if we
	 * were marked as full, then we know there aren't enough chunks, and we
	 * can just return.
	 */
	ret = -1;
	if (full)
		goto out;

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 min_free = btrfs_block_group_used(&block_group->item);
		u64 dev_offset, max_avail;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, &max_avail);
			if (!ret)
				break;
			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
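/*
 * Callers in the relocation paths can treat -1 from this function as
 * "leave the block group alone and move on" rather than as a hard error.
 */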
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}
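/*
 * get_block_group_index() buckets block groups by raid profile, so each
 * list in space_info->block_groups[] holds groups of a single profile;
 * btrfs_read_block_groups() below relies on this when it walks the
 * un-mirrored lists (indexes 3 and 4).
 */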
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
	if (cache_gen != 0 &&
	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;
	if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
		printk(KERN_INFO "btrfs: disk space caching is enabled\n");

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		/*
		 * we only want to have 32k of ram per block group for keeping
		 * track of free space, and if we pass 1/2 of that we want to
		 * start converting things over to using bitmaps
		 */
		cache->extents_thresh = ((1024 * 32) / 2) /
			sizeof(struct btrfs_free_space);
, &cache
->item
,
8240 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
8241 sizeof(cache
->item
));
8242 memcpy(&cache
->key
, &found_key
, sizeof(found_key
));
8244 key
.objectid
= found_key
.objectid
+ found_key
.offset
;
8245 btrfs_release_path(root
, path
);
8246 cache
->flags
= btrfs_block_group_flags(&cache
->item
);
8247 cache
->sectorsize
= root
->sectorsize
;
8250 * check for two cases, either we are full, and therefore
8251 * don't need to bother with the caching work since we won't
8252 * find any space, or we are empty, and we can just add all
8253 * the space in and be done with it. This saves us _alot_ of
8254 * time, particularly in the full case.
8256 if (found_key
.offset
== btrfs_block_group_used(&cache
->item
)) {
8257 exclude_super_stripes(root
, cache
);
8258 cache
->last_byte_to_unpin
= (u64
)-1;
8259 cache
->cached
= BTRFS_CACHE_FINISHED
;
8260 free_excluded_extents(root
, cache
);
8261 } else if (btrfs_block_group_used(&cache
->item
) == 0) {
8262 exclude_super_stripes(root
, cache
);
8263 cache
->last_byte_to_unpin
= (u64
)-1;
8264 cache
->cached
= BTRFS_CACHE_FINISHED
;
8265 add_new_free_space(cache
, root
->fs_info
,
8267 found_key
.objectid
+
8269 free_excluded_extents(root
, cache
);
8272 ret
= update_space_info(info
, cache
->flags
, found_key
.offset
,
8273 btrfs_block_group_used(&cache
->item
),
8276 cache
->space_info
= space_info
;
8277 spin_lock(&cache
->space_info
->lock
);
8278 cache
->space_info
->bytes_readonly
+= cache
->bytes_super
;
8279 spin_unlock(&cache
->space_info
->lock
);
8281 __link_block_group(space_info
, cache
);
8283 ret
= btrfs_add_block_group_cache(root
->fs_info
, cache
);
8286 set_avail_alloc_bits(root
->fs_info
, cache
->flags
);
8287 if (btrfs_chunk_readonly(root
, cache
->key
.objectid
))
8288 set_block_group_ro(cache
);
8291 list_for_each_entry_rcu(space_info
, &root
->fs_info
->space_info
, list
) {
8292 if (!(get_alloc_profile(root
, space_info
->flags
) &
8293 (BTRFS_BLOCK_GROUP_RAID10
|
8294 BTRFS_BLOCK_GROUP_RAID1
|
8295 BTRFS_BLOCK_GROUP_DUP
)))
8298 * avoid allocating from un-mirrored block group if there are
8299 * mirrored block groups.
8301 list_for_each_entry(cache
, &space_info
->block_groups
[3], list
)
8302 set_block_group_ro(cache
);
8303 list_for_each_entry(cache
, &space_info
->block_groups
[4], list
)
8304 set_block_group_ro(cache
);
8307 init_global_block_rsv(info
);
8310 btrfs_free_path(path
);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
		sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = lookup_free_space_inode(root, block_group, path);
	if (!IS_ERR(inode)) {
		btrfs_orphan_add(trans, inode);
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(tree_root, path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(tree_root, path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);