// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
/*
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is utilized by resize (shrink only), profile converting, compacting
 * space, or balance routine to spread chunks over devices.
 *
 * 		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
 */
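/*
 * Reader's note (not part of the original file): roughly, the backref cache
 * below (struct backref_cache, filled by build_backref_tree()) serves the
 * metadata side of steps 2.1/2.2, replace_path() and merge_reloc_root()
 * perform the swap of step 2.3, and merge_reloc_roots() plus
 * clean_dirty_subvols() do the cleanup of step 3.  All of it hangs off a
 * single struct reloc_control.
 */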
/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};
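/*
 * Reader's note: because the three structures share this { rb_node, bytenr }
 * header, the generic tree_insert()/tree_search() helpers below can index
 * any of them through a cast to struct tree_entry.
 */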
/*
 * present a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, can be not uptodate */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not tree root */
	struct btrfs_root *root;
	/* extent buffer got by COW the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};
/*
 * present a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256
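/*
 * Reader's note (illustration, not from the original file): an edge connects
 * one upper level node to one lower level node.  edge->node[LOWER]/[UPPER]
 * point at the two nodes, edge->list[LOWER] is linked into the lower node's
 * ->upper list and edge->list[UPPER] into the upper node's ->lower list, so
 * each node can enumerate all of its edges in either direction.
 */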
struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref node. */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};
/*
 * present a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};
struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
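/*
 * Reader's note: data relocation runs in two passes driven by rc->stage.
 * MOVE_DATA_EXTENTS copies data into the data reloc inode; UPDATE_DATA_PTRS
 * then rewrites file extent items to point at the new locations (see
 * replace_file_extents(), which only acts in the second stage).
 */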
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);
static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}
static void backref_cache_init(struct backref_cache *cache)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}
static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}
static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		btrfs_put_root(node->root);
		kfree(node);
	}
}
static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}
static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}
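/*
 * Reader's note: typical caller pattern for the two helpers above (taken
 * from update_backref_node() below) -- tree_insert() returns the colliding
 * rb_node when the bytenr is already present, NULL on success:
 *
 *	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
 *	if (rb_node)
 *		backref_tree_panic(rb_node, -EEXIST, bytenr);
 */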
static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}
/*
 * walk up backref nodes until we reach a node that presents a tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}
/*
 * walk down backref nodes to find start of next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
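/*
 * Reader's note: walk_up_backref() and walk_down_backref() are used as a
 * pair to do a depth-first walk over all reference paths of a cached block,
 * with @edges[] acting as the explicit DFS stack and @index as the current
 * stack depth.
 */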
static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}
static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}
/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to leaf node list if no other
		 * child block cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}
static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}
/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
static bool reloc_root_is_dead(struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root. We need to see the updated bit before
	 * trying to access reloc_root
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has valid reloc tree.
 *
 * Reloc tree after swap is considered dead, thus not considered as valid.
 * This is enough for most callers, as they don't distinguish dead reloc root
 * from no reloc root. But should_ignore_root() below is a special case.
 */
static bool have_reloc_root(struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}
static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}
static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
	if (item_size < sizeof(*ei)) {
		btrfs_print_v0_err(leaf->fs_info);
		btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
		return 1;
	}
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1; /* For searching extent root */
	struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to pending list if we need
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			int type;

			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				err = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);

			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref.
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			if (key.objectid == key.offset) {
				/*
				 * Only root blocks of reloc trees use backref
				 * pointing to itself.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block isn't
				 * cached, add the block to pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			err = -EINVAL;
			btrfs_print_v0_err(rc->extent_root->fs_info);
			btrfs_handle_fs_error(rc->extent_root->fs_info, err,
					      NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root)) {
				btrfs_put_root(root);
				list_add(&cur->list, &useless);
			} else {
				cur->root = root;
			}
			break;
		}

		level = cur->level + 1;

		/* Search the tree to find parent blocks referring the block. */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			btrfs_put_root(root);
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
		    cur->bytenr) {
			btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
				  cur->bytenr, level - 1,
				  root->root_key.objectid,
				  node_key->objectid, node_key->type,
				  node_key->offset);
			btrfs_put_root(root);
			err = -ENOENT;
			goto out;
		}
		lower = cur;
		need_check = true;

		/* Add all nodes and edges in the path */
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root)) {
					btrfs_put_root(root);
					list_add(&lower->list, &useless);
				} else {
					lower->root = root;
				}
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				btrfs_put_root(root);
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					btrfs_put_root(root);
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to pending list if we
				 * need check its backrefs, we only do this once
				 * while walking up a tree as we will catch
				 * anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node) {
				btrfs_put_root(root);
				break;
			}
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end)
				ptr = 0;
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			if (lower == node)
				node = NULL;
			free_backref_node(cache, lower);
		}

		remove_backref_node(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}
/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(fs_info, -EEXIST,
			    "Duplicate root found for start=%llu while inserting into relocation tree",
			    node->bytenr);
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
				      root->node->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		if (!node)
			return;
		BUG_ON((struct btrfs_root *)node->data != root);
	}

	spin_lock(&fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);
	kfree(node);
}
/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}
/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	/*
	 * The subvolume has reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}
/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);
out:
	return 0;
}
/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
{
	if (bytenr >= block_group->start &&
	    bytenr < block_group->start + block_group->length)
		return 1;
	return 0;
}
/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying block in fs tree, wait for readpage
		 * to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(BTRFS_I(inode),
						key.offset, end, 1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		ref.real_root = root->root_key.objectid;
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		ref.real_root = root->root_key.objectid;
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}
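/*
 * Reader's note: compare the node key at @slot in @eb with the key at the
 * current position (path->slots[level]) of @path; returns 0 when they match,
 * non-zero otherwise (plain memcmp() semantics).
 */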
static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking_write(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking_write(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		struct btrfs_key first_key;

		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
		btrfs_node_key_to_cpu(parent, &first_key, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
					     level - 1, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking_write(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		ref.skip_qgroup = true;
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
		ret = btrfs_inc_extent_ref(trans, &ref);
		BUG_ON(ret);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		ref.skip_qgroup = true;
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
		ret = btrfs_inc_extent_ref(trans, &ref);
		BUG_ON(ret);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
		ref.skip_qgroup = true;
		ret = btrfs_free_extent(trans, &ref);
		BUG_ON(ret);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
		ref.skip_qgroup = true;
		ret = btrfs_free_extent(trans, &ref);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		struct btrfs_key first_key;

		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
		eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
				     &first_key);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
/*
 * invalidate extent cache for file extents whose key in range of
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}
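/*
 * Reader's note: fetch the first key after the current path position at
 * @level or above; returns 0 and fills @key on success, 1 when the path is
 * exhausted.
 */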
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	reloc_root_item->drop_level = 0;
	btrfs_set_root_refs(reloc_root_item, 0);
	btrfs_update_reloc_root(trans, root);

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}
}
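/*
 * Reader's note: drop the reloc trees queued on rc->dirty_subvol_roots --
 * merged subvolumes get their (now dead) reloc root dropped and the
 * DEAD_RELOC_TREE bit cleared, orphan reloc trees are dropped directly.
 */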
static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			if (reloc_root) {
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0 && !ret)
					ret = ret2;
			}
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL. Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0 && !ret)
				ret = ret2;
		}
	}
	return ret;
}
/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and the block is tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0)
		insert_dirty_subvol(trans, rc, root);

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(fs_info, reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}
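/*
 * The reservation sizing above is a worst-case estimate: merging may COW
 * one full tree path of up to (BTRFS_MAX_LEVEL - 1) nodes in both the
 * reloc tree and the fs tree, hence the factor of 2; nodes_relocated is
 * doubled for the same reason.
 */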
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;

	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
		free_extent_buffer(reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->node = NULL;
		reloc_root->commit_root = NULL;
	}
}
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);

			ret = merge_reloc_root(rc, root);
			btrfs_put_root(root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}

	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
}
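/*
 * merge_reloc_roots() re-splices the list because a merge pass can drop
 * reloc_mutex and new reloc roots may be queued concurrently; only when a
 * pass finds nothing left to merge is the reloc root mapping tree expected
 * to be empty, as asserted above.
 */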
static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;

	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;
	int ret;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = read_fs_root(fs_info, reloc_root->root_key.offset);
	BUG_ON(IS_ERR(root));
	BUG_ON(root->reloc_root != reloc_root);
	ret = btrfs_record_root_in_trans(trans, root);
	btrfs_put_root(root);

	return ret;
}
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			record_reloc_root_in_trans(trans, root);
			break;
		}

		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;

		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			btrfs_put_root(next->root);
			next->root = btrfs_grab_root(root);
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}
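/*
 * Besides picking the reloc root, select_reloc_root() primes
 * rc->backref_cache.path[] for every level between @node and the chosen
 * root; btrfs_reloc_cow_block() later uses that path to find the backref
 * node matching a block as it is COWed.
 */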
/*
 * select a tree root for relocation. return NULL if the block
 * is reference counted. we should use do_relocation() in this
 * case. return a tree root pointer if the block isn't reference
 * counted. return -ENOENT if the block is root of reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need to hold a lock to protect block_rsv.
		 * we expand the reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * the enospc case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}
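/*
 * The -EAGAIN above is part of a retry protocol: relocate_block_group()
 * reacts to it by ending the current transaction and refilling the now
 * enlarged block_rsv with BTRFS_RESERVE_FLUSH_ALL (full flushing is not
 * allowed while a transaction is open), then retries the same block.
 */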
/*
 * relocate a block tree, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		struct btrfs_key first_key;
		struct btrfs_ref ref = { 0 };

		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				if (ret < 0) {
					err = ret;
					goto next;
				}
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret < 0)
					err = ret;
				else
					err = -ENOENT;

				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				err = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
		eb = read_tree_block(fs_info, bytenr, generation,
				     upper->level - 1, &first_key);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking_write(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
					       node->eb->start, blocksize,
					       upper->eb->start);
			ref.real_root = root->root_key.objectid;
			btrfs_init_tree_ref(&ref, node->level,
					    btrfs_header_owner(upper->eb));
			ret = btrfs_inc_extent_ref(trans, &ref);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}
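/*
 * Two cases are folded into the loop above: when node->eb is not yet set
 * the old block is COWed into its new location via btrfs_cow_block();
 * when the block has already been relocated, only the parent pointer and
 * generation are rewritten and the reference counts are moved with
 * btrfs_inc_extent_ref() plus btrfs_drop_subtree().
 */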
static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}
static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY);
}

static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}
/*
 * mark a block and all blocks that directly/indirectly reference the
 * block as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			__mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
		return 1;
	return 0;
}
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
			     block->level, NULL);
	if (IS_ERR(eb)) {
		return PTR_ERR(eb);
	} else if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}
/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct backref_node *node,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (root == ERR_PTR(-ENOENT)) {
		update_processed_blocks(rc, node);
		goto out;
	}

	if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			btrfs_put_root(node->root);
			node->root = btrfs_grab_root(root);
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}
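/*
 * The branches above mirror select_one_root()'s contract: -ENOENT means
 * the block is the root of a reloc tree and is already in place; with a
 * reference counted root the block is that tree's root node and is
 * swapped for the reloc tree's root; a non-reference counted tree is
 * simply COWed in place via btrfs_search_slot(); NULL (a shared block)
 * takes the full do_relocation() path through the backref cache.
 */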
/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			readahead_tree_block(fs_info, block->bytenr);
	}

	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			err = get_tree_block_key(fs_info, block);
			if (err)
				goto out_free_path;
		}
	}

	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}
static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset;
	struct extent_changeset *data_reserved = NULL;

	BUG_ON(cluster->start != cluster->boundary[0]);
	inode_lock(inode);

	ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
					  prealloc_end + 1 - prealloc_start);
	if (ret)
		goto out;

	cur_offset = prealloc_start;
	while (nr < cluster->nr) {
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		if (cur_offset < start)
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, start - cur_offset);
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space(inode, data_reserved,
				cur_offset, prealloc_end + 1 - cur_offset);
out:
	inode_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
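/*
 * Preallocation keeps the relocated data contiguous: each cluster
 * boundary becomes one prealloc range in the data reloc inode, and data
 * space reserved for any holes between boundaries is handed back
 * immediately rather than being left reserved.
 */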
static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}
/*
 * Allow error injection to test balance cancellation
 */
int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
{
	return atomic_read(&fs_info->balance_cancel_req);
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      PAGE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							PAGE_SIZE);
				ret = -ENOMEM;
				goto out;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							PAGE_SIZE);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;

		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		set_page_extent_mapped(page);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}

		ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
						NULL);
		if (ret) {
			unlock_page(page);
			put_page(page);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       PAGE_SIZE);

			clear_extent_bits(&BTRFS_I(inode)->io_tree,
					  page_start, page_end,
					  EXTENT_LOCKED | EXTENT_BOUNDARY);
			goto out;
		}
		set_page_dirty(page);

		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		put_page(page);

		index++;
		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(fs_info);
		if (btrfs_should_cancel_balance(fs_info)) {
			ret = -ECANCELED;
			goto out;
		}
	}
	WARN_ON(nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
			 struct file_extent_cluster *cluster)
{
	int ret;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

	if (!cluster->nr)
		cluster->start = extent_key->objectid;
	else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}
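/*
 * Clustering policy: contiguous data extents are batched into one cluster
 * of up to MAX_EXTENTS boundaries and copied out together; a gap between
 * the incoming extent and cluster->end, or a full cluster, flushes the
 * batch through relocate_file_extent_cluster().
 */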
/*
 * helper to add a tree block to the list.
 * the major work is getting the generation and level of the block
 */
static int add_tree_block(struct reloc_control *rc,
			  struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		ei = btrfs_item_ptr(eb, path->slots[0],
				    struct btrfs_extent_item);
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
		} else {
			level = (int)extent_key->offset;
		}
		generation = btrfs_extent_generation(eb, ei);
	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
		btrfs_print_v0_err(eb->fs_info);
		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
		return -EINVAL;
	} else {
		BUG();
	}

	btrfs_release_path(path);

	BUG_ON(level == -1);

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;

	block->bytenr = extent_key->objectid;
	block->key.objectid = rc->extent_root->fs_info->nodesize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = 0;

	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, block->bytenr);

	return 0;
}
/*
 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (tree_block_processed(bytenr, rc))
		return 0;

	if (tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	if (ret) {
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
	"tree block extent item (%llu) is not found in extent tree",
			  bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root);
	if (IS_ERR(inode))
		return -ENOENT;

truncate:
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	iput(inode);
	return ret;
}
/*
 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
 * cache inode, to avoid free space cache data extent blocking data relocation.
 */
static int delete_v1_space_cache(struct extent_buffer *leaf,
				 struct btrfs_block_group *block_group,
				 u64 data_bytenr)
{
	u64 space_cache_ino;
	struct btrfs_file_extent_item *ei;
	struct btrfs_key key;
	bool found = false;
	int i;
	int ret;

	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
		return 0;

	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
			found = true;
			space_cache_ino = key.objectid;
			break;
		}
	}
	if (!found)
		return -ENOENT;
	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
				       space_cache_ino);
	return ret;
}
/*
 * helper to find all tree blocks that reference a given data extent
 */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct ulist *leaves = NULL;
	struct ulist_iterator leaf_uiter;
	struct ulist_node *ref_node = NULL;
	const u32 blocksize = fs_info->nodesize;
	int ret = 0;

	btrfs_release_path(path);
	ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
				   0, &leaves, NULL, true);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&leaf_uiter);
	while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
		struct extent_buffer *eb;

		eb = read_tree_block(fs_info, ref_node->val, 0, 0, NULL);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			break;
		}
		ret = delete_v1_space_cache(eb, rc->block_group,
					    extent_key->objectid);
		free_extent_buffer(eb);
		if (ret < 0)
			break;
		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
		if (ret < 0)
			break;
	}
	if (ret < 0)
		free_block_list(blocks);
	ulist_free(leaves);
	return ret;
}
/*
 * helper to find next unprocessed extent
 */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	last = rc->block_group->start + rc->block_group->length;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + fs_info->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
						   fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}
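/*
 * find_next_extent() walks the extent tree (commit root, unlocked) over
 * the target block group only, skipping ranges already marked
 * EXTENT_DIRTY in rc->processed_blocks so that restarted passes do not
 * revisit extents that were relocated earlier.
 */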
static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}

static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}
static int check_extent_flags(u64 flags)
{
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}
static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->start;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
			      RELOCATION_RESERVED_NODES;
	ret = btrfs_block_rsv_refill(rc->extent_root,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * extent tree is not a ref_cow tree and has no reloc_root to
		 * cleanup. And callers are responsible to free the above
		 * block rsv.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans);
	return 0;
}
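/*
 * The empty join + commit above appears to act as a barrier: it lets any
 * transaction that started before set_reloc_control() complete, so every
 * later COW observes rc->create_reloc_tree before blocks start moving.
 * The initial reservation covers RELOCATION_RESERVED_NODES tree blocks.
 */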
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					rc->block_rsv, rc->block_rsv->size,
					BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans);
			trans = NULL;
			continue;
		}

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);
		} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
			err = -EINVAL;
			btrfs_print_v0_err(trans->fs_info);
			btrfs_abort_transaction(trans, err);
			break;
		} else {
			BUG();
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks, force to
				 * update backref cache when committing the
				 * transaction.
				 */
				rc->backref_cache.last_trans = trans->transid - 1;

				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
		if (btrfs_should_cancel_balance(fs_info)) {
			err = -ECANCELED;
			break;
		}
	}
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
	}

	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	backref_cache_cleanup(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

	/*
	 * Even in the case when the relocation is cancelled, we should still
	 * go through prepare_to_merge() and merge_reloc_roots().
	 *
	 * For error (including cancelled balance), prepare_to_merge() will
	 * mark all reloc trees orphan, then queue them for cleanup in
	 * merge_reloc_roots()
	 */
	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	btrfs_commit_transaction(trans);
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_free:
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to create inode for data relocation.
 * the inode is in data relocation tree and its link count is 0
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans)) {
		btrfs_put_root(root);
		return ERR_CAST(trans);
	}

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, root);
	BUG_ON(IS_ERR(inode));
	BTRFS_I(inode)->index_cnt = group->start;

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_put_root(root);
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
	backref_cache_init(&rc->backref_cache);
	mapping_tree_init(&rc->reloc_root_tree);
	extent_io_tree_init(fs_info, &rc->processed_blocks,
			    IO_TREE_RELOC_BLOCKS, NULL);
	return rc;
}
/*
 * Print the block group being relocated
 */
static void describe_relocation(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group *block_group)
{
	char buf[128] = {'\0'};

	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));

	btrfs_info(fs_info,
		   "relocating block group %llu flags %s",
		   block_group->start, buf);
}

static const char *stage_to_string(int stage)
{
	if (stage == MOVE_DATA_EXTENTS)
		return "move data extents";
	if (stage == UPDATE_DATA_PTRS)
		return "update data pointers";
	return "unknown";
}
/*
 * function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_block_group *bg;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	bg = btrfs_lookup_block_group(fs_info, group_start);
	if (!bg)
		return -ENOENT;

	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group, true);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->start,
				 rc->block_group->length);

	while (1) {
		int finishes_stage;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0)
			err = ret;

		finishes_stage = rc->stage;
		/*
		 * We may have gotten ENOSPC after we already dirtied some
		 * extents. If writeout happens while we're relocating a
		 * different block group we could end up hitting the
		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
		 * btrfs_reloc_cow_block. Make sure we write everything out
		 * properly so we don't trip over this problem, and then break
		 * out of the loop if we hit an error.
		 */
		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret)
				err = ret;
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}

		if (err < 0)
			goto out;

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents, stage: %s",
			   rc->extents_found, stage_to_string(finishes_stage));
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(rc->block_group->used > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}
/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_tree_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			} else {
				btrfs_put_root(fs_root);
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_unset;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
		btrfs_put_root(fs_root);
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_unset;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_clean;
	}
	err = btrfs_commit_transaction(trans);
out_clean:
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_unset:
	unset_reloc_control(rc);
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
		} else {
			err = btrfs_orphan_cleanup(fs_root);
			btrfs_put_root(fs_root);
		}
	}
	return err;
}
/*
 * helper to add ordered checksum for data relocation.
 *
 * cloning checksum properly handles the nodatasum extents.
 * it also saves CPU time to re-calculate the checksum.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
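/*
 * Worked example of the rebase above: if the old extent's csums start at
 * disk_bytenr D and one csum entry sits at sums->bytenr = D + 8K, it
 * covers data 8K into the extent, so after relocation it must point at
 * ordered->disk_bytenr + 8K, which is exactly what the subtraction yields.
 */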
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		atomic_inc(&cow->refs);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
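/*
 * This hook bridges ordinary COW and relocation: COWs inside a reloc tree
 * update the backref cache (and the reloc root's bytenr mapping), while
 * fs tree leaf COWs during the UPDATE_DATA_PTRS stage rewrite file extent
 * disk bytenrs through replace_file_extents().
 */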
/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root = pending->root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	if (!rc || !have_reloc_root(root))
		return;

	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create snapshot, cowing the fs
	 * tree may use more space than it frees. so we need to
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}
/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	int ret;

	if (!rc || !have_reloc_root(root))
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}