// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}
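
/*
 * A minimal sketch of the intended use (hypothetical values): a caller
 * such as btrfs_check_shared() below walks the backrefs of an extent
 * belonging to inode 257 in root 5 with sc->inum = 257 and
 * sc->root_objectid = 5. Every prelim_ref whose count becomes positive
 * bumps share_count; once a second live ref exists, share_count > 1 and
 * extent_is_shared() returns BACKREF_FOUND_SHARED so the walk can stop
 * early instead of resolving every remaining ref.
 */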
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
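
/*
 * A small worked example of the merge (hypothetical counts): inserting
 * a ref that compares identical to an existing one with count == 1,
 * where the new ref came from a BTRFS_DROP_DELAYED_REF and thus has
 * count == -1, leaves the merged ref with count == 0 and drops
 * share_count via update_share_count(), so a deleted-but-uncommitted
 * ref does not make the extent look shared.
 */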
/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}
/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}
/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}
/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * we can only tolerate ENOENT, otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}
/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}
/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
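
/*
 * For orientation, a sketch of the extent item layout parsed above:
 *
 *	[ btrfs_extent_item | (btrfs_tree_block_info) | inline ref | ... ]
 *
 * The btrfs_tree_block_info part is only present for tree blocks in
 * non-skinny EXTENT_ITEMs. Each inline ref is a btrfs_extent_inline_ref
 * whose type selects the payload: shared refs keep the parent block's
 * bytenr in the offset field, while EXTENT_DATA_REF carries a full
 * btrfs_extent_data_ref (root, objectid, offset, count).
 */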
/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behave
 * much like trans == NULL case, the difference only lies in it will not
 * commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}
/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
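
/*
 * A minimal usage sketch (hypothetical caller, error handling trimmed),
 * in the style of the qgroup code that accounts extents:
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	if (btrfs_find_all_roots(trans, fs_info, bytenr, SEQ_LAST,
 *				 &roots, false) == 0) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(roots, &uiter)))
 *			;	// node->val is a referencing root objectid
 *		ulist_free(roots);
 *	}
 */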
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}
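
/*
 * A minimal fiemap-style sketch (hypothetical caller): the two ulists
 * are caller-provided scratch space, re-initialized here on entry and
 * released again before returning, so one pair can be reused across
 * consecutive extents of the same inode:
 *
 *	ret = btrfs_check_shared(root, btrfs_ino(BTRFS_I(inode)),
 *				 disk_bytenr, roots, tmp_ulist);
 *	if (ret < 0)
 *		goto out;
 *	if (ret)
 *		flags |= FIEMAP_EXTENT_SHARED;
 */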
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
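
/*
 * A minimal iteration sketch (hypothetical caller, in the style of the
 * scrub error reporting code), assuming @path was positioned on the
 * extent item by extent_from_logical() and eb/ei/item_size were taken
 * from that slot:
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *
 *	while ((ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 *					      item_size, &ref_root,
 *					      &ref_level)) == 0)
 *		;	// block is referenced from ref_root at ref_level
 *
 * ret == 1 means the refs are exhausted, ret < 0 is an error.
 */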
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}
/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}
/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backref yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's inline ref or not by iter->cur_key.
 *
 * Return 0 if we get next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;

	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
	return 0;
}
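
/*
 * A minimal sketch of the iterator pattern (hypothetical caller; the
 * relocation code walks tree block backrefs this way):
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (!ret) {
 *		;	// examine iter->cur_key; for inline refs the body
 *			// sits at iter->cur_ptr inside the extent item
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 */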
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}