fs/btrfs/backref.c
1 /*
2 * Copyright (C) 2011 STRATO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/mm.h>
20 #include <linux/rbtree.h>
21 #include <trace/events/btrfs.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "backref.h"
25 #include "ulist.h"
26 #include "transaction.h"
27 #include "delayed-ref.h"
28 #include "locking.h"
29
30 /* Just an arbitrary number so we can be sure this happened */
31 #define BACKREF_FOUND_SHARED 6
32
33 struct extent_inode_elem {
34 u64 inum;
35 u64 offset;
36 struct extent_inode_elem *next;
37 };
38
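/*
 * Record an (inode, file offset) pair for the file extent item @fi found in
 * @eb by prepending a new extent_inode_elem to the @eie list. For plain
 * (uncompressed, unencrypted) extents, items whose referenced range does not
 * cover @extent_item_pos are skipped.
 *
 * Returns 0 on success, 1 if the item was skipped, -ENOMEM on allocation
 * failure.
 */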
39 static int check_extent_in_eb(const struct btrfs_key *key,
40 const struct extent_buffer *eb,
41 const struct btrfs_file_extent_item *fi,
42 u64 extent_item_pos,
43 struct extent_inode_elem **eie)
44 {
45 u64 offset = 0;
46 struct extent_inode_elem *e;
47
48 if (!btrfs_file_extent_compression(eb, fi) &&
49 !btrfs_file_extent_encryption(eb, fi) &&
50 !btrfs_file_extent_other_encoding(eb, fi)) {
51 u64 data_offset;
52 u64 data_len;
53
54 data_offset = btrfs_file_extent_offset(eb, fi);
55 data_len = btrfs_file_extent_num_bytes(eb, fi);
56
57 if (extent_item_pos < data_offset ||
58 extent_item_pos >= data_offset + data_len)
59 return 1;
60 offset = extent_item_pos - data_offset;
61 }
62
63 e = kmalloc(sizeof(*e), GFP_NOFS);
64 if (!e)
65 return -ENOMEM;
66
67 e->next = *eie;
68 e->inum = key->objectid;
69 e->offset = key->offset + offset;
70 *eie = e;
71
72 return 0;
73 }
74
75 static void free_inode_elem_list(struct extent_inode_elem *eie)
76 {
77 struct extent_inode_elem *eie_next;
78
79 for (; eie; eie = eie_next) {
80 eie_next = eie->next;
81 kfree(eie);
82 }
83 }
84
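/*
 * Scan every file extent item in leaf @eb for a reference to the extent at
 * @wanted_disk_byte and collect matching (inode, offset) pairs into @eie via
 * check_extent_in_eb(). Inline extents are skipped; prealloc extents are not.
 */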
85 static int find_extent_in_eb(const struct extent_buffer *eb,
86 u64 wanted_disk_byte, u64 extent_item_pos,
87 struct extent_inode_elem **eie)
88 {
89 u64 disk_byte;
90 struct btrfs_key key;
91 struct btrfs_file_extent_item *fi;
92 int slot;
93 int nritems;
94 int extent_type;
95 int ret;
96
97 /*
98 * from the shared data ref, we only have the leaf but we need
99 * the key. thus, we must look into all items and see whether we
100 * find one (or more) with a reference to our extent item.
101 */
102 nritems = btrfs_header_nritems(eb);
103 for (slot = 0; slot < nritems; ++slot) {
104 btrfs_item_key_to_cpu(eb, &key, slot);
105 if (key.type != BTRFS_EXTENT_DATA_KEY)
106 continue;
107 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
108 extent_type = btrfs_file_extent_type(eb, fi);
109 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
110 continue;
111 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
112 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
113 if (disk_byte != wanted_disk_byte)
114 continue;
115
116 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
117 if (ret < 0)
118 return ret;
119 }
120
121 return 0;
122 }
123
124 struct preftree {
125 struct rb_root root;
126 unsigned int count;
127 };
128
129 #define PREFTREE_INIT { .root = RB_ROOT, .count = 0 }
130
131 struct preftrees {
132 struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
133 struct preftree indirect; /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
134 struct preftree indirect_missing_keys;
135 };
136
137 /*
138 * Checks for a shared extent during backref search.
139 *
140 * The share_count tracks prelim_refs (direct and indirect) having a
141 * ref->count >0:
142 * - incremented when a ref->count transitions to >0
143 * - decremented when a ref->count transitions to <1
144 */
145 struct share_check {
146 u64 root_objectid;
147 u64 inum;
148 int share_count;
149 };
150
151 static inline int extent_is_shared(struct share_check *sc)
152 {
153 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
154 }
155
156 static struct kmem_cache *btrfs_prelim_ref_cache;
157
158 int __init btrfs_prelim_ref_init(void)
159 {
160 btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
161 sizeof(struct prelim_ref),
162 0,
163 SLAB_MEM_SPREAD,
164 NULL);
165 if (!btrfs_prelim_ref_cache)
166 return -ENOMEM;
167 return 0;
168 }
169
170 void btrfs_prelim_ref_exit(void)
171 {
172 kmem_cache_destroy(btrfs_prelim_ref_cache);
173 }
174
175 static void free_pref(struct prelim_ref *ref)
176 {
177 kmem_cache_free(btrfs_prelim_ref_cache, ref);
178 }
179
180 /*
181 * Return 0 when both refs are for the same block (and can be merged).
182 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
183 * indicates a 'higher' block.
184 */
185 static int prelim_ref_compare(struct prelim_ref *ref1,
186 struct prelim_ref *ref2)
187 {
188 if (ref1->level < ref2->level)
189 return -1;
190 if (ref1->level > ref2->level)
191 return 1;
192 if (ref1->root_id < ref2->root_id)
193 return -1;
194 if (ref1->root_id > ref2->root_id)
195 return 1;
196 if (ref1->key_for_search.type < ref2->key_for_search.type)
197 return -1;
198 if (ref1->key_for_search.type > ref2->key_for_search.type)
199 return 1;
200 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
201 return -1;
202 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
203 return 1;
204 if (ref1->key_for_search.offset < ref2->key_for_search.offset)
205 return -1;
206 if (ref1->key_for_search.offset > ref2->key_for_search.offset)
207 return 1;
208 if (ref1->parent < ref2->parent)
209 return -1;
210 if (ref1->parent > ref2->parent)
211 return 1;
212
213 return 0;
214 }
215
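/*
 * Adjust sc->share_count when a prelim_ref's count crosses zero, following
 * the rules documented above struct share_check. A NULL @sc is a no-op.
 */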
216 void update_share_count(struct share_check *sc, int oldcount, int newcount)
217 {
218 if ((!sc) || (oldcount == 0 && newcount < 1))
219 return;
220
221 if (oldcount > 0 && newcount < 1)
222 sc->share_count--;
223 else if (oldcount < 1 && newcount > 0)
224 sc->share_count++;
225 }
226
227 /*
228 * Add @newref to the @root rbtree, merging identical refs.
229 *
230 * Callers should assume that newref has been freed after calling.
231 */
232 static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
233 struct preftree *preftree,
234 struct prelim_ref *newref,
235 struct share_check *sc)
236 {
237 struct rb_root *root;
238 struct rb_node **p;
239 struct rb_node *parent = NULL;
240 struct prelim_ref *ref;
241 int result;
242
243 root = &preftree->root;
244 p = &root->rb_node;
245
246 while (*p) {
247 parent = *p;
248 ref = rb_entry(parent, struct prelim_ref, rbnode);
249 result = prelim_ref_compare(ref, newref);
250 if (result < 0) {
251 p = &(*p)->rb_left;
252 } else if (result > 0) {
253 p = &(*p)->rb_right;
254 } else {
255 /* Identical refs, merge them and free @newref */
256 struct extent_inode_elem *eie = ref->inode_list;
257
258 while (eie && eie->next)
259 eie = eie->next;
260
261 if (!eie)
262 ref->inode_list = newref->inode_list;
263 else
264 eie->next = newref->inode_list;
265 trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
266 preftree->count);
267 /*
268 * A delayed ref can have newref->count < 0.
269 * The ref->count is updated to follow any
270 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
271 */
272 update_share_count(sc, ref->count,
273 ref->count + newref->count);
274 ref->count += newref->count;
275 free_pref(newref);
276 return;
277 }
278 }
279
280 update_share_count(sc, 0, newref->count);
281 preftree->count++;
282 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
283 rb_link_node(&newref->rbnode, parent, p);
284 rb_insert_color(&newref->rbnode, root);
285 }
286
287 /*
288 * Release the entire tree. We don't care about internal consistency so
289 * just free everything and then reset the tree root.
290 */
291 static void prelim_release(struct preftree *preftree)
292 {
293 struct prelim_ref *ref, *next_ref;
294
295 rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
296 rbnode)
297 free_pref(ref);
298
299 preftree->root = RB_ROOT;
300 preftree->count = 0;
301 }
302
303 /*
304 * the rules for all callers of this function are:
305 * - obtaining the parent is the goal
306 * - if you add a key, you must know that it is a correct key
307 * - if you cannot add the parent or a correct key, then we will look into the
308 * block later to set a correct key
309 *
310 * delayed refs
311 * ============
312 * backref type | shared | indirect | shared | indirect
313 * information | tree | tree | data | data
314 * --------------------+--------+----------+--------+----------
315 * parent logical | y | - | - | -
316 * key to resolve | - | y | y | y
317 * tree block logical | - | - | - | -
318 * root for resolving | y | y | y | y
319 *
320 * - column 1: we have the parent -> done
321 * - column 2, 3, 4: we use the key to find the parent
322 *
323 * on disk refs (inline or keyed)
324 * ==============================
325 * backref type | shared | indirect | shared | indirect
326 * information | tree | tree | data | data
327 * --------------------+--------+----------+--------+----------
328 * parent logical | y | - | y | -
329 * key to resolve | - | - | - | y
330 * tree block logical | y | y | y | y
331 * root for resolving | - | y | y | y
332 *
333 * - column 1, 3: we have the parent -> done
334 * - column 2: we take the first key from the block to find the parent
335 * (see add_missing_keys)
336 * - column 4: we use the key to find the parent
337 *
338 * additional information that's available but not required to find the parent
339 * block might help in merging entries to gain some speed.
340 */
341 static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
342 struct preftree *preftree, u64 root_id,
343 const struct btrfs_key *key, int level, u64 parent,
344 u64 wanted_disk_byte, int count,
345 struct share_check *sc, gfp_t gfp_mask)
346 {
347 struct prelim_ref *ref;
348
349 if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
350 return 0;
351
352 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
353 if (!ref)
354 return -ENOMEM;
355
356 ref->root_id = root_id;
357 if (key) {
358 ref->key_for_search = *key;
359 /*
360 * We can often find data backrefs with an offset that is too
361 * large (>= LLONG_MAX, maximum allowed file offset) due to
362 * underflows when subtracting the data offset of the
363 * corresponding extent data item from a file's offset. This can
364 * happen for example in the clone ioctl.
365 * So if we detect such a case we set the search key's offset to
366 * zero to make sure we will find the matching file extent item
367 * at add_all_parents(), otherwise we will miss it because the
368 * offset taken from the backref is much larger than the offset
369 * of the file extent item. This can make us scan a very large
370 * number of file extent items, but at least it will not make
371 * us miss any.
372 * This is an ugly workaround for a behaviour that should have
373 * never existed, but it does and a fix for the clone ioctl
374 * would touch a lot of places, cause backwards incompatibility
375 * and would not fix the problem for extents cloned with older
376 * kernels.
377 */
378 if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
379 ref->key_for_search.offset >= LLONG_MAX)
380 ref->key_for_search.offset = 0;
381 } else {
382 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
383 }
384
385 ref->inode_list = NULL;
386 ref->level = level;
387 ref->count = count;
388 ref->parent = parent;
389 ref->wanted_disk_byte = wanted_disk_byte;
390 prelim_ref_insert(fs_info, preftree, ref, sc);
391 return extent_is_shared(sc);
392 }
393
394 /* direct refs use root == 0, key == NULL */
395 static int add_direct_ref(const struct btrfs_fs_info *fs_info,
396 struct preftrees *preftrees, int level, u64 parent,
397 u64 wanted_disk_byte, int count,
398 struct share_check *sc, gfp_t gfp_mask)
399 {
400 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
401 parent, wanted_disk_byte, count, sc, gfp_mask);
402 }
403
404 /* indirect refs use parent == 0 */
405 static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
406 struct preftrees *preftrees, u64 root_id,
407 const struct btrfs_key *key, int level,
408 u64 wanted_disk_byte, int count,
409 struct share_check *sc, gfp_t gfp_mask)
410 {
411 struct preftree *tree = &preftrees->indirect;
412
413 if (!key)
414 tree = &preftrees->indirect_missing_keys;
415 return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
416 wanted_disk_byte, count, sc, gfp_mask);
417 }
418
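/*
 * For a resolved indirect ref, add every tree block that references
 * ref->wanted_disk_byte to the @parents ulist. For level > 0 this is simply
 * the node @path points to; for level 0 the inode's file extent items are
 * walked forward (up to @total_refs matches) and, when @extent_item_pos is
 * given, the matching (inode, offset) pairs are attached to the ulist entries.
 */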
419 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
420 struct ulist *parents, struct prelim_ref *ref,
421 int level, u64 time_seq, const u64 *extent_item_pos,
422 u64 total_refs)
423 {
424 int ret = 0;
425 int slot;
426 struct extent_buffer *eb;
427 struct btrfs_key key;
428 struct btrfs_key *key_for_search = &ref->key_for_search;
429 struct btrfs_file_extent_item *fi;
430 struct extent_inode_elem *eie = NULL, *old = NULL;
431 u64 disk_byte;
432 u64 wanted_disk_byte = ref->wanted_disk_byte;
433 u64 count = 0;
434
435 if (level != 0) {
436 eb = path->nodes[level];
437 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
438 if (ret < 0)
439 return ret;
440 return 0;
441 }
442
443 /*
444 * We normally enter this function with the path already pointing to
445 * the first item to check. But sometimes, we may enter it with
446 * slot==nritems. In that case, go to the next leaf before we continue.
447 */
448 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
449 if (time_seq == SEQ_LAST)
450 ret = btrfs_next_leaf(root, path);
451 else
452 ret = btrfs_next_old_leaf(root, path, time_seq);
453 }
454
455 while (!ret && count < total_refs) {
456 eb = path->nodes[0];
457 slot = path->slots[0];
458
459 btrfs_item_key_to_cpu(eb, &key, slot);
460
461 if (key.objectid != key_for_search->objectid ||
462 key.type != BTRFS_EXTENT_DATA_KEY)
463 break;
464
465 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
466 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
467
468 if (disk_byte == wanted_disk_byte) {
469 eie = NULL;
470 old = NULL;
471 count++;
472 if (extent_item_pos) {
473 ret = check_extent_in_eb(&key, eb, fi,
474 *extent_item_pos,
475 &eie);
476 if (ret < 0)
477 break;
478 }
479 if (ret > 0)
480 goto next;
481 ret = ulist_add_merge_ptr(parents, eb->start,
482 eie, (void **)&old, GFP_NOFS);
483 if (ret < 0)
484 break;
485 if (!ret && extent_item_pos) {
486 while (old->next)
487 old = old->next;
488 old->next = eie;
489 }
490 eie = NULL;
491 }
492 next:
493 if (time_seq == SEQ_LAST)
494 ret = btrfs_next_item(root, path);
495 else
496 ret = btrfs_next_old_item(root, path, time_seq);
497 }
498
499 if (ret > 0)
500 ret = 0;
501 else if (ret < 0)
502 free_inode_elem_list(eie);
503 return ret;
504 }
505
506 /*
507 * resolve an indirect backref in the form (root_id, key, level)
508 * to a logical address
509 */
510 static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
511 struct btrfs_path *path, u64 time_seq,
512 struct prelim_ref *ref, struct ulist *parents,
513 const u64 *extent_item_pos, u64 total_refs)
514 {
515 struct btrfs_root *root;
516 struct btrfs_key root_key;
517 struct extent_buffer *eb;
518 int ret = 0;
519 int root_level;
520 int level = ref->level;
521 int index;
522
523 root_key.objectid = ref->root_id;
524 root_key.type = BTRFS_ROOT_ITEM_KEY;
525 root_key.offset = (u64)-1;
526
527 index = srcu_read_lock(&fs_info->subvol_srcu);
528
529 root = btrfs_get_fs_root(fs_info, &root_key, false);
530 if (IS_ERR(root)) {
531 srcu_read_unlock(&fs_info->subvol_srcu, index);
532 ret = PTR_ERR(root);
533 goto out;
534 }
535
536 if (btrfs_is_testing(fs_info)) {
537 srcu_read_unlock(&fs_info->subvol_srcu, index);
538 ret = -ENOENT;
539 goto out;
540 }
541
542 if (path->search_commit_root)
543 root_level = btrfs_header_level(root->commit_root);
544 else if (time_seq == SEQ_LAST)
545 root_level = btrfs_header_level(root->node);
546 else
547 root_level = btrfs_old_root_level(root, time_seq);
548
549 if (root_level + 1 == level) {
550 srcu_read_unlock(&fs_info->subvol_srcu, index);
551 goto out;
552 }
553
554 path->lowest_level = level;
555 if (time_seq == SEQ_LAST)
556 ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
557 0, 0);
558 else
559 ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
560 time_seq);
561
562 /* root node has been locked, we can release @subvol_srcu safely here */
563 srcu_read_unlock(&fs_info->subvol_srcu, index);
564
565 btrfs_debug(fs_info,
566 "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
567 ref->root_id, level, ref->count, ret,
568 ref->key_for_search.objectid, ref->key_for_search.type,
569 ref->key_for_search.offset);
570 if (ret < 0)
571 goto out;
572
573 eb = path->nodes[level];
574 while (!eb) {
575 if (WARN_ON(!level)) {
576 ret = 1;
577 goto out;
578 }
579 level--;
580 eb = path->nodes[level];
581 }
582
583 ret = add_all_parents(root, path, parents, ref, level, time_seq,
584 extent_item_pos, total_refs);
585 out:
586 path->lowest_level = 0;
587 btrfs_release_path(path);
588 return ret;
589 }
590
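/* The aux field of a ulist node carries the parent's inode list, if any. */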
591 static struct extent_inode_elem *
592 unode_aux_to_inode_list(struct ulist_node *node)
593 {
594 if (!node)
595 return NULL;
596 return (struct extent_inode_elem *)(uintptr_t)node->aux;
597 }
598
599 /*
600 * We maintain three separate rbtrees: one for direct refs, one for
601 * indirect refs which have a key, and one for indirect refs which do not
602 * have a key. Each tree does merge on insertion.
603 *
604 * Once all of the references are located, we iterate over the tree of
605 * indirect refs with missing keys. An appropriate key is located and
606 * the ref is moved onto the tree for indirect refs. After all missing
607 * keys are thus located, we iterate over the indirect ref tree, resolve
608 * each reference, and then insert the resolved reference onto the
609 * direct tree (merging there too).
610 *
611 * New backrefs (i.e., for parent nodes) are added to the appropriate
612 * rbtree as they are encountered. The new backrefs are subsequently
613 * resolved as above.
614 */
615 static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
616 struct btrfs_path *path, u64 time_seq,
617 struct preftrees *preftrees,
618 const u64 *extent_item_pos, u64 total_refs,
619 struct share_check *sc)
620 {
621 int err;
622 int ret = 0;
623 struct ulist *parents;
624 struct ulist_node *node;
625 struct ulist_iterator uiter;
626 struct rb_node *rnode;
627
628 parents = ulist_alloc(GFP_NOFS);
629 if (!parents)
630 return -ENOMEM;
631
632 /*
633 * We could trade memory usage for performance here by iterating
634 * the tree, allocating new refs for each insertion, and then
635 * freeing the entire indirect tree when we're done. In some test
636 * cases, the tree can grow quite large (~200k objects).
637 */
638 while ((rnode = rb_first(&preftrees->indirect.root))) {
639 struct prelim_ref *ref;
640
641 ref = rb_entry(rnode, struct prelim_ref, rbnode);
642 if (WARN(ref->parent,
643 "BUG: direct ref found in indirect tree")) {
644 ret = -EINVAL;
645 goto out;
646 }
647
648 rb_erase(&ref->rbnode, &preftrees->indirect.root);
649 preftrees->indirect.count--;
650
651 if (ref->count == 0) {
652 free_pref(ref);
653 continue;
654 }
655
656 if (sc && sc->root_objectid &&
657 ref->root_id != sc->root_objectid) {
658 free_pref(ref);
659 ret = BACKREF_FOUND_SHARED;
660 goto out;
661 }
662 err = resolve_indirect_ref(fs_info, path, time_seq, ref,
663 parents, extent_item_pos,
664 total_refs);
665 /*
666 * We can only tolerate -ENOENT; otherwise, we should catch the error
667 * and return directly.
668 */
669 if (err == -ENOENT) {
670 prelim_ref_insert(fs_info, &preftrees->direct, ref,
671 NULL);
672 continue;
673 } else if (err) {
674 free_pref(ref);
675 ret = err;
676 goto out;
677 }
678
679 /* we put the first parent into the ref at hand */
680 ULIST_ITER_INIT(&uiter);
681 node = ulist_next(parents, &uiter);
682 ref->parent = node ? node->val : 0;
683 ref->inode_list = unode_aux_to_inode_list(node);
684
685 /* Add a prelim_ref for each additional parent. */
686 while ((node = ulist_next(parents, &uiter))) {
687 struct prelim_ref *new_ref;
688
689 new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
690 GFP_NOFS);
691 if (!new_ref) {
692 free_pref(ref);
693 ret = -ENOMEM;
694 goto out;
695 }
696 memcpy(new_ref, ref, sizeof(*ref));
697 new_ref->parent = node->val;
698 new_ref->inode_list = unode_aux_to_inode_list(node);
699 prelim_ref_insert(fs_info, &preftrees->direct,
700 new_ref, NULL);
701 }
702
703 /*
704 * Now it's a direct ref, put it in the direct tree. We must
705 * do this last because the ref could be merged/freed here.
706 */
707 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
708
709 ulist_reinit(parents);
710 cond_resched();
711 }
712 out:
713 ulist_free(parents);
714 return ret;
715 }
716
717 /*
718 * read tree blocks and add keys where required.
719 */
720 static int add_missing_keys(struct btrfs_fs_info *fs_info,
721 struct preftrees *preftrees)
722 {
723 struct prelim_ref *ref;
724 struct extent_buffer *eb;
725 struct preftree *tree = &preftrees->indirect_missing_keys;
726 struct rb_node *node;
727
728 while ((node = rb_first(&tree->root))) {
729 ref = rb_entry(node, struct prelim_ref, rbnode);
730 rb_erase(node, &tree->root);
731
732 BUG_ON(ref->parent); /* should not be a direct ref */
733 BUG_ON(ref->key_for_search.type);
734 BUG_ON(!ref->wanted_disk_byte);
735
736 eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
737 if (IS_ERR(eb)) {
738 free_pref(ref);
739 return PTR_ERR(eb);
740 } else if (!extent_buffer_uptodate(eb)) {
741 free_pref(ref);
742 free_extent_buffer(eb);
743 return -EIO;
744 }
745 btrfs_tree_read_lock(eb);
746 if (btrfs_header_level(eb) == 0)
747 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
748 else
749 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
750 btrfs_tree_read_unlock(eb);
751 free_extent_buffer(eb);
752 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
753 cond_resched();
754 }
755 return 0;
756 }
757
758 /*
759 * add all currently queued delayed refs from this head whose seq nr is
760 * smaller than or equal to @seq to the preftrees
761 */
762 static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
763 struct btrfs_delayed_ref_head *head, u64 seq,
764 struct preftrees *preftrees, u64 *total_refs,
765 struct share_check *sc)
766 {
767 struct btrfs_delayed_ref_node *node;
768 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
769 struct btrfs_key key;
770 struct btrfs_key tmp_op_key;
771 struct btrfs_key *op_key = NULL;
772 int count;
773 int ret = 0;
774
775 if (extent_op && extent_op->update_key) {
776 btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
777 op_key = &tmp_op_key;
778 }
779
780 spin_lock(&head->lock);
781 list_for_each_entry(node, &head->ref_list, list) {
782 if (node->seq > seq)
783 continue;
784
785 switch (node->action) {
786 case BTRFS_ADD_DELAYED_EXTENT:
787 case BTRFS_UPDATE_DELAYED_HEAD:
788 WARN_ON(1);
789 continue;
790 case BTRFS_ADD_DELAYED_REF:
791 count = node->ref_mod;
792 break;
793 case BTRFS_DROP_DELAYED_REF:
794 count = node->ref_mod * -1;
795 break;
796 default:
797 BUG_ON(1);
798 }
799 *total_refs += count;
800 switch (node->type) {
801 case BTRFS_TREE_BLOCK_REF_KEY: {
802 /* NORMAL INDIRECT METADATA backref */
803 struct btrfs_delayed_tree_ref *ref;
804
805 ref = btrfs_delayed_node_to_tree_ref(node);
806 ret = add_indirect_ref(fs_info, preftrees, ref->root,
807 &tmp_op_key, ref->level + 1,
808 node->bytenr, count, sc,
809 GFP_ATOMIC);
810 break;
811 }
812 case BTRFS_SHARED_BLOCK_REF_KEY: {
813 /* SHARED DIRECT METADATA backref */
814 struct btrfs_delayed_tree_ref *ref;
815
816 ref = btrfs_delayed_node_to_tree_ref(node);
817
818 ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
819 ref->parent, node->bytenr, count,
820 sc, GFP_ATOMIC);
821 break;
822 }
823 case BTRFS_EXTENT_DATA_REF_KEY: {
824 /* NORMAL INDIRECT DATA backref */
825 struct btrfs_delayed_data_ref *ref;
826 ref = btrfs_delayed_node_to_data_ref(node);
827
828 key.objectid = ref->objectid;
829 key.type = BTRFS_EXTENT_DATA_KEY;
830 key.offset = ref->offset;
831
832 /*
833 * Found an inum that doesn't match our known inum, we
834 * know it's shared.
835 */
836 if (sc && sc->inum && ref->objectid != sc->inum) {
837 ret = BACKREF_FOUND_SHARED;
838 goto out;
839 }
840
841 ret = add_indirect_ref(fs_info, preftrees, ref->root,
842 &key, 0, node->bytenr, count, sc,
843 GFP_ATOMIC);
844 break;
845 }
846 case BTRFS_SHARED_DATA_REF_KEY: {
847 /* SHARED DIRECT FULL backref */
848 struct btrfs_delayed_data_ref *ref;
849
850 ref = btrfs_delayed_node_to_data_ref(node);
851
852 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
853 node->bytenr, count, sc,
854 GFP_ATOMIC);
855 break;
856 }
857 default:
858 WARN_ON(1);
859 }
860 /*
861 * We must ignore BACKREF_FOUND_SHARED until all delayed
862 * refs have been checked.
863 */
864 if (ret && (ret != BACKREF_FOUND_SHARED))
865 break;
866 }
867 if (!ret)
868 ret = extent_is_shared(sc);
869 out:
870 spin_unlock(&head->lock);
871 return ret;
872 }
873
874 /*
875 * add all inline backrefs for bytenr to the preftrees
876 *
877 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
878 */
879 static int add_inline_refs(const struct btrfs_fs_info *fs_info,
880 struct btrfs_path *path, u64 bytenr,
881 int *info_level, struct preftrees *preftrees,
882 u64 *total_refs, struct share_check *sc)
883 {
884 int ret = 0;
885 int slot;
886 struct extent_buffer *leaf;
887 struct btrfs_key key;
888 struct btrfs_key found_key;
889 unsigned long ptr;
890 unsigned long end;
891 struct btrfs_extent_item *ei;
892 u64 flags;
893 u64 item_size;
894
895 /*
896 * enumerate all inline refs
897 */
898 leaf = path->nodes[0];
899 slot = path->slots[0];
900
901 item_size = btrfs_item_size_nr(leaf, slot);
902 BUG_ON(item_size < sizeof(*ei));
903
904 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
905 flags = btrfs_extent_flags(leaf, ei);
906 *total_refs += btrfs_extent_refs(leaf, ei);
907 btrfs_item_key_to_cpu(leaf, &found_key, slot);
908
909 ptr = (unsigned long)(ei + 1);
910 end = (unsigned long)ei + item_size;
911
912 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
913 flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
914 struct btrfs_tree_block_info *info;
915
916 info = (struct btrfs_tree_block_info *)ptr;
917 *info_level = btrfs_tree_block_level(leaf, info);
918 ptr += sizeof(struct btrfs_tree_block_info);
919 BUG_ON(ptr > end);
920 } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
921 *info_level = found_key.offset;
922 } else {
923 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
924 }
925
926 while (ptr < end) {
927 struct btrfs_extent_inline_ref *iref;
928 u64 offset;
929 int type;
930
931 iref = (struct btrfs_extent_inline_ref *)ptr;
932 type = btrfs_get_extent_inline_ref_type(leaf, iref,
933 BTRFS_REF_TYPE_ANY);
934 if (type == BTRFS_REF_TYPE_INVALID)
935 return -EINVAL;
936
937 offset = btrfs_extent_inline_ref_offset(leaf, iref);
938
939 switch (type) {
940 case BTRFS_SHARED_BLOCK_REF_KEY:
941 ret = add_direct_ref(fs_info, preftrees,
942 *info_level + 1, offset,
943 bytenr, 1, NULL, GFP_NOFS);
944 break;
945 case BTRFS_SHARED_DATA_REF_KEY: {
946 struct btrfs_shared_data_ref *sdref;
947 int count;
948
949 sdref = (struct btrfs_shared_data_ref *)(iref + 1);
950 count = btrfs_shared_data_ref_count(leaf, sdref);
951
952 ret = add_direct_ref(fs_info, preftrees, 0, offset,
953 bytenr, count, sc, GFP_NOFS);
954 break;
955 }
956 case BTRFS_TREE_BLOCK_REF_KEY:
957 ret = add_indirect_ref(fs_info, preftrees, offset,
958 NULL, *info_level + 1,
959 bytenr, 1, NULL, GFP_NOFS);
960 break;
961 case BTRFS_EXTENT_DATA_REF_KEY: {
962 struct btrfs_extent_data_ref *dref;
963 int count;
964 u64 root;
965
966 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
967 count = btrfs_extent_data_ref_count(leaf, dref);
968 key.objectid = btrfs_extent_data_ref_objectid(leaf,
969 dref);
970 key.type = BTRFS_EXTENT_DATA_KEY;
971 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
972
973 if (sc && sc->inum && key.objectid != sc->inum) {
974 ret = BACKREF_FOUND_SHARED;
975 break;
976 }
977
978 root = btrfs_extent_data_ref_root(leaf, dref);
979
980 ret = add_indirect_ref(fs_info, preftrees, root,
981 &key, 0, bytenr, count,
982 sc, GFP_NOFS);
983 break;
984 }
985 default:
986 WARN_ON(1);
987 }
988 if (ret)
989 return ret;
990 ptr += btrfs_extent_inline_ref_size(type);
991 }
992
993 return 0;
994 }
995
996 /*
997 * add all non-inline backrefs for bytenr to the preftrees
998 *
999 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1000 */
1001 static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1002 struct btrfs_path *path, u64 bytenr,
1003 int info_level, struct preftrees *preftrees,
1004 struct share_check *sc)
1005 {
1006 struct btrfs_root *extent_root = fs_info->extent_root;
1007 int ret;
1008 int slot;
1009 struct extent_buffer *leaf;
1010 struct btrfs_key key;
1011
1012 while (1) {
1013 ret = btrfs_next_item(extent_root, path);
1014 if (ret < 0)
1015 break;
1016 if (ret) {
1017 ret = 0;
1018 break;
1019 }
1020
1021 slot = path->slots[0];
1022 leaf = path->nodes[0];
1023 btrfs_item_key_to_cpu(leaf, &key, slot);
1024
1025 if (key.objectid != bytenr)
1026 break;
1027 if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1028 continue;
1029 if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1030 break;
1031
1032 switch (key.type) {
1033 case BTRFS_SHARED_BLOCK_REF_KEY:
1034 /* SHARED DIRECT METADATA backref */
1035 ret = add_direct_ref(fs_info, preftrees,
1036 info_level + 1, key.offset,
1037 bytenr, 1, NULL, GFP_NOFS);
1038 break;
1039 case BTRFS_SHARED_DATA_REF_KEY: {
1040 /* SHARED DIRECT FULL backref */
1041 struct btrfs_shared_data_ref *sdref;
1042 int count;
1043
1044 sdref = btrfs_item_ptr(leaf, slot,
1045 struct btrfs_shared_data_ref);
1046 count = btrfs_shared_data_ref_count(leaf, sdref);
1047 ret = add_direct_ref(fs_info, preftrees, 0,
1048 key.offset, bytenr, count,
1049 sc, GFP_NOFS);
1050 break;
1051 }
1052 case BTRFS_TREE_BLOCK_REF_KEY:
1053 /* NORMAL INDIRECT METADATA backref */
1054 ret = add_indirect_ref(fs_info, preftrees, key.offset,
1055 NULL, info_level + 1, bytenr,
1056 1, NULL, GFP_NOFS);
1057 break;
1058 case BTRFS_EXTENT_DATA_REF_KEY: {
1059 /* NORMAL INDIRECT DATA backref */
1060 struct btrfs_extent_data_ref *dref;
1061 int count;
1062 u64 root;
1063
1064 dref = btrfs_item_ptr(leaf, slot,
1065 struct btrfs_extent_data_ref);
1066 count = btrfs_extent_data_ref_count(leaf, dref);
1067 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1068 dref);
1069 key.type = BTRFS_EXTENT_DATA_KEY;
1070 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1071
1072 if (sc && sc->inum && key.objectid != sc->inum) {
1073 ret = BACKREF_FOUND_SHARED;
1074 break;
1075 }
1076
1077 root = btrfs_extent_data_ref_root(leaf, dref);
1078 ret = add_indirect_ref(fs_info, preftrees, root,
1079 &key, 0, bytenr, count,
1080 sc, GFP_NOFS);
1081 break;
1082 }
1083 default:
1084 WARN_ON(1);
1085 }
1086 if (ret)
1087 return ret;
1088
1089 }
1090
1091 return ret;
1092 }
1093
1094 /*
1095 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1096 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1097 * indirect refs to their parent bytenr.
1098 * When roots are found, they're added to the roots list
1099 *
1100 * If time_seq is set to SEQ_LAST, it will not search delayed_refs and will
1101 * behave much like the trans == NULL case; the only difference is that it
1102 * will not use the commit root.
1103 * The special case is for qgroup to search roots in commit_transaction().
1104 *
1105 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1106 * shared extent is detected.
1107 *
1108 * Otherwise this returns 0 for success and <0 for an error.
1109 *
1110 * FIXME some caching might speed things up
1111 */
1112 static int find_parent_nodes(struct btrfs_trans_handle *trans,
1113 struct btrfs_fs_info *fs_info, u64 bytenr,
1114 u64 time_seq, struct ulist *refs,
1115 struct ulist *roots, const u64 *extent_item_pos,
1116 struct share_check *sc)
1117 {
1118 struct btrfs_key key;
1119 struct btrfs_path *path;
1120 struct btrfs_delayed_ref_root *delayed_refs = NULL;
1121 struct btrfs_delayed_ref_head *head;
1122 int info_level = 0;
1123 int ret;
1124 struct prelim_ref *ref;
1125 struct rb_node *node;
1126 struct extent_inode_elem *eie = NULL;
1127 /* total of both direct AND indirect refs! */
1128 u64 total_refs = 0;
1129 struct preftrees preftrees = {
1130 .direct = PREFTREE_INIT,
1131 .indirect = PREFTREE_INIT,
1132 .indirect_missing_keys = PREFTREE_INIT
1133 };
1134
1135 key.objectid = bytenr;
1136 key.offset = (u64)-1;
1137 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1138 key.type = BTRFS_METADATA_ITEM_KEY;
1139 else
1140 key.type = BTRFS_EXTENT_ITEM_KEY;
1141
1142 path = btrfs_alloc_path();
1143 if (!path)
1144 return -ENOMEM;
1145 if (!trans) {
1146 path->search_commit_root = 1;
1147 path->skip_locking = 1;
1148 }
1149
1150 if (time_seq == SEQ_LAST)
1151 path->skip_locking = 1;
1152
1153 /*
1154 * grab both a lock on the path and a lock on the delayed ref head.
1155 * We need both to get a consistent picture of how the refs look
1156 * at a specified point in time
1157 */
1158 again:
1159 head = NULL;
1160
1161 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1162 if (ret < 0)
1163 goto out;
1164 BUG_ON(ret == 0);
1165
1166 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1167 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1168 time_seq != SEQ_LAST) {
1169 #else
1170 if (trans && time_seq != SEQ_LAST) {
1171 #endif
1172 /*
1173 * check whether there are updates queued for this ref and lock
1174 * the head
1175 */
1176 delayed_refs = &trans->transaction->delayed_refs;
1177 spin_lock(&delayed_refs->lock);
1178 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1179 if (head) {
1180 if (!mutex_trylock(&head->mutex)) {
1181 refcount_inc(&head->node.refs);
1182 spin_unlock(&delayed_refs->lock);
1183
1184 btrfs_release_path(path);
1185
1186 /*
1187 * Mutex was contended, block until it's
1188 * released and try again
1189 */
1190 mutex_lock(&head->mutex);
1191 mutex_unlock(&head->mutex);
1192 btrfs_put_delayed_ref(&head->node);
1193 goto again;
1194 }
1195 spin_unlock(&delayed_refs->lock);
1196 ret = add_delayed_refs(fs_info, head, time_seq,
1197 &preftrees, &total_refs, sc);
1198 mutex_unlock(&head->mutex);
1199 if (ret)
1200 goto out;
1201 } else {
1202 spin_unlock(&delayed_refs->lock);
1203 }
1204 }
1205
1206 if (path->slots[0]) {
1207 struct extent_buffer *leaf;
1208 int slot;
1209
1210 path->slots[0]--;
1211 leaf = path->nodes[0];
1212 slot = path->slots[0];
1213 btrfs_item_key_to_cpu(leaf, &key, slot);
1214 if (key.objectid == bytenr &&
1215 (key.type == BTRFS_EXTENT_ITEM_KEY ||
1216 key.type == BTRFS_METADATA_ITEM_KEY)) {
1217 ret = add_inline_refs(fs_info, path, bytenr,
1218 &info_level, &preftrees,
1219 &total_refs, sc);
1220 if (ret)
1221 goto out;
1222 ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1223 &preftrees, sc);
1224 if (ret)
1225 goto out;
1226 }
1227 }
1228
1229 btrfs_release_path(path);
1230
1231 ret = add_missing_keys(fs_info, &preftrees);
1232 if (ret)
1233 goto out;
1234
1235 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));
1236
1237 ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1238 extent_item_pos, total_refs, sc);
1239 if (ret)
1240 goto out;
1241
1242 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));
1243
1244 /*
1245 * This walks the tree of merged and resolved refs. Tree blocks are
1246 * read in as needed. Unique entries are added to the ulist, and
1247 * the list of found roots is updated.
1248 *
1249 * We release the entire tree in one go before returning.
1250 */
1251 node = rb_first(&preftrees.direct.root);
1252 while (node) {
1253 ref = rb_entry(node, struct prelim_ref, rbnode);
1254 node = rb_next(&ref->rbnode);
1255 WARN_ON(ref->count < 0);
1256 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1257 if (sc && sc->root_objectid &&
1258 ref->root_id != sc->root_objectid) {
1259 ret = BACKREF_FOUND_SHARED;
1260 goto out;
1261 }
1262
1263 /* no parent == root of tree */
1264 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1265 if (ret < 0)
1266 goto out;
1267 }
1268 if (ref->count && ref->parent) {
1269 if (extent_item_pos && !ref->inode_list &&
1270 ref->level == 0) {
1271 struct extent_buffer *eb;
1272
1273 eb = read_tree_block(fs_info, ref->parent, 0);
1274 if (IS_ERR(eb)) {
1275 ret = PTR_ERR(eb);
1276 goto out;
1277 } else if (!extent_buffer_uptodate(eb)) {
1278 free_extent_buffer(eb);
1279 ret = -EIO;
1280 goto out;
1281 }
1282 btrfs_tree_read_lock(eb);
1283 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1284 ret = find_extent_in_eb(eb, bytenr,
1285 *extent_item_pos, &eie);
1286 btrfs_tree_read_unlock_blocking(eb);
1287 free_extent_buffer(eb);
1288 if (ret < 0)
1289 goto out;
1290 ref->inode_list = eie;
1291 }
1292 ret = ulist_add_merge_ptr(refs, ref->parent,
1293 ref->inode_list,
1294 (void **)&eie, GFP_NOFS);
1295 if (ret < 0)
1296 goto out;
1297 if (!ret && extent_item_pos) {
1298 /*
1299 * we've recorded that parent, so we must extend
1300 * its inode list here
1301 */
1302 BUG_ON(!eie);
1303 while (eie->next)
1304 eie = eie->next;
1305 eie->next = ref->inode_list;
1306 }
1307 eie = NULL;
1308 }
1309 cond_resched();
1310 }
1311
1312 out:
1313 btrfs_free_path(path);
1314
1315 prelim_release(&preftrees.direct);
1316 prelim_release(&preftrees.indirect);
1317 prelim_release(&preftrees.indirect_missing_keys);
1318
1319 if (ret < 0)
1320 free_inode_elem_list(eie);
1321 return ret;
1322 }
1323
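/*
 * Free a ulist of leaf blocks as returned by btrfs_find_all_leafs(),
 * including the inode lists hanging off each node's aux pointer.
 */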
1324 static void free_leaf_list(struct ulist *blocks)
1325 {
1326 struct ulist_node *node = NULL;
1327 struct extent_inode_elem *eie;
1328 struct ulist_iterator uiter;
1329
1330 ULIST_ITER_INIT(&uiter);
1331 while ((node = ulist_next(blocks, &uiter))) {
1332 if (!node->aux)
1333 continue;
1334 eie = unode_aux_to_inode_list(node);
1335 free_inode_elem_list(eie);
1336 node->aux = 0;
1337 }
1338
1339 ulist_free(blocks);
1340 }
1341
1342 /*
1343 * Finds all leafs with a reference to the specified combination of bytenr
1344 * and offset. The leafs will be stored in the *leafs ulist, which must be
1345 * freed with ulist_free.
1347 *
1348 * returns 0 on success, <0 on error
1349 */
1350 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1351 struct btrfs_fs_info *fs_info, u64 bytenr,
1352 u64 time_seq, struct ulist **leafs,
1353 const u64 *extent_item_pos)
1354 {
1355 int ret;
1356
1357 *leafs = ulist_alloc(GFP_NOFS);
1358 if (!*leafs)
1359 return -ENOMEM;
1360
1361 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1362 *leafs, NULL, extent_item_pos, NULL);
1363 if (ret < 0 && ret != -ENOENT) {
1364 free_leaf_list(*leafs);
1365 return ret;
1366 }
1367
1368 return 0;
1369 }
1370
1371 /*
1372 * walk all backrefs for a given extent to find all roots that reference this
1373 * extent. Walking a backref means finding all extents that reference this
1374 * extent and in turn walk the backrefs of those, too. Naturally this is a
1375 * recursive process, but here it is implemented in an iterative fashion: We
1376 * find all referencing extents for the extent in question and put them on a
1377 * list. In turn, we find all referencing extents for those, further appending
1378 * to the list. The way we iterate the list allows adding more elements after
1379 * the current while iterating. The process stops when we reach the end of the
1380 * list. Found roots are added to the roots list.
1381 *
1382 * returns 0 on success, < 0 on error.
1383 */
1384 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1385 struct btrfs_fs_info *fs_info, u64 bytenr,
1386 u64 time_seq, struct ulist **roots)
1387 {
1388 struct ulist *tmp;
1389 struct ulist_node *node = NULL;
1390 struct ulist_iterator uiter;
1391 int ret;
1392
1393 tmp = ulist_alloc(GFP_NOFS);
1394 if (!tmp)
1395 return -ENOMEM;
1396 *roots = ulist_alloc(GFP_NOFS);
1397 if (!*roots) {
1398 ulist_free(tmp);
1399 return -ENOMEM;
1400 }
1401
1402 ULIST_ITER_INIT(&uiter);
1403 while (1) {
1404 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1405 tmp, *roots, NULL, NULL);
1406 if (ret < 0 && ret != -ENOENT) {
1407 ulist_free(tmp);
1408 ulist_free(*roots);
1409 return ret;
1410 }
1411 node = ulist_next(tmp, &uiter);
1412 if (!node)
1413 break;
1414 bytenr = node->val;
1415 cond_resched();
1416 }
1417
1418 ulist_free(tmp);
1419 return 0;
1420 }
1421
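/*
 * Wrapper around btrfs_find_all_roots_safe() that takes commit_root_sem when
 * no transaction handle is given.
 */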
1422 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1423 struct btrfs_fs_info *fs_info, u64 bytenr,
1424 u64 time_seq, struct ulist **roots)
1425 {
1426 int ret;
1427
1428 if (!trans)
1429 down_read(&fs_info->commit_root_sem);
1430 ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1431 time_seq, roots);
1432 if (!trans)
1433 up_read(&fs_info->commit_root_sem);
1434 return ret;
1435 }
1436
1437 /**
1438 * btrfs_check_shared - tell us whether an extent is shared
1439 *
1440 * btrfs_check_shared uses the backref walking code but will short
1441 * circuit as soon as it finds a root or inode that doesn't match the
1442 * one passed in. This provides a significant performance benefit for
1443 * callers (such as fiemap) which want to know whether the extent is
1444 * shared but do not need a ref count.
1445 *
1446 * This attempts to allocate a transaction in order to account for
1447 * delayed refs, but continues on even when the alloc fails.
1448 *
1449 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1450 */
1451 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
1452 {
1453 struct btrfs_fs_info *fs_info = root->fs_info;
1454 struct btrfs_trans_handle *trans;
1455 struct ulist *tmp = NULL;
1456 struct ulist *roots = NULL;
1457 struct ulist_iterator uiter;
1458 struct ulist_node *node;
1459 struct seq_list elem = SEQ_LIST_INIT(elem);
1460 int ret = 0;
1461 struct share_check shared = {
1462 .root_objectid = root->objectid,
1463 .inum = inum,
1464 .share_count = 0,
1465 };
1466
1467 tmp = ulist_alloc(GFP_NOFS);
1468 roots = ulist_alloc(GFP_NOFS);
1469 if (!tmp || !roots) {
1470 ulist_free(tmp);
1471 ulist_free(roots);
1472 return -ENOMEM;
1473 }
1474
1475 trans = btrfs_join_transaction(root);
1476 if (IS_ERR(trans)) {
1477 trans = NULL;
1478 down_read(&fs_info->commit_root_sem);
1479 } else {
1480 btrfs_get_tree_mod_seq(fs_info, &elem);
1481 }
1482
1483 ULIST_ITER_INIT(&uiter);
1484 while (1) {
1485 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1486 roots, NULL, &shared);
1487 if (ret == BACKREF_FOUND_SHARED) {
1488 /* this is the only condition under which we return 1 */
1489 ret = 1;
1490 break;
1491 }
1492 if (ret < 0 && ret != -ENOENT)
1493 break;
1494 ret = 0;
1495 node = ulist_next(tmp, &uiter);
1496 if (!node)
1497 break;
1498 bytenr = node->val;
1499 cond_resched();
1500 }
1501
1502 if (trans) {
1503 btrfs_put_tree_mod_seq(fs_info, &elem);
1504 btrfs_end_transaction(trans);
1505 } else {
1506 up_read(&fs_info->commit_root_sem);
1507 }
1508 ulist_free(tmp);
1509 ulist_free(roots);
1510 return ret;
1511 }
1512
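/*
 * Find the first INODE_EXTREF item of @inode_objectid with a key offset of at
 * least @start_off. On success *ret_extref points into the leaf held by @path
 * and *found_off, if non-NULL, receives the item's key offset.
 *
 * Returns 0 on success, -ENOENT when no further extref exists, <0 on error.
 */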
1513 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1514 u64 start_off, struct btrfs_path *path,
1515 struct btrfs_inode_extref **ret_extref,
1516 u64 *found_off)
1517 {
1518 int ret, slot;
1519 struct btrfs_key key;
1520 struct btrfs_key found_key;
1521 struct btrfs_inode_extref *extref;
1522 const struct extent_buffer *leaf;
1523 unsigned long ptr;
1524
1525 key.objectid = inode_objectid;
1526 key.type = BTRFS_INODE_EXTREF_KEY;
1527 key.offset = start_off;
1528
1529 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1530 if (ret < 0)
1531 return ret;
1532
1533 while (1) {
1534 leaf = path->nodes[0];
1535 slot = path->slots[0];
1536 if (slot >= btrfs_header_nritems(leaf)) {
1537 /*
1538 * If the item at offset is not found,
1539 * btrfs_search_slot will point us to the slot
1540 * where it should be inserted. In our case
1541 * that will be the slot directly before the
1542 * next INODE_REF_KEY_V2 item. In the case
1543 * that we're pointing to the last slot in a
1544 * leaf, we must move one leaf over.
1545 */
1546 ret = btrfs_next_leaf(root, path);
1547 if (ret) {
1548 if (ret >= 1)
1549 ret = -ENOENT;
1550 break;
1551 }
1552 continue;
1553 }
1554
1555 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1556
1557 /*
1558 * Check that we're still looking at an extended ref key for
1559 * this particular objectid. If we have a different
1560 * objectid or type then there are no more to be found
1561 * in the tree and we can exit.
1562 */
1563 ret = -ENOENT;
1564 if (found_key.objectid != inode_objectid)
1565 break;
1566 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1567 break;
1568
1569 ret = 0;
1570 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1571 extref = (struct btrfs_inode_extref *)ptr;
1572 *ret_extref = extref;
1573 if (found_off)
1574 *found_off = found_key.offset;
1575 break;
1576 }
1577
1578 return ret;
1579 }
1580
1581 /*
1582 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1583 * Elements of the path are separated by '/' and the path is guaranteed to be
1584 * 0-terminated. the path is only given within the current file system.
1585 * Therefore, it never starts with a '/'. the caller is responsible for providing
1586 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1587 * the start point of the resulting string is returned. this pointer is within
1588 * dest, normally.
1589 * in case the path buffer would overflow, the pointer is decremented further
1590 * as if output was written to the buffer, though no more output is actually
1591 * generated. that way, the caller can determine how much space would be
1592 * required for the path to fit into the buffer. in that case, the returned
1593 * value will be smaller than dest. callers must check this!
1594 */
1595 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1596 u32 name_len, unsigned long name_off,
1597 struct extent_buffer *eb_in, u64 parent,
1598 char *dest, u32 size)
1599 {
1600 int slot;
1601 u64 next_inum;
1602 int ret;
1603 s64 bytes_left = ((s64)size) - 1;
1604 struct extent_buffer *eb = eb_in;
1605 struct btrfs_key found_key;
1606 int leave_spinning = path->leave_spinning;
1607 struct btrfs_inode_ref *iref;
1608
1609 if (bytes_left >= 0)
1610 dest[bytes_left] = '\0';
1611
1612 path->leave_spinning = 1;
1613 while (1) {
1614 bytes_left -= name_len;
1615 if (bytes_left >= 0)
1616 read_extent_buffer(eb, dest + bytes_left,
1617 name_off, name_len);
1618 if (eb != eb_in) {
1619 if (!path->skip_locking)
1620 btrfs_tree_read_unlock_blocking(eb);
1621 free_extent_buffer(eb);
1622 }
1623 ret = btrfs_find_item(fs_root, path, parent, 0,
1624 BTRFS_INODE_REF_KEY, &found_key);
1625 if (ret > 0)
1626 ret = -ENOENT;
1627 if (ret)
1628 break;
1629
1630 next_inum = found_key.offset;
1631
1632 /* regular exit ahead */
1633 if (parent == next_inum)
1634 break;
1635
1636 slot = path->slots[0];
1637 eb = path->nodes[0];
1638 /* make sure we can use eb after releasing the path */
1639 if (eb != eb_in) {
1640 if (!path->skip_locking)
1641 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1642 path->nodes[0] = NULL;
1643 path->locks[0] = 0;
1644 }
1645 btrfs_release_path(path);
1646 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1647
1648 name_len = btrfs_inode_ref_name_len(eb, iref);
1649 name_off = (unsigned long)(iref + 1);
1650
1651 parent = next_inum;
1652 --bytes_left;
1653 if (bytes_left >= 0)
1654 dest[bytes_left] = '/';
1655 }
1656
1657 btrfs_release_path(path);
1658 path->leave_spinning = leave_spinning;
1659
1660 if (ret)
1661 return ERR_PTR(ret);
1662
1663 return dest + bytes_left;
1664 }
1665
1666 /*
1667 * this makes the path point to (logical EXTENT_ITEM *)
1668 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1669 * tree blocks and <0 on error.
1670 */
1671 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1672 struct btrfs_path *path, struct btrfs_key *found_key,
1673 u64 *flags_ret)
1674 {
1675 int ret;
1676 u64 flags;
1677 u64 size = 0;
1678 u32 item_size;
1679 const struct extent_buffer *eb;
1680 struct btrfs_extent_item *ei;
1681 struct btrfs_key key;
1682
1683 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1684 key.type = BTRFS_METADATA_ITEM_KEY;
1685 else
1686 key.type = BTRFS_EXTENT_ITEM_KEY;
1687 key.objectid = logical;
1688 key.offset = (u64)-1;
1689
1690 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1691 if (ret < 0)
1692 return ret;
1693
1694 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1695 if (ret) {
1696 if (ret > 0)
1697 ret = -ENOENT;
1698 return ret;
1699 }
1700 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1701 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1702 size = fs_info->nodesize;
1703 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1704 size = found_key->offset;
1705
1706 if (found_key->objectid > logical ||
1707 found_key->objectid + size <= logical) {
1708 btrfs_debug(fs_info,
1709 "logical %llu is not within any extent", logical);
1710 return -ENOENT;
1711 }
1712
1713 eb = path->nodes[0];
1714 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1715 BUG_ON(item_size < sizeof(*ei));
1716
1717 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1718 flags = btrfs_extent_flags(eb, ei);
1719
1720 btrfs_debug(fs_info,
1721 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1722 logical, logical - found_key->objectid, found_key->objectid,
1723 found_key->offset, flags, item_size);
1724
1725 WARN_ON(!flags_ret);
1726 if (flags_ret) {
1727 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1728 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1729 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1730 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1731 else
1732 BUG_ON(1);
1733 return 0;
1734 }
1735
1736 return -EIO;
1737 }
1738
1739 /*
1740 * helper function to iterate extent inline refs. ptr must point to a 0 value
1741 * for the first call and may be modified. it is used to track state.
1742 * if more refs exist, 0 is returned and the next call to
1743 * get_extent_inline_ref must pass the modified ptr parameter to get the
1744 * next ref. after the last ref was processed, 1 is returned.
1745 * returns <0 on error
1746 */
1747 static int get_extent_inline_ref(unsigned long *ptr,
1748 const struct extent_buffer *eb,
1749 const struct btrfs_key *key,
1750 const struct btrfs_extent_item *ei,
1751 u32 item_size,
1752 struct btrfs_extent_inline_ref **out_eiref,
1753 int *out_type)
1754 {
1755 unsigned long end;
1756 u64 flags;
1757 struct btrfs_tree_block_info *info;
1758
1759 if (!*ptr) {
1760 /* first call */
1761 flags = btrfs_extent_flags(eb, ei);
1762 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1763 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1764 /* a skinny metadata extent */
1765 *out_eiref =
1766 (struct btrfs_extent_inline_ref *)(ei + 1);
1767 } else {
1768 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1769 info = (struct btrfs_tree_block_info *)(ei + 1);
1770 *out_eiref =
1771 (struct btrfs_extent_inline_ref *)(info + 1);
1772 }
1773 } else {
1774 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1775 }
1776 *ptr = (unsigned long)*out_eiref;
1777 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1778 return -ENOENT;
1779 }
1780
1781 end = (unsigned long)ei + item_size;
1782 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1783 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1784 BTRFS_REF_TYPE_ANY);
1785 if (*out_type == BTRFS_REF_TYPE_INVALID)
1786 return -EINVAL;
1787
1788 *ptr += btrfs_extent_inline_ref_size(*out_type);
1789 WARN_ON(*ptr > end);
1790 if (*ptr == end)
1791 return 1; /* last */
1792
1793 return 0;
1794 }
1795
1796 /*
1797 * reads the tree block backref for an extent. tree level and root are returned
1798 * through out_level and out_root. ptr must point to a 0 value for the first
1799 * call and may be modified (see get_extent_inline_ref comment).
1800 * returns 0 if data was provided, 1 if there was no more data to provide or
1801 * <0 on error.
1802 */
1803 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1804 struct btrfs_key *key, struct btrfs_extent_item *ei,
1805 u32 item_size, u64 *out_root, u8 *out_level)
1806 {
1807 int ret;
1808 int type;
1809 struct btrfs_extent_inline_ref *eiref;
1810
1811 if (*ptr == (unsigned long)-1)
1812 return 1;
1813
1814 while (1) {
1815 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1816 &eiref, &type);
1817 if (ret < 0)
1818 return ret;
1819
1820 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1821 type == BTRFS_SHARED_BLOCK_REF_KEY)
1822 break;
1823
1824 if (ret == 1)
1825 return 1;
1826 }
1827
1828 /* we can treat both ref types equally here */
1829 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1830
1831 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1832 struct btrfs_tree_block_info *info;
1833
1834 info = (struct btrfs_tree_block_info *)(ei + 1);
1835 *out_level = btrfs_tree_block_level(eb, info);
1836 } else {
1837 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1838 *out_level = (u8)key->offset;
1839 }
1840
1841 if (ret == 1)
1842 *ptr = (unsigned long)-1;
1843
1844 return 0;
1845 }
1846
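/*
 * Call @iterate for every (inum, offset) pair in @inode_list that was
 * resolved for extent @extent_item_objectid in root @root. Iteration stops
 * at the first nonzero return value, which is passed back to the caller.
 */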
1847 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1848 struct extent_inode_elem *inode_list,
1849 u64 root, u64 extent_item_objectid,
1850 iterate_extent_inodes_t *iterate, void *ctx)
1851 {
1852 struct extent_inode_elem *eie;
1853 int ret = 0;
1854
1855 for (eie = inode_list; eie; eie = eie->next) {
1856 btrfs_debug(fs_info,
1857 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1858 extent_item_objectid, eie->inum,
1859 eie->offset, root);
1860 ret = iterate(eie->inum, eie->offset, root, ctx);
1861 if (ret) {
1862 btrfs_debug(fs_info,
1863 "stopping iteration for %llu due to ret=%d",
1864 extent_item_objectid, ret);
1865 break;
1866 }
1867 }
1868
1869 return ret;
1870 }
1871
1872 /*
1873 * calls iterate() for every inode that references the extent identified by
1874 * the given parameters.
1875 * when the iterator function returns a non-zero value, iteration stops.
1876 */
1877 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1878 u64 extent_item_objectid, u64 extent_item_pos,
1879 int search_commit_root,
1880 iterate_extent_inodes_t *iterate, void *ctx)
1881 {
1882 int ret;
1883 struct btrfs_trans_handle *trans = NULL;
1884 struct ulist *refs = NULL;
1885 struct ulist *roots = NULL;
1886 struct ulist_node *ref_node = NULL;
1887 struct ulist_node *root_node = NULL;
1888 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1889 struct ulist_iterator ref_uiter;
1890 struct ulist_iterator root_uiter;
1891
1892 btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1893 extent_item_objectid);
1894
1895 if (!search_commit_root) {
1896 trans = btrfs_join_transaction(fs_info->extent_root);
1897 if (IS_ERR(trans))
1898 return PTR_ERR(trans);
1899 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1900 } else {
1901 down_read(&fs_info->commit_root_sem);
1902 }
1903
1904 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1905 tree_mod_seq_elem.seq, &refs,
1906 &extent_item_pos);
1907 if (ret)
1908 goto out;
1909
1910 ULIST_ITER_INIT(&ref_uiter);
1911 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1912 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1913 tree_mod_seq_elem.seq, &roots);
1914 if (ret)
1915 break;
1916 ULIST_ITER_INIT(&root_uiter);
1917 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1918 btrfs_debug(fs_info,
1919 "root %llu references leaf %llu, data list %#llx",
1920 root_node->val, ref_node->val,
1921 ref_node->aux);
1922 ret = iterate_leaf_refs(fs_info,
1923 (struct extent_inode_elem *)
1924 (uintptr_t)ref_node->aux,
1925 root_node->val,
1926 extent_item_objectid,
1927 iterate, ctx);
1928 }
1929 ulist_free(roots);
1930 }
1931
1932 free_leaf_list(refs);
1933 out:
1934 if (!search_commit_root) {
1935 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1936 btrfs_end_transaction(trans);
1937 } else {
1938 up_read(&fs_info->commit_root_sem);
1939 }
1940
1941 return ret;
1942 }
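
/*
 * Editor's sketch, not part of the original file: a minimal example of a
 * callback matching iterate_extent_inodes_t and of a call into
 * iterate_extent_inodes(). The struct and function names below
 * (count_refs_ctx, count_extent_inodes, count_refs_for_extent) are
 * hypothetical and exist only for illustration.
 */
struct count_refs_ctx {
	u64 count;	/* number of (inode, offset, root) references seen */
};

/* signature matches iterate_extent_inodes_t */
static int count_extent_inodes(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct count_refs_ctx *c = ctx;

	c->count++;
	return 0;	/* a non-zero return value would stop the iteration */
}

static int count_refs_for_extent(struct btrfs_fs_info *fs_info,
				 u64 extent_bytenr, u64 extent_item_pos)
{
	struct count_refs_ctx c = { 0 };
	int ret;

	/* search the live tree; pass 1 to search the commit root instead */
	ret = iterate_extent_inodes(fs_info, extent_bytenr, extent_item_pos,
				    0, count_extent_inodes, &c);
	if (ret < 0)
		return ret;

	btrfs_debug(fs_info, "extent %llu has %llu inode references",
		    extent_bytenr, c.count);
	return 0;
}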
1943
1944 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1945 struct btrfs_path *path,
1946 iterate_extent_inodes_t *iterate, void *ctx)
1947 {
1948 int ret;
1949 u64 extent_item_pos;
1950 u64 flags = 0;
1951 struct btrfs_key found_key;
1952 int search_commit_root = path->search_commit_root;
1953
1954 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1955 btrfs_release_path(path);
1956 if (ret < 0)
1957 return ret;
1958 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1959 return -EINVAL;
1960
1961 extent_item_pos = logical - found_key.objectid;
1962 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1963 extent_item_pos, search_commit_root,
1964 iterate, ctx);
1965
1966 return ret;
1967 }
1968
1969 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1970 struct extent_buffer *eb, void *ctx);
1971
1972 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1973 struct btrfs_path *path,
1974 iterate_irefs_t *iterate, void *ctx)
1975 {
1976 int ret = 0;
1977 int slot;
1978 u32 cur;
1979 u32 len;
1980 u32 name_len;
1981 u64 parent = 0;
1982 int found = 0;
1983 struct extent_buffer *eb;
1984 struct btrfs_item *item;
1985 struct btrfs_inode_ref *iref;
1986 struct btrfs_key found_key;
1987
1988 while (!ret) {
1989 ret = btrfs_find_item(fs_root, path, inum,
1990 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
1991 &found_key);
1992
1993 if (ret < 0)
1994 break;
1995 if (ret) {
1996 ret = found ? 0 : -ENOENT;
1997 break;
1998 }
1999 ++found;
2000
2001 parent = found_key.offset;
2002 slot = path->slots[0];
2003 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2004 if (!eb) {
2005 ret = -ENOMEM;
2006 break;
2007 }
2008 extent_buffer_get(eb);
2009 btrfs_tree_read_lock(eb);
2010 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2011 btrfs_release_path(path);
2012
2013 item = btrfs_item_nr(slot);
2014 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2015
2016 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2017 name_len = btrfs_inode_ref_name_len(eb, iref);
2018 /* path must be released before calling iterate()! */
2019 btrfs_debug(fs_root->fs_info,
2020 "following ref at offset %u for inode %llu in tree %llu",
2021 cur, found_key.objectid, fs_root->objectid);
2022 ret = iterate(parent, name_len,
2023 (unsigned long)(iref + 1), eb, ctx);
2024 if (ret)
2025 break;
2026 len = sizeof(*iref) + name_len;
2027 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2028 }
2029 btrfs_tree_read_unlock_blocking(eb);
2030 free_extent_buffer(eb);
2031 }
2032
2033 btrfs_release_path(path);
2034
2035 return ret;
2036 }
2037
2038 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2039 struct btrfs_path *path,
2040 iterate_irefs_t *iterate, void *ctx)
2041 {
2042 int ret;
2043 int slot;
2044 u64 offset = 0;
2045 u64 parent;
2046 int found = 0;
2047 struct extent_buffer *eb;
2048 struct btrfs_inode_extref *extref;
2049 u32 item_size;
2050 u32 cur_offset;
2051 unsigned long ptr;
2052
2053 while (1) {
2054 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2055 &offset);
2056 if (ret < 0)
2057 break;
2058 if (ret) {
2059 ret = found ? 0 : -ENOENT;
2060 break;
2061 }
2062 ++found;
2063
2064 slot = path->slots[0];
2065 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2066 if (!eb) {
2067 ret = -ENOMEM;
2068 break;
2069 }
2070 extent_buffer_get(eb);
2071
2072 btrfs_tree_read_lock(eb);
2073 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2074 btrfs_release_path(path);
2075
2076 item_size = btrfs_item_size_nr(eb, slot);
2077 ptr = btrfs_item_ptr_offset(eb, slot);
2078 cur_offset = 0;
2079
2080 while (cur_offset < item_size) {
2081 u32 name_len;
2082
2083 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2084 parent = btrfs_inode_extref_parent(eb, extref);
2085 name_len = btrfs_inode_extref_name_len(eb, extref);
2086 ret = iterate(parent, name_len,
2087 (unsigned long)&extref->name, eb, ctx);
2088 if (ret)
2089 break;
2090
2091 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2092 cur_offset += sizeof(*extref);
2093 }
2094 btrfs_tree_read_unlock_blocking(eb);
2095 free_extent_buffer(eb);
2096
2097 offset++;
2098 }
2099
2100 btrfs_release_path(path);
2101
2102 return ret;
2103 }
2104
2105 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2106 struct btrfs_path *path, iterate_irefs_t *iterate,
2107 void *ctx)
2108 {
2109 int ret;
2110 int found_refs = 0;
2111
2112 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2113 if (!ret)
2114 ++found_refs;
2115 else if (ret != -ENOENT)
2116 return ret;
2117
2118 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2119 if (ret == -ENOENT && found_refs)
2120 return 0;
2121
2122 return ret;
2123 }
2124
2125 /*
2126 * returns 0 if the path could be dumped (it may have been truncated)
2127 * returns <0 in case of an error
2128 */
2129 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2130 struct extent_buffer *eb, void *ctx)
2131 {
2132 struct inode_fs_paths *ipath = ctx;
2133 char *fspath;
2134 char *fspath_min;
2135 int i = ipath->fspath->elem_cnt;
2136 const int s_ptr = sizeof(char *);
2137 u32 bytes_left;
2138
2139 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2140 ipath->fspath->bytes_left - s_ptr : 0;
2141
2142 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2143 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2144 name_off, eb, inum, fspath_min, bytes_left);
2145 if (IS_ERR(fspath))
2146 return PTR_ERR(fspath);
2147
2148 if (fspath > fspath_min) {
2149 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2150 ++ipath->fspath->elem_cnt;
2151 ipath->fspath->bytes_left = fspath - fspath_min;
2152 } else {
2153 ++ipath->fspath->elem_missed;
2154 ipath->fspath->bytes_missing += fspath_min - fspath;
2155 ipath->fspath->bytes_left = 0;
2156 }
2157
2158 return 0;
2159 }
2160
2161 /*
2162 * this dumps all file system paths to the inode into the ipath struct, provided
2163 * it has been created large enough. each path is zero-terminated and accessed
2164 * from ipath->fspath->val[i].
2165 * when it returns, ipath->fspath->elem_cnt paths are available
2166 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2167 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2168 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2169 * have been needed to return all paths.
2170 */
2171 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2172 {
2173 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2174 inode_to_path, ipath);
2175 }
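
/*
 * Editor's sketch, not part of the original file: how a caller might read
 * the results that paths_from_inode() leaves behind. The helper name
 * dump_ipath_results() is hypothetical; the fields it reads (elem_cnt,
 * elem_missed, bytes_missing, val[]) are the ones documented above.
 */
static void dump_ipath_results(struct btrfs_fs_info *fs_info,
			       struct inode_fs_paths *ipath)
{
	u32 i;

	for (i = 0; i < ipath->fspath->elem_cnt; i++) {
		/* each val[i] points to a zero-terminated path string */
		btrfs_debug(fs_info, "path %u: %s", i,
			    (char *)(unsigned long)ipath->fspath->val[i]);
	}

	/*
	 * If the container was too small, elem_missed paths were dropped and
	 * bytes_missing more bytes would have been needed to return them all.
	 */
	if (ipath->fspath->elem_missed)
		btrfs_debug(fs_info, "%u paths missed, %u bytes short",
			    ipath->fspath->elem_missed,
			    ipath->fspath->bytes_missing);
}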
2176
2177 struct btrfs_data_container *init_data_container(u32 total_bytes)
2178 {
2179 struct btrfs_data_container *data;
2180 size_t alloc_bytes;
2181
2182 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2183 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2184 if (!data)
2185 return ERR_PTR(-ENOMEM);
2186
2187 if (total_bytes >= sizeof(*data)) {
2188 data->bytes_left = total_bytes - sizeof(*data);
2189 data->bytes_missing = 0;
2190 } else {
2191 data->bytes_missing = sizeof(*data) - total_bytes;
2192 data->bytes_left = 0;
2193 }
2194
2195 data->elem_cnt = 0;
2196 data->elem_missed = 0;
2197
2198 return data;
2199 }
2200
2201 /*
2202 * allocates space to return multiple file system paths for an inode.
2203 * the number of bytes to allocate is passed in total_bytes; note that the space
2204 * usable for actual path information is total_bytes - sizeof(struct btrfs_data_container).
2205 * the returned pointer must be freed with free_ipath() in the end.
2206 */
2207 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2208 struct btrfs_path *path)
2209 {
2210 struct inode_fs_paths *ifp;
2211 struct btrfs_data_container *fspath;
2212
2213 fspath = init_data_container(total_bytes);
2214 if (IS_ERR(fspath))
2215 return (void *)fspath;
2216
2217 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2218 if (!ifp) {
2219 kvfree(fspath);
2220 return ERR_PTR(-ENOMEM);
2221 }
2222
2223 ifp->btrfs_path = path;
2224 ifp->fspath = fspath;
2225 ifp->fs_root = fs_root;
2226
2227 return ifp;
2228 }
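
/*
 * Editor's sketch, not part of the original file: the typical pairing of
 * init_ipath() with paths_from_inode() and free_ipath(). The function name
 * and the 4096-byte container size below are arbitrary examples; note that
 * free_ipath() does not free the btrfs_path passed to init_ipath().
 */
static int example_paths_from_inode(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	/* ... consume ipath->fspath->val[] here, as sketched above ... */

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}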
2229
2230 void free_ipath(struct inode_fs_paths *ipath)
2231 {
2232 if (!ipath)
2233 return;
2234 kvfree(ipath->fspath);
2235 kfree(ipath);
2236 }