/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"

struct __data_ref {
	struct list_head list;
	u64 inum;
	u64 root;
	u64 extent_data_item_offset;
};

struct __shared_ref {
	struct list_head list;
	u64 disk_byte;
};

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key;
	int level;
	int count;
	u64 parent;
	u64 wanted_disk_byte;
};

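/*
 * add a ref to the list. (root_id, key, level) describe an indirect ref
 * that still needs resolving, a non-zero parent a direct one. count is
 * the number of references added; it can be negative when it stems from
 * a delayed ref that drops references.
 */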
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level, u64 parent,
			    u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key = *key;
	else
		memset(&ref->key, 0, sizeof(ref->key));

	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

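/*
 * add the tree block eb to the parents ulist. for leaves, also scan the
 * following leaves for further EXTENT_DATA items of wanted_objectid that
 * still point to wanted_disk_byte and add those leaves as well.
 */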
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct extent_buffer *eb, int level,
			   u64 wanted_objectid, u64 wanted_disk_byte)
{
	int ret;
	int slot;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_byte;

add_parent:
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	if (level != 0)
		return 0;

	/*
	 * if the current leaf is completely filled with EXTENT_DATA items,
	 * we must check whether the next leaf holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	while (1) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		eb = path->nodes[0];
		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
			btrfs_item_key_to_cpu(eb, &key, slot);
			if (key.objectid != wanted_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY)
				return 0;
			fi = btrfs_item_ptr(eb, slot,
					    struct btrfs_file_extent_item);
			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
			if (disk_byte == wanted_disk_byte)
				goto add_parent;
		}
	}

	return 0;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct __prelim_ref *ref,
				  struct ulist *parents)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key.objectid, ref->key.type,
		 (unsigned long long)ref->key.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		WARN_ON(1);
		ret = 1;
		goto out;
	}

	if (level == 0) {
		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
	}

	/* the last two parameters will only be used for level == 0 */
	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
			      ref->wanted_disk_byte);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct list_head *head)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, ref, parents);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		node = ulist_next(parents, NULL);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, node))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 * mode = 2: merge identical parents
 */
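/*
 * e.g. with mode = 1, two indirect refs for the same (root_id, key, level)
 * collapse into one entry whose count is the sum of both; with mode = 2,
 * two refs that resolved to the same parent bytenr are combined likewise.
 */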
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		if (mode == 1 && ref1->key.type == 0)
			continue;
		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (memcmp(&ref1->key, &ref2->key,
					   sizeof(ref1->key)) ||
				    ref1->level != ref2->level ||
				    ref1->root_id != ref2->root_id)
					continue;
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
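/*
 * refs that add a reference are accounted with a positive sign, drops
 * with a negative one (sgn below), so the resulting counts reflect the
 * net effect of all queued delayed refs.
 */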
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct btrfs_key *info_key,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	int sgn;
	int ret = 0;	/* stays 0 for ref types we only warn about */

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(info_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
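/*
 * an extent item with inline refs is laid out as
 *	btrfs_extent_item [btrfs_tree_block_info] (inline ref)*
 * where the tree_block_info part is only present for tree blocks.
 */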
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     struct btrfs_key *info_key, int *info_level,
			     struct list_head *prefs)
{
	int ret = 0;	/* stays 0 for ref types we only warn about */
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0] - 1;

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;
		struct btrfs_disk_key disk_key;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		btrfs_tree_block_key(leaf, info, &disk_key);
		btrfs_disk_key_to_cpu(info_key, &disk_key);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       *info_level + 1, offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, info_key,
					       *info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
					       count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    struct btrfs_key *info_key, int info_level,
			    struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       info_level + 1, key.offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, info_key,
					       info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
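/*
 * the overall strategy is:
 *  1. collect the delayed refs (under the ref head's mutex) and all
 *     on-disk refs for bytenr
 *  2. merge refs that describe the same (root, key, level) - mode 1
 *  3. resolve the remaining indirect refs to parent bytenrs
 *  4. merge refs that resolved to the same parent - mode 2
 *  5. move surviving refs to the refs and roots ulists
 */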
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 seq, struct ulist *refs, struct ulist *roots)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_key info_key = { 0 };
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head = NULL;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	/*
	 * look if there are updates for this ref queued and lock the head
	 */
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's
			 * released and try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
		if (ret) {
			/* don't leak the delayed refs spinlock on error */
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
	}
	spin_unlock(&delayed_refs->lock);

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		leaf = path->nodes[0];
		slot = path->slots[0] - 1;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_key, &info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	/*
	 * when adding the delayed refs above, the info_key might not have
	 * been known yet. Go over the list and replace the missing keys
	 */
	list_for_each_entry(ref, &prefs_delayed, list) {
		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
			memcpy(&ref->key, &info_key, sizeof(ref->key));
	}
	list_splice_init(&prefs_delayed, &prefs);

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	if (head)
		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 num_bytes, u64 seq, struct ulist **leafs)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		ulist_free(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows new elements to be added
 * while we're iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
					tmp, *roots);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, node);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}
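
/*
 * example usage (sketch, error handling trimmed; trans and seq are
 * obtained by the caller):
 *
 *	struct ulist *roots;
 *	struct ulist_node *node = NULL;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, num_bytes,
 *				   seq, &roots);
 *	while (!ret && (node = ulist_next(roots, node)))
 *		pr_debug("referenced from root %llu\n",
 *			 (unsigned long long)node->val);
 *	if (!ret)
 *		ulist_free(roots);
 */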
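
/*
 * position the path at the item (inum, key_type, ioff), or at the next
 * item following it. returns 0 if an item of the wanted type and
 * objectid was found, 1 if not, <0 on error.
 */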
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
			    &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
			    found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			  struct btrfs_inode_ref *iref,
			  struct extent_buffer *eb_in, u64 parent,
			  char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   (unsigned long)(iref + 1), len);
		if (eb != eb_in)
			free_extent_buffer(eb);
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in)
			atomic_inc(&eb->refs);
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
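
/*
 * e.g. resolving a ref for "b" in directory "a" into a 16 byte dest
 * fills dest backwards with "a/b\0" and returns dest + 12; with only a
 * 2 byte dest, nothing beyond "b\0" fits and dest - 2 is returned,
 * telling the caller that 4 bytes would have been needed in total.
 */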

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
				  0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical)
		return -ENOENT;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
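
/*
 * example caller loop (sketch): dump all tree backrefs of an extent item
 *
 *	unsigned long ptr = 0;
 *	do {
 *		ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 *					      &root, &level);
 *		if (ret == 0)
 *			pr_debug("root %llu level %u\n",
 *				 (unsigned long long)root, level);
 *	} while (ret == 0);
 */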

static int __data_list_add(struct list_head *head, u64 inum,
			   u64 extent_data_item_offset, u64 root)
{
	struct __data_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->inum = inum;
	ref->extent_data_item_offset = extent_data_item_offset;
	ref->root = root;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
			      struct btrfs_extent_data_ref *dref)
{
	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
			       btrfs_extent_data_ref_offset(eb, dref),
			       btrfs_extent_data_ref_root(eb, dref));
}

static int __shared_list_add(struct list_head *head, u64 disk_byte)
{
	struct __shared_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->disk_byte = disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

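/*
 * for a leaf (logical) that holds EXTENT_DATA items referencing our
 * extent, find the tree block refs of that leaf, queue one __data_ref
 * per referencing root and finally call iterate() for each of them.
 */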
static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
					   u64 logical, u64 inum,
					   u64 extent_data_item_offset,
					   u64 extent_offset,
					   struct btrfs_path *path,
					   struct list_head *data_refs,
					   iterate_extent_inodes_t *iterate,
					   void *ctx)
{
	u64 ref_root;
	u32 item_size;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *eiref;
	struct __data_ref *ref;
	int ret;
	int type;
	int last;
	unsigned long ptr = 0;

	WARN_ON(!list_empty(data_refs));
	ret = extent_from_logical(fs_info, logical, path, &key);
	if (ret & BTRFS_EXTENT_FLAG_DATA)
		ret = -EIO;
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	ret = 0;
	ref_root = 0;
	/*
	 * as done in iterate_extent_inodes, we first build a list of refs to
	 * iterate, then free the path and then iterate them to avoid deadlocks.
	 */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
					       &eiref, &type);
		if (last < 0) {
			ret = last;
			goto out;
		}
		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __data_list_add(data_refs, inum,
					      extent_data_item_offset,
					      ref_root);
		}
	} while (!ret && !last);

	btrfs_release_path(path);

	if (ref_root == 0) {
		printk(KERN_ERR "btrfs: failed to find tree block ref "
			"for shared data backref %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

out:
	while (!list_empty(data_refs)) {
		ref = list_first_entry(data_refs, struct __data_ref, list);
		list_del(&ref->list);
		if (!ret)
			ret = iterate(ref->inum, extent_offset +
				      ref->extent_data_item_offset,
				      ref->root, ctx);
		kfree(ref);
	}

	return ret;
}

static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
				    u64 logical, u64 orig_extent_item_objectid,
				    u64 extent_offset, struct btrfs_path *path,
				    struct list_head *data_refs,
				    iterate_extent_inodes_t *iterate,
				    void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret;
	int found = 0;

	eb = read_tree_block(fs_info->tree_root, logical,
			     fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		if (!fi) {
			free_extent_buffer(eb);
			return -EIO;
		}
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid) {
			if (found)
				break;
			else
				continue;
		}
		++found;
		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
						      key.objectid,
						      key.offset,
						      extent_offset, path,
						      data_refs,
						      iterate, ctx);
		if (ret)
			break;
	}

	if (!found) {
		printk(KERN_ERR "btrfs: failed to follow shared data backref "
			"to parent %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

	free_extent_buffer(eb);
	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters. will use the path given as a parameter and return it
 * released.
 * when the iterator function returns a non-zero value, iteration stops.
 */
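/*
 * example iterator (sketch): count the inodes referencing an extent
 *
 *	static int count_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		(*(u64 *)ctx)++;
 *		return 0;
 *	}
 *
 * pass count_refs as iterate and a pointer to a u64 counter as ctx.
 */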
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path,
			  u64 extent_item_objectid,
			  u64 extent_offset,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	unsigned long ptr = 0;
	int last;
	int ret;
	int type;
	u64 logical;
	u32 item_size;
	struct btrfs_extent_inline_ref *eiref;
	struct btrfs_extent_data_ref *dref;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct __data_ref *ref_d;
	struct __shared_ref *ref_s;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	/* first we iterate the inline refs, ... */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
					       &eiref, &type);
		if (last == -ENOENT) {
			ret = 0;
			break;
		}
		if (last < 0) {
			ret = last;
			break;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
			logical = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __shared_list_add(&shared_refs, logical);
		}
	} while (!ret && !last);

	/* ... then we proceed to in-tree references and ... */
	while (!ret) {
		++path->slots[0];
		/* slot == nritems is already one past the last item */
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret) {
				if (ret == 1)
					ret = 0; /* we're done */
				break;
			}
			eb = path->nodes[0];
		}
		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_item_objectid)
			break;
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
					      struct btrfs_extent_data_ref);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __shared_list_add(&shared_refs, key.offset);
		}
	}

	btrfs_release_path(path);

	/*
	 * ... only at the very end we can process the refs we found. this is
	 * because the iterator function we call is allowed to make tree lookups
	 * and we have to avoid deadlocks. additionally, we need more tree
	 * lookups ourselves for shared data refs.
	 */
	while (!list_empty(&data_refs)) {
		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
		list_del(&ref_d->list);
		if (!ret)
			ret = iterate(ref_d->inum, extent_offset +
				      ref_d->extent_data_item_offset,
				      ref_d->root, ctx);
		kfree(ref_d);
	}

	while (!list_empty(&shared_refs)) {
		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
					 list);
		list_del(&ref_s->list);
		if (!ret)
			ret = __iter_shared_inline_ref(fs_info,
						       ref_s->disk_byte,
						       extent_item_objectid,
						       extent_offset, path,
						       &data_refs,
						       iterate, ctx);
		kfree(ref_s);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 offset;
	struct btrfs_key found_key;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	offset = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
				    offset, iterate, ctx);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (1) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		/* we hold exactly one ref on eb here, drop it only once */
		free_extent_buffer(eb);
		/* stop iteration when the iterator signals us to */
		if (ret)
			break;
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (may be truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
			      inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

/*
 * allocates space for a data container to return multiple file system paths.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed by the caller.
 */
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
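
/*
 * example (sketch; the buffer size is an arbitrary choice):
 *
 *	ipath = init_ipath(sizeof(struct btrfs_data_container) + PAGE_SIZE,
 *			   fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; ++i)
 *		pr_debug("path: %s\n",
 *			 (char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 */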

void free_ipath(struct inode_fs_paths *ipath)
{
	/* the fspath container allocated in init_data_container() must not leak */
	kfree(ipath->fspath);
	kfree(ipath);
}