fs/btrfs/relocation.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/error-injection.h>
13 #include "ctree.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "volumes.h"
17 #include "locking.h"
18 #include "btrfs_inode.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
21 #include "inode-map.h"
22 #include "qgroup.h"
23 #include "print-tree.h"
24 #include "delalloc-space.h"
25 #include "block-group.h"
26 #include "backref.h"
27
28 /*
29 * Relocation overview
30 *
31 * [What does relocation do]
32 *
33 * The objective of relocation is to relocate all extents of the target block
34 * group to other block groups.
35 * This is used by resize (shrink only), profile conversion, space
36 * compaction, and the balance routine to spread chunks over devices.
37 *
38 * Before | After
39 * ------------------------------------------------------------------
40 * BG A: 10 data extents | BG A: deleted
41 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
42 * BG C: 1 data extent | BG C: 3 data extents (1 old + 2 relocated)
43 *
44 * [How does relocation work]
45 *
46 * 1. Mark the target block group read-only
47 * New extents won't be allocated from the target block group.
48 *
49 * 2.1 Record each extent in the target block group
50 * To build a proper map of extents to be relocated.
51 *
52 * 2.2 Build data reloc tree and reloc trees
53 * Data reloc tree will contain an inode, recording all newly relocated
54 * data extents.
55 * There will be only one data reloc tree for one data block group.
56 *
57 * Reloc tree will be a special snapshot of its source tree, containing
58 * relocated tree blocks.
59 * Each tree referring to a tree block in the target block group will
60 * get its reloc tree built.
61 *
62 * 2.3 Swap source tree with its corresponding reloc tree
63 * Each involved tree only refers to new extents after swap.
64 *
65 * 3. Cleanup reloc trees and data reloc tree.
66 * As old extents in the target block group are still referenced by reloc
67 * trees, we need to clean them up before really freeing the target block
68 * group.
69 *
70 * The main complexity is in steps 2.2 and 2.3.
71 *
72 * The entry point of relocation is relocate_block_group() function.
73 */
74
75 /*
76 * backref_node, mapping_node and tree_block start with this
77 */
78 struct tree_entry {
79 struct rb_node rb_node;
80 u64 bytenr;
81 };
82
83 /*
84 * represent a tree block in the backref cache
85 */
86 struct backref_node {
87 struct rb_node rb_node;
88 u64 bytenr;
89
90 u64 new_bytenr;
91 /* objectid of the tree block owner, may not be up to date */
92 u64 owner;
93 /* link to pending, changed or detached list */
94 struct list_head list;
95 /* list of upper level blocks that reference this block */
96 struct list_head upper;
97 /* list of child blocks in the cache */
98 struct list_head lower;
99 /* NULL if this node is not tree root */
100 struct btrfs_root *root;
101 /* extent buffer obtained by COWing the block */
102 struct extent_buffer *eb;
103 /* level of tree block */
104 unsigned int level:8;
105 /* is the block in a non-reference-counted tree */
106 unsigned int cowonly:1;
107 /* 1 if no child node in the cache */
108 unsigned int lowest:1;
109 /* is the extent buffer locked */
110 unsigned int locked:1;
111 /* has the block been processed */
112 unsigned int processed:1;
113 /* have backrefs of this block been checked */
114 unsigned int checked:1;
115 /*
116 * 1 if corresponding block has been cowed but some upper
117 * level block pointers may not point to the new location
118 */
119 unsigned int pending:1;
120 /*
121 * 1 if the backref node isn't connected to any other
122 * backref node.
123 */
124 unsigned int detached:1;
125 };
126
127 /*
128 * represent a block pointer (an edge) in the backref cache
129 */
130 struct backref_edge {
131 struct list_head list[2];
132 struct backref_node *node[2];
133 };
134
135 #define LOWER 0
136 #define UPPER 1
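/*
 * An edge connects node[LOWER] (the child block) to node[UPPER] (its
 * parent). list[LOWER] is linked into the lower node's 'upper' list,
 * list[UPPER] into the upper node's 'lower' list.
 */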
137 #define RELOCATION_RESERVED_NODES 256
138
139 struct backref_cache {
140 /* red black tree of all backref nodes in the cache */
141 struct rb_root rb_root;
142 /* for passing backref nodes to btrfs_reloc_cow_block */
143 struct backref_node *path[BTRFS_MAX_LEVEL];
144 /*
145 * list of blocks that have been cowed but some block
146 * pointers in upper level blocks may not reflect the
147 * new location
148 */
149 struct list_head pending[BTRFS_MAX_LEVEL];
150 /* list of backref nodes with no child node */
151 struct list_head leaves;
152 /* list of blocks that have been cowed in current transaction */
153 struct list_head changed;
154 /* list of detached backref nodes */
155 struct list_head detached;
156
157 u64 last_trans;
158
159 int nr_nodes;
160 int nr_edges;
161 };
162
163 /*
164 * map address of tree root to tree
165 */
166 struct mapping_node {
167 struct rb_node rb_node;
168 u64 bytenr;
169 void *data;
170 };
171
172 struct mapping_tree {
173 struct rb_root rb_root;
174 spinlock_t lock;
175 };
176
177 /*
178 * represent a tree block to process
179 */
180 struct tree_block {
181 struct rb_node rb_node;
182 u64 bytenr;
183 struct btrfs_key key;
184 unsigned int level:8;
185 unsigned int key_ready:1;
186 };
187
188 #define MAX_EXTENTS 128
189
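/*
 * A run of adjacent data extents relocated as one batch. start/end bound
 * the cluster; boundary[] records the start of each extent so that the
 * original extent boundaries can be preserved at write-out time.
 */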
190 struct file_extent_cluster {
191 u64 start;
192 u64 end;
193 u64 boundary[MAX_EXTENTS];
194 unsigned int nr;
195 };
196
197 struct reloc_control {
198 /* block group to relocate */
199 struct btrfs_block_group *block_group;
200 /* extent tree */
201 struct btrfs_root *extent_root;
202 /* inode for moving data */
203 struct inode *data_inode;
204
205 struct btrfs_block_rsv *block_rsv;
206
207 struct backref_cache backref_cache;
208
209 struct file_extent_cluster cluster;
210 /* tree blocks have been processed */
211 struct extent_io_tree processed_blocks;
212 /* map start of tree root to corresponding reloc tree */
213 struct mapping_tree reloc_root_tree;
214 /* list of reloc trees */
215 struct list_head reloc_roots;
216 /* list of subvolume trees that get relocated */
217 struct list_head dirty_subvol_roots;
218 /* size of metadata reservation for merging reloc trees */
219 u64 merging_rsv_size;
220 /* size of relocated tree nodes */
221 u64 nodes_relocated;
222 /* reserved size for block group relocation*/
223 u64 reserved_bytes;
224
225 u64 search_start;
226 u64 extents_found;
227
228 unsigned int stage:8;
229 unsigned int create_reloc_tree:1;
230 unsigned int merge_reloc_tree:1;
231 unsigned int found_file_extent:1;
232 };
233
234 /* stages of data relocation */
235 #define MOVE_DATA_EXTENTS 0
236 #define UPDATE_DATA_PTRS 1
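/*
 * MOVE_DATA_EXTENTS: copy the data of the target block group into the
 * data reloc inode. UPDATE_DATA_PTRS: rewrite the file extent items to
 * point at the new copies (see replace_file_extents()).
 */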
237
238 static void remove_backref_node(struct backref_cache *cache,
239 struct backref_node *node);
240 static void __mark_block_processed(struct reloc_control *rc,
241 struct backref_node *node);
242
243 static void mapping_tree_init(struct mapping_tree *tree)
244 {
245 tree->rb_root = RB_ROOT;
246 spin_lock_init(&tree->lock);
247 }
248
249 static void backref_cache_init(struct backref_cache *cache)
250 {
251 int i;
252 cache->rb_root = RB_ROOT;
253 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
254 INIT_LIST_HEAD(&cache->pending[i]);
255 INIT_LIST_HEAD(&cache->changed);
256 INIT_LIST_HEAD(&cache->detached);
257 INIT_LIST_HEAD(&cache->leaves);
258 }
259
260 static void backref_cache_cleanup(struct backref_cache *cache)
261 {
262 struct backref_node *node;
263 int i;
264
265 while (!list_empty(&cache->detached)) {
266 node = list_entry(cache->detached.next,
267 struct backref_node, list);
268 remove_backref_node(cache, node);
269 }
270
271 while (!list_empty(&cache->leaves)) {
272 node = list_entry(cache->leaves.next,
273 struct backref_node, lower);
274 remove_backref_node(cache, node);
275 }
276
277 cache->last_trans = 0;
278
279 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
280 ASSERT(list_empty(&cache->pending[i]));
281 ASSERT(list_empty(&cache->changed));
282 ASSERT(list_empty(&cache->detached));
283 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
284 ASSERT(!cache->nr_nodes);
285 ASSERT(!cache->nr_edges);
286 }
287
288 static struct backref_node *alloc_backref_node(struct backref_cache *cache)
289 {
290 struct backref_node *node;
291
292 node = kzalloc(sizeof(*node), GFP_NOFS);
293 if (node) {
294 INIT_LIST_HEAD(&node->list);
295 INIT_LIST_HEAD(&node->upper);
296 INIT_LIST_HEAD(&node->lower);
297 RB_CLEAR_NODE(&node->rb_node);
298 cache->nr_nodes++;
299 }
300 return node;
301 }
302
303 static void free_backref_node(struct backref_cache *cache,
304 struct backref_node *node)
305 {
306 if (node) {
307 cache->nr_nodes--;
308 btrfs_put_root(node->root);
309 kfree(node);
310 }
311 }
312
313 static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
314 {
315 struct backref_edge *edge;
316
317 edge = kzalloc(sizeof(*edge), GFP_NOFS);
318 if (edge)
319 cache->nr_edges++;
320 return edge;
321 }
322
323 static void free_backref_edge(struct backref_cache *cache,
324 struct backref_edge *edge)
325 {
326 if (edge) {
327 cache->nr_edges--;
328 kfree(edge);
329 }
330 }
331
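/*
 * Insert @node, keyed by @bytenr, into the rb tree at @root. Returns NULL
 * on success, or the conflicting rb_node if an entry with the same bytenr
 * already exists (nothing is inserted in that case). Works for any
 * structure that starts with struct tree_entry.
 */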
332 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
333 struct rb_node *node)
334 {
335 struct rb_node **p = &root->rb_node;
336 struct rb_node *parent = NULL;
337 struct tree_entry *entry;
338
339 while (*p) {
340 parent = *p;
341 entry = rb_entry(parent, struct tree_entry, rb_node);
342
343 if (bytenr < entry->bytenr)
344 p = &(*p)->rb_left;
345 else if (bytenr > entry->bytenr)
346 p = &(*p)->rb_right;
347 else
348 return parent;
349 }
350
351 rb_link_node(node, parent, p);
352 rb_insert_color(node, root);
353 return NULL;
354 }
355
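/*
 * Find the entry with the given bytenr in the rb tree at @root. Returns
 * the matching rb_node, or NULL if no entry has that bytenr.
 */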
356 static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
357 {
358 struct rb_node *n = root->rb_node;
359 struct tree_entry *entry;
360
361 while (n) {
362 entry = rb_entry(n, struct tree_entry, rb_node);
363
364 if (bytenr < entry->bytenr)
365 n = n->rb_left;
366 else if (bytenr > entry->bytenr)
367 n = n->rb_right;
368 else
369 return n;
370 }
371 return NULL;
372 }
373
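/*
 * A duplicate bytenr in the backref rb tree means the cache is corrupted
 * beyond repair; report the inconsistency and halt via btrfs_panic().
 * @rb_node is the conflicting entry already present in the tree.
 */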
374 static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
375 {
376
377 struct btrfs_fs_info *fs_info = NULL;
378 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
379 rb_node);
380 if (bnode->root)
381 fs_info = bnode->root->fs_info;
382 btrfs_panic(fs_info, errno,
383 "Inconsistency in backref cache found at offset %llu",
384 bytenr);
385 }
386
387 /*
388 * walk up backref nodes until reaching the node that represents the tree root
389 */
390 static struct backref_node *walk_up_backref(struct backref_node *node,
391 struct backref_edge *edges[],
392 int *index)
393 {
394 struct backref_edge *edge;
395 int idx = *index;
396
397 while (!list_empty(&node->upper)) {
398 edge = list_entry(node->upper.next,
399 struct backref_edge, list[LOWER]);
400 edges[idx++] = edge;
401 node = edge->node[UPPER];
402 }
403 BUG_ON(node->detached);
404 *index = idx;
405 return node;
406 }
407
408 /*
409 * walk down backref nodes to find the start of the next reference path
410 */
411 static struct backref_node *walk_down_backref(struct backref_edge *edges[],
412 int *index)
413 {
414 struct backref_edge *edge;
415 struct backref_node *lower;
416 int idx = *index;
417
418 while (idx > 0) {
419 edge = edges[idx - 1];
420 lower = edge->node[LOWER];
421 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
422 idx--;
423 continue;
424 }
425 edge = list_entry(edge->list[LOWER].next,
426 struct backref_edge, list[LOWER]);
427 edges[idx - 1] = edge;
428 *index = idx;
429 return edge->node[UPPER];
430 }
431 *index = 0;
432 return NULL;
433 }
434
435 static void unlock_node_buffer(struct backref_node *node)
436 {
437 if (node->locked) {
438 btrfs_tree_unlock(node->eb);
439 node->locked = 0;
440 }
441 }
442
443 static void drop_node_buffer(struct backref_node *node)
444 {
445 if (node->eb) {
446 unlock_node_buffer(node);
447 free_extent_buffer(node->eb);
448 node->eb = NULL;
449 }
450 }
451
452 static void drop_backref_node(struct backref_cache *tree,
453 struct backref_node *node)
454 {
455 BUG_ON(!list_empty(&node->upper));
456
457 drop_node_buffer(node);
458 list_del(&node->list);
459 list_del(&node->lower);
460 if (!RB_EMPTY_NODE(&node->rb_node))
461 rb_erase(&node->rb_node, &tree->rb_root);
462 free_backref_node(tree, node);
463 }
464
465 /*
466 * remove a backref node from the backref cache
467 */
468 static void remove_backref_node(struct backref_cache *cache,
469 struct backref_node *node)
470 {
471 struct backref_node *upper;
472 struct backref_edge *edge;
473
474 if (!node)
475 return;
476
477 BUG_ON(!node->lowest && !node->detached);
478 while (!list_empty(&node->upper)) {
479 edge = list_entry(node->upper.next, struct backref_edge,
480 list[LOWER]);
481 upper = edge->node[UPPER];
482 list_del(&edge->list[LOWER]);
483 list_del(&edge->list[UPPER]);
484 free_backref_edge(cache, edge);
485
486 if (RB_EMPTY_NODE(&upper->rb_node)) {
487 BUG_ON(!list_empty(&node->upper));
488 drop_backref_node(cache, node);
489 node = upper;
490 node->lowest = 1;
491 continue;
492 }
493 /*
494 * add the node to the leaf list if no other
495 * child block is cached.
496 */
497 if (list_empty(&upper->lower)) {
498 list_add_tail(&upper->lower, &cache->leaves);
499 upper->lowest = 1;
500 }
501 }
502
503 drop_backref_node(cache, node);
504 }
505
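/*
 * Re-key a cached backref node after the block it describes has moved:
 * take it out of the rb tree, update its bytenr and re-insert it.
 */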
506 static void update_backref_node(struct backref_cache *cache,
507 struct backref_node *node, u64 bytenr)
508 {
509 struct rb_node *rb_node;
510 rb_erase(&node->rb_node, &cache->rb_root);
511 node->bytenr = bytenr;
512 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
513 if (rb_node)
514 backref_tree_panic(rb_node, -EEXIST, bytenr);
515 }
516
517 /*
518 * update backref cache after a transaction commit
519 */
520 static int update_backref_cache(struct btrfs_trans_handle *trans,
521 struct backref_cache *cache)
522 {
523 struct backref_node *node;
524 int level = 0;
525
526 if (cache->last_trans == 0) {
527 cache->last_trans = trans->transid;
528 return 0;
529 }
530
531 if (cache->last_trans == trans->transid)
532 return 0;
533
534 /*
535 * detached nodes are used to avoid unnecessary backref
536 * lookups. a transaction commit changes the extent tree,
537 * so the detached nodes are no longer useful.
538 */
539 while (!list_empty(&cache->detached)) {
540 node = list_entry(cache->detached.next,
541 struct backref_node, list);
542 remove_backref_node(cache, node);
543 }
544
545 while (!list_empty(&cache->changed)) {
546 node = list_entry(cache->changed.next,
547 struct backref_node, list);
548 list_del_init(&node->list);
549 BUG_ON(node->pending);
550 update_backref_node(cache, node, node->new_bytenr);
551 }
552
553 /*
554 * some nodes can be left in the pending list if there were
555 * errors while processing the pending nodes.
556 */
557 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
558 list_for_each_entry(node, &cache->pending[level], list) {
559 BUG_ON(!node->pending);
560 if (node->bytenr == node->new_bytenr)
561 continue;
562 update_backref_node(cache, node, node->new_bytenr);
563 }
564 }
565
566 cache->last_trans = 0;
567 return 1;
568 }
569
570 static bool reloc_root_is_dead(struct btrfs_root *root)
571 {
572 /*
573 * Pair with set_bit/clear_bit in clean_dirty_subvols and
574 * btrfs_update_reloc_root. We need to see the updated bit before
575 * trying to access reloc_root
576 */
577 smp_rmb();
578 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
579 return true;
580 return false;
581 }
582
583 /*
584 * Check if this subvolume tree has a valid reloc tree.
585 *
586 * A reloc tree after swap is considered dead, thus not valid.
587 * This is enough for most callers, as they don't distinguish dead reloc root
588 * from no reloc root. But should_ignore_root() below is a special case.
589 */
590 static bool have_reloc_root(struct btrfs_root *root)
591 {
592 if (reloc_root_is_dead(root))
593 return false;
594 if (!root->reloc_root)
595 return false;
596 return true;
597 }
598
599 static int should_ignore_root(struct btrfs_root *root)
600 {
601 struct btrfs_root *reloc_root;
602
603 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
604 return 0;
605
606 /* This root has been merged with its reloc tree, we can ignore it */
607 if (reloc_root_is_dead(root))
608 return 1;
609
610 reloc_root = root->reloc_root;
611 if (!reloc_root)
612 return 0;
613
614 if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
615 root->fs_info->running_transaction->transid - 1)
616 return 0;
617 /*
618 * if there is a reloc tree and it was created in a previous
619 * transaction, backref lookup can find the reloc tree,
620 * so the backref node for the fs tree root is useless for
621 * relocation.
622 */
623 return 1;
624 }
625 /*
626 * find reloc tree by address of tree root
627 */
628 static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
629 u64 bytenr)
630 {
631 struct rb_node *rb_node;
632 struct mapping_node *node;
633 struct btrfs_root *root = NULL;
634
635 spin_lock(&rc->reloc_root_tree.lock);
636 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
637 if (rb_node) {
638 node = rb_entry(rb_node, struct mapping_node, rb_node);
639 root = (struct btrfs_root *)node->data;
640 }
641 spin_unlock(&rc->reloc_root_tree.lock);
642 return btrfs_grab_root(root);
643 }
644
645 static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
646 u64 root_objectid)
647 {
648 struct btrfs_key key;
649
650 key.objectid = root_objectid;
651 key.type = BTRFS_ROOT_ITEM_KEY;
652 key.offset = (u64)-1;
653
654 return btrfs_get_fs_root(fs_info, &key, false);
655 }
656
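/*
 * Locate the inline backrefs of the extent item at @slot in @leaf. On
 * success *ptr and *end delimit the inline reference area and 0 is
 * returned; 1 is returned when the item has no usable inline refs, in
 * which case the caller falls back to keyed backref items.
 */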
657 static noinline_for_stack
658 int find_inline_backref(struct extent_buffer *leaf, int slot,
659 unsigned long *ptr, unsigned long *end)
660 {
661 struct btrfs_key key;
662 struct btrfs_extent_item *ei;
663 struct btrfs_tree_block_info *bi;
664 u32 item_size;
665
666 btrfs_item_key_to_cpu(leaf, &key, slot);
667
668 item_size = btrfs_item_size_nr(leaf, slot);
669 if (item_size < sizeof(*ei)) {
670 btrfs_print_v0_err(leaf->fs_info);
671 btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
672 return 1;
673 }
674 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
675 WARN_ON(!(btrfs_extent_flags(leaf, ei) &
676 BTRFS_EXTENT_FLAG_TREE_BLOCK));
677
678 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
679 item_size <= sizeof(*ei) + sizeof(*bi)) {
680 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
681 return 1;
682 }
683 if (key.type == BTRFS_METADATA_ITEM_KEY &&
684 item_size <= sizeof(*ei)) {
685 WARN_ON(item_size < sizeof(*ei));
686 return 1;
687 }
688
689 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
690 bi = (struct btrfs_tree_block_info *)(ei + 1);
691 *ptr = (unsigned long)(bi + 1);
692 } else {
693 *ptr = (unsigned long)(ei + 1);
694 }
695 *end = (unsigned long)ei + item_size;
696 return 0;
697 }
698
699 /*
700 * build backref tree for a given tree block. the root of the backref
701 * tree corresponds to the tree block, the leaves of the backref tree
702 * correspond to roots of b-trees that reference the tree block.
703 *
704 * the basic idea of this function is to check backrefs of a given block
705 * to find upper level blocks that reference the block, and then check
706 * backrefs of these upper level blocks recursively. the recursion stops
707 * when a tree root is reached or backrefs for the block are cached.
708 *
709 * NOTE: if we find backrefs for a block are cached, we know backrefs
710 * for all upper level blocks that directly/indirectly reference the
711 * block are also cached.
712 */
713 static noinline_for_stack
714 struct backref_node *build_backref_tree(struct reloc_control *rc,
715 struct btrfs_key *node_key,
716 int level, u64 bytenr)
717 {
718 struct backref_cache *cache = &rc->backref_cache;
719 struct btrfs_path *path1; /* For searching extent root */
720 struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
721 struct extent_buffer *eb;
722 struct btrfs_root *root;
723 struct backref_node *cur;
724 struct backref_node *upper;
725 struct backref_node *lower;
726 struct backref_node *node = NULL;
727 struct backref_node *exist = NULL;
728 struct backref_edge *edge;
729 struct rb_node *rb_node;
730 struct btrfs_key key;
731 unsigned long end;
732 unsigned long ptr;
733 LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
734 LIST_HEAD(useless);
735 int cowonly;
736 int ret;
737 int err = 0;
738 bool need_check = true;
739
740 path1 = btrfs_alloc_path();
741 path2 = btrfs_alloc_path();
742 if (!path1 || !path2) {
743 err = -ENOMEM;
744 goto out;
745 }
746 path1->reada = READA_FORWARD;
747 path2->reada = READA_FORWARD;
748
749 node = alloc_backref_node(cache);
750 if (!node) {
751 err = -ENOMEM;
752 goto out;
753 }
754
755 node->bytenr = bytenr;
756 node->level = level;
757 node->lowest = 1;
758 cur = node;
759 again:
760 end = 0;
761 ptr = 0;
762 key.objectid = cur->bytenr;
763 key.type = BTRFS_METADATA_ITEM_KEY;
764 key.offset = (u64)-1;
765
766 path1->search_commit_root = 1;
767 path1->skip_locking = 1;
768 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
769 0, 0);
770 if (ret < 0) {
771 err = ret;
772 goto out;
773 }
774 ASSERT(ret);
775 ASSERT(path1->slots[0]);
776
777 path1->slots[0]--;
778
779 WARN_ON(cur->checked);
780 if (!list_empty(&cur->upper)) {
781 /*
782 * the backref was added previously when processing
783 * a backref of type BTRFS_TREE_BLOCK_REF_KEY
784 */
785 ASSERT(list_is_singular(&cur->upper));
786 edge = list_entry(cur->upper.next, struct backref_edge,
787 list[LOWER]);
788 ASSERT(list_empty(&edge->list[UPPER]));
789 exist = edge->node[UPPER];
790 /*
791 * add the upper level block to the pending list if we
792 * need to check its backrefs
793 */
794 if (!exist->checked)
795 list_add_tail(&edge->list[UPPER], &list);
796 } else {
797 exist = NULL;
798 }
799
800 while (1) {
801 cond_resched();
802 eb = path1->nodes[0];
803
804 if (ptr >= end) {
805 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
806 ret = btrfs_next_leaf(rc->extent_root, path1);
807 if (ret < 0) {
808 err = ret;
809 goto out;
810 }
811 if (ret > 0)
812 break;
813 eb = path1->nodes[0];
814 }
815
816 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
817 if (key.objectid != cur->bytenr) {
818 WARN_ON(exist);
819 break;
820 }
821
822 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
823 key.type == BTRFS_METADATA_ITEM_KEY) {
824 ret = find_inline_backref(eb, path1->slots[0],
825 &ptr, &end);
826 if (ret)
827 goto next;
828 }
829 }
830
831 if (ptr < end) {
832 /* update key for inline back ref */
833 struct btrfs_extent_inline_ref *iref;
834 int type;
835 iref = (struct btrfs_extent_inline_ref *)ptr;
836 type = btrfs_get_extent_inline_ref_type(eb, iref,
837 BTRFS_REF_TYPE_BLOCK);
838 if (type == BTRFS_REF_TYPE_INVALID) {
839 err = -EUCLEAN;
840 goto out;
841 }
842 key.type = type;
843 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
844
845 WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
846 key.type != BTRFS_SHARED_BLOCK_REF_KEY);
847 }
848
849 /*
850 * Parent node found and matches current inline ref, no need to
851 * rebuild this node for this inline ref.
852 */
853 if (exist &&
854 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
855 exist->owner == key.offset) ||
856 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
857 exist->bytenr == key.offset))) {
858 exist = NULL;
859 goto next;
860 }
861
862 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
863 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
864 if (key.objectid == key.offset) {
865 /*
866 * Only root blocks of reloc trees use a backref
867 * pointing to themselves.
868 */
869 root = find_reloc_root(rc, cur->bytenr);
870 ASSERT(root);
871 cur->root = root;
872 break;
873 }
874
875 edge = alloc_backref_edge(cache);
876 if (!edge) {
877 err = -ENOMEM;
878 goto out;
879 }
880 rb_node = tree_search(&cache->rb_root, key.offset);
881 if (!rb_node) {
882 upper = alloc_backref_node(cache);
883 if (!upper) {
884 free_backref_edge(cache, edge);
885 err = -ENOMEM;
886 goto out;
887 }
888 upper->bytenr = key.offset;
889 upper->level = cur->level + 1;
890 /*
891 * backrefs for the upper level block aren't
892 * cached, add the block to the pending list
893 */
894 list_add_tail(&edge->list[UPPER], &list);
895 } else {
896 upper = rb_entry(rb_node, struct backref_node,
897 rb_node);
898 ASSERT(upper->checked);
899 INIT_LIST_HEAD(&edge->list[UPPER]);
900 }
901 list_add_tail(&edge->list[LOWER], &cur->upper);
902 edge->node[LOWER] = cur;
903 edge->node[UPPER] = upper;
904
905 goto next;
906 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
907 err = -EINVAL;
908 btrfs_print_v0_err(rc->extent_root->fs_info);
909 btrfs_handle_fs_error(rc->extent_root->fs_info, err,
910 NULL);
911 goto out;
912 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
913 goto next;
914 }
915
916 /*
917 * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset
918 * is the root objectid. We need to search the tree to get
919 * its parent bytenr.
920 */
921 root = read_fs_root(rc->extent_root->fs_info, key.offset);
922 if (IS_ERR(root)) {
923 err = PTR_ERR(root);
924 goto out;
925 }
926
927 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
928 cur->cowonly = 1;
929
930 if (btrfs_root_level(&root->root_item) == cur->level) {
931 /* tree root */
932 ASSERT(btrfs_root_bytenr(&root->root_item) ==
933 cur->bytenr);
934 if (should_ignore_root(root)) {
935 btrfs_put_root(root);
936 list_add(&cur->list, &useless);
937 } else {
938 cur->root = root;
939 }
940 break;
941 }
942
943 level = cur->level + 1;
944
945 /* Search the tree to find parent blocks referring to the block. */
946 path2->search_commit_root = 1;
947 path2->skip_locking = 1;
948 path2->lowest_level = level;
949 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
950 path2->lowest_level = 0;
951 if (ret < 0) {
952 btrfs_put_root(root);
953 err = ret;
954 goto out;
955 }
956 if (ret > 0 && path2->slots[level] > 0)
957 path2->slots[level]--;
958
959 eb = path2->nodes[level];
960 if (btrfs_node_blockptr(eb, path2->slots[level]) !=
961 cur->bytenr) {
962 btrfs_err(root->fs_info,
963 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
964 cur->bytenr, level - 1,
965 root->root_key.objectid,
966 node_key->objectid, node_key->type,
967 node_key->offset);
968 btrfs_put_root(root);
969 err = -ENOENT;
970 goto out;
971 }
972 lower = cur;
973 need_check = true;
974
975 /* Add all nodes and edges in the path */
976 for (; level < BTRFS_MAX_LEVEL; level++) {
977 if (!path2->nodes[level]) {
978 ASSERT(btrfs_root_bytenr(&root->root_item) ==
979 lower->bytenr);
980 if (should_ignore_root(root)) {
981 btrfs_put_root(root);
982 list_add(&lower->list, &useless);
983 } else {
984 lower->root = root;
985 }
986 break;
987 }
988
989 edge = alloc_backref_edge(cache);
990 if (!edge) {
991 btrfs_put_root(root);
992 err = -ENOMEM;
993 goto out;
994 }
995
996 eb = path2->nodes[level];
997 rb_node = tree_search(&cache->rb_root, eb->start);
998 if (!rb_node) {
999 upper = alloc_backref_node(cache);
1000 if (!upper) {
1001 btrfs_put_root(root);
1002 free_backref_edge(cache, edge);
1003 err = -ENOMEM;
1004 goto out;
1005 }
1006 upper->bytenr = eb->start;
1007 upper->owner = btrfs_header_owner(eb);
1008 upper->level = lower->level + 1;
1009 if (!test_bit(BTRFS_ROOT_REF_COWS,
1010 &root->state))
1011 upper->cowonly = 1;
1012
1013 /*
1014 * if we know the block isn't shared
1015 * we can avoid checking its backrefs.
1016 */
1017 if (btrfs_block_can_be_shared(root, eb))
1018 upper->checked = 0;
1019 else
1020 upper->checked = 1;
1021
1022 /*
1023 * add the block to the pending list if we
1024 * need to check its backrefs. we only do this once
1025 * while walking up a tree as we will catch
1026 * anything else later on.
1027 */
1028 if (!upper->checked && need_check) {
1029 need_check = false;
1030 list_add_tail(&edge->list[UPPER],
1031 &list);
1032 } else {
1033 if (upper->checked)
1034 need_check = true;
1035 INIT_LIST_HEAD(&edge->list[UPPER]);
1036 }
1037 } else {
1038 upper = rb_entry(rb_node, struct backref_node,
1039 rb_node);
1040 ASSERT(upper->checked);
1041 INIT_LIST_HEAD(&edge->list[UPPER]);
1042 if (!upper->owner)
1043 upper->owner = btrfs_header_owner(eb);
1044 }
1045 list_add_tail(&edge->list[LOWER], &lower->upper);
1046 edge->node[LOWER] = lower;
1047 edge->node[UPPER] = upper;
1048
1049 if (rb_node) {
1050 btrfs_put_root(root);
1051 break;
1052 }
1053 lower = upper;
1054 upper = NULL;
1055 }
1056 btrfs_release_path(path2);
1057 next:
1058 if (ptr < end) {
1059 ptr += btrfs_extent_inline_ref_size(key.type);
1060 if (ptr >= end) {
1061 WARN_ON(ptr > end);
1062 ptr = 0;
1063 end = 0;
1064 }
1065 }
1066 if (ptr >= end)
1067 path1->slots[0]++;
1068 }
1069 btrfs_release_path(path1);
1070
1071 cur->checked = 1;
1072 WARN_ON(exist);
1073
1074 /* the pending list isn't empty, take the first block to process */
1075 if (!list_empty(&list)) {
1076 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1077 list_del_init(&edge->list[UPPER]);
1078 cur = edge->node[UPPER];
1079 goto again;
1080 }
1081
1082 /*
1083 * everything went well, connect the backref nodes and insert
1084 * them into the cache.
1085 */
1086 ASSERT(node->checked);
1087 cowonly = node->cowonly;
1088 if (!cowonly) {
1089 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1090 &node->rb_node);
1091 if (rb_node)
1092 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1093 list_add_tail(&node->lower, &cache->leaves);
1094 }
1095
1096 list_for_each_entry(edge, &node->upper, list[LOWER])
1097 list_add_tail(&edge->list[UPPER], &list);
1098
1099 while (!list_empty(&list)) {
1100 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1101 list_del_init(&edge->list[UPPER]);
1102 upper = edge->node[UPPER];
1103 if (upper->detached) {
1104 list_del(&edge->list[LOWER]);
1105 lower = edge->node[LOWER];
1106 free_backref_edge(cache, edge);
1107 if (list_empty(&lower->upper))
1108 list_add(&lower->list, &useless);
1109 continue;
1110 }
1111
1112 if (!RB_EMPTY_NODE(&upper->rb_node)) {
1113 if (upper->lowest) {
1114 list_del_init(&upper->lower);
1115 upper->lowest = 0;
1116 }
1117
1118 list_add_tail(&edge->list[UPPER], &upper->lower);
1119 continue;
1120 }
1121
1122 if (!upper->checked) {
1123 /*
1124 * Still want to blow up for developers since this is a
1125 * logic bug.
1126 */
1127 ASSERT(0);
1128 err = -EINVAL;
1129 goto out;
1130 }
1131 if (cowonly != upper->cowonly) {
1132 ASSERT(0);
1133 err = -EINVAL;
1134 goto out;
1135 }
1136
1137 if (!cowonly) {
1138 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1139 &upper->rb_node);
1140 if (rb_node)
1141 backref_tree_panic(rb_node, -EEXIST,
1142 upper->bytenr);
1143 }
1144
1145 list_add_tail(&edge->list[UPPER], &upper->lower);
1146
1147 list_for_each_entry(edge, &upper->upper, list[LOWER])
1148 list_add_tail(&edge->list[UPPER], &list);
1149 }
1150 /*
1151 * process useless backref nodes. backref nodes for tree leaves
1152 * are deleted from the cache. backref nodes for upper level
1153 * tree blocks are left in the cache to avoid unnecessary backref
1154 * lookups.
1155 */
1156 while (!list_empty(&useless)) {
1157 upper = list_entry(useless.next, struct backref_node, list);
1158 list_del_init(&upper->list);
1159 ASSERT(list_empty(&upper->upper));
1160 if (upper == node)
1161 node = NULL;
1162 if (upper->lowest) {
1163 list_del_init(&upper->lower);
1164 upper->lowest = 0;
1165 }
1166 while (!list_empty(&upper->lower)) {
1167 edge = list_entry(upper->lower.next,
1168 struct backref_edge, list[UPPER]);
1169 list_del(&edge->list[UPPER]);
1170 list_del(&edge->list[LOWER]);
1171 lower = edge->node[LOWER];
1172 free_backref_edge(cache, edge);
1173
1174 if (list_empty(&lower->upper))
1175 list_add(&lower->list, &useless);
1176 }
1177 __mark_block_processed(rc, upper);
1178 if (upper->level > 0) {
1179 list_add(&upper->list, &cache->detached);
1180 upper->detached = 1;
1181 } else {
1182 rb_erase(&upper->rb_node, &cache->rb_root);
1183 free_backref_node(cache, upper);
1184 }
1185 }
1186 out:
1187 btrfs_free_path(path1);
1188 btrfs_free_path(path2);
1189 if (err) {
1190 while (!list_empty(&useless)) {
1191 lower = list_entry(useless.next,
1192 struct backref_node, list);
1193 list_del_init(&lower->list);
1194 }
1195 while (!list_empty(&list)) {
1196 edge = list_first_entry(&list, struct backref_edge,
1197 list[UPPER]);
1198 list_del(&edge->list[UPPER]);
1199 list_del(&edge->list[LOWER]);
1200 lower = edge->node[LOWER];
1201 upper = edge->node[UPPER];
1202 free_backref_edge(cache, edge);
1203
1204 /*
1205 * Lower is no longer linked to any upper backref nodes
1206 * and isn't in the cache, we can free it ourselves.
1207 */
1208 if (list_empty(&lower->upper) &&
1209 RB_EMPTY_NODE(&lower->rb_node))
1210 list_add(&lower->list, &useless);
1211
1212 if (!RB_EMPTY_NODE(&upper->rb_node))
1213 continue;
1214
1215 /* Add this guy's upper edges to the list to process */
1216 list_for_each_entry(edge, &upper->upper, list[LOWER])
1217 list_add_tail(&edge->list[UPPER], &list);
1218 if (list_empty(&upper->upper))
1219 list_add(&upper->list, &useless);
1220 }
1221
1222 while (!list_empty(&useless)) {
1223 lower = list_entry(useless.next,
1224 struct backref_node, list);
1225 list_del_init(&lower->list);
1226 if (lower == node)
1227 node = NULL;
1228 free_backref_node(cache, lower);
1229 }
1230
1231 remove_backref_node(cache, node);
1232 return ERR_PTR(err);
1233 }
1234 ASSERT(!node || !node->detached);
1235 return node;
1236 }
1237
1238 /*
1239 * helper to add a backref node for the newly created snapshot.
1240 * the backref node is created by cloning the backref node that
1241 * corresponds to the root of the source tree
1242 */
1243 static int clone_backref_node(struct btrfs_trans_handle *trans,
1244 struct reloc_control *rc,
1245 struct btrfs_root *src,
1246 struct btrfs_root *dest)
1247 {
1248 struct btrfs_root *reloc_root = src->reloc_root;
1249 struct backref_cache *cache = &rc->backref_cache;
1250 struct backref_node *node = NULL;
1251 struct backref_node *new_node;
1252 struct backref_edge *edge;
1253 struct backref_edge *new_edge;
1254 struct rb_node *rb_node;
1255
1256 if (cache->last_trans > 0)
1257 update_backref_cache(trans, cache);
1258
1259 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1260 if (rb_node) {
1261 node = rb_entry(rb_node, struct backref_node, rb_node);
1262 if (node->detached)
1263 node = NULL;
1264 else
1265 BUG_ON(node->new_bytenr != reloc_root->node->start);
1266 }
1267
1268 if (!node) {
1269 rb_node = tree_search(&cache->rb_root,
1270 reloc_root->commit_root->start);
1271 if (rb_node) {
1272 node = rb_entry(rb_node, struct backref_node,
1273 rb_node);
1274 BUG_ON(node->detached);
1275 }
1276 }
1277
1278 if (!node)
1279 return 0;
1280
1281 new_node = alloc_backref_node(cache);
1282 if (!new_node)
1283 return -ENOMEM;
1284
1285 new_node->bytenr = dest->node->start;
1286 new_node->level = node->level;
1287 new_node->lowest = node->lowest;
1288 new_node->checked = 1;
1289 new_node->root = btrfs_grab_root(dest);
1290 ASSERT(new_node->root);
1291
1292 if (!node->lowest) {
1293 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1294 new_edge = alloc_backref_edge(cache);
1295 if (!new_edge)
1296 goto fail;
1297
1298 new_edge->node[UPPER] = new_node;
1299 new_edge->node[LOWER] = edge->node[LOWER];
1300 list_add_tail(&new_edge->list[UPPER],
1301 &new_node->lower);
1302 }
1303 } else {
1304 list_add_tail(&new_node->lower, &cache->leaves);
1305 }
1306
1307 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1308 &new_node->rb_node);
1309 if (rb_node)
1310 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1311
1312 if (!new_node->lowest) {
1313 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1314 list_add_tail(&new_edge->list[LOWER],
1315 &new_edge->node[LOWER]->upper);
1316 }
1317 }
1318 return 0;
1319 fail:
1320 while (!list_empty(&new_node->lower)) {
1321 new_edge = list_entry(new_node->lower.next,
1322 struct backref_edge, list[UPPER]);
1323 list_del(&new_edge->list[UPPER]);
1324 free_backref_edge(cache, new_edge);
1325 }
1326 free_backref_node(cache, new_node);
1327 return -ENOMEM;
1328 }
1329
1330 /*
1331 * helper to add 'address of tree root -> reloc tree' mapping
1332 */
1333 static int __must_check __add_reloc_root(struct btrfs_root *root)
1334 {
1335 struct btrfs_fs_info *fs_info = root->fs_info;
1336 struct rb_node *rb_node;
1337 struct mapping_node *node;
1338 struct reloc_control *rc = fs_info->reloc_ctl;
1339
1340 node = kmalloc(sizeof(*node), GFP_NOFS);
1341 if (!node)
1342 return -ENOMEM;
1343
1344 node->bytenr = root->node->start;
1345 node->data = root;
1346
1347 spin_lock(&rc->reloc_root_tree.lock);
1348 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1349 node->bytenr, &node->rb_node);
1350 spin_unlock(&rc->reloc_root_tree.lock);
1351 if (rb_node) {
1352 btrfs_panic(fs_info, -EEXIST,
1353 "Duplicate root found for start=%llu while inserting into relocation tree",
1354 node->bytenr);
1355 }
1356
1357 list_add_tail(&root->root_list, &rc->reloc_roots);
1358 return 0;
1359 }
1360
1361 /*
1362 * helper to delete the 'address of tree root -> reloc tree'
1363 * mapping
1364 */
1365 static void __del_reloc_root(struct btrfs_root *root)
1366 {
1367 struct btrfs_fs_info *fs_info = root->fs_info;
1368 struct rb_node *rb_node;
1369 struct mapping_node *node = NULL;
1370 struct reloc_control *rc = fs_info->reloc_ctl;
1371
1372 if (rc && root->node) {
1373 spin_lock(&rc->reloc_root_tree.lock);
1374 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1375 root->node->start);
1376 if (rb_node) {
1377 node = rb_entry(rb_node, struct mapping_node, rb_node);
1378 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1379 }
1380 spin_unlock(&rc->reloc_root_tree.lock);
1381 if (!node)
1382 return;
1383 BUG_ON((struct btrfs_root *)node->data != root);
1384 }
1385
1386 spin_lock(&fs_info->trans_lock);
1387 list_del_init(&root->root_list);
1388 spin_unlock(&fs_info->trans_lock);
1389 kfree(node);
1390 }
1391
1392 /*
1393 * helper to update the 'address of tree root -> reloc tree'
1394 * mapping
1395 */
1396 static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
1397 {
1398 struct btrfs_fs_info *fs_info = root->fs_info;
1399 struct rb_node *rb_node;
1400 struct mapping_node *node = NULL;
1401 struct reloc_control *rc = fs_info->reloc_ctl;
1402
1403 spin_lock(&rc->reloc_root_tree.lock);
1404 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1405 root->node->start);
1406 if (rb_node) {
1407 node = rb_entry(rb_node, struct mapping_node, rb_node);
1408 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1409 }
1410 spin_unlock(&rc->reloc_root_tree.lock);
1411
1412 if (!node)
1413 return 0;
1414 BUG_ON((struct btrfs_root *)node->data != root);
1415
1416 spin_lock(&rc->reloc_root_tree.lock);
1417 node->bytenr = new_bytenr;
1418 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1419 node->bytenr, &node->rb_node);
1420 spin_unlock(&rc->reloc_root_tree.lock);
1421 if (rb_node)
1422 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1423 return 0;
1424 }
1425
1426 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1427 struct btrfs_root *root, u64 objectid)
1428 {
1429 struct btrfs_fs_info *fs_info = root->fs_info;
1430 struct btrfs_root *reloc_root;
1431 struct extent_buffer *eb;
1432 struct btrfs_root_item *root_item;
1433 struct btrfs_key root_key;
1434 int ret;
1435
1436 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1437 BUG_ON(!root_item);
1438
1439 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1440 root_key.type = BTRFS_ROOT_ITEM_KEY;
1441 root_key.offset = objectid;
1442
1443 if (root->root_key.objectid == objectid) {
1444 u64 commit_root_gen;
1445
1446 /* called by btrfs_init_reloc_root */
1447 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1448 BTRFS_TREE_RELOC_OBJECTID);
1449 BUG_ON(ret);
1450 /*
1451 * Set the last_snapshot field to the generation of the commit
1452 * root - this way ctree.c:btrfs_block_can_be_shared() behaves
1453 * correctly (returns true) when the relocation root is created
1454 * either inside the critical section of a transaction commit
1455 * (through transaction.c:qgroup_account_snapshot()) or when
1456 * it's created before the transaction commit is started.
1457 */
1458 commit_root_gen = btrfs_header_generation(root->commit_root);
1459 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
1460 } else {
1461 /*
1462 * called by btrfs_reloc_post_snapshot_hook.
1463 * the source tree is a reloc tree, all tree blocks
1464 * modified after it was created have the RELOC flag
1465 * set in their headers. so it's OK to not update
1466 * the 'last_snapshot'.
1467 */
1468 ret = btrfs_copy_root(trans, root, root->node, &eb,
1469 BTRFS_TREE_RELOC_OBJECTID);
1470 BUG_ON(ret);
1471 }
1472
1473 memcpy(root_item, &root->root_item, sizeof(*root_item));
1474 btrfs_set_root_bytenr(root_item, eb->start);
1475 btrfs_set_root_level(root_item, btrfs_header_level(eb));
1476 btrfs_set_root_generation(root_item, trans->transid);
1477
1478 if (root->root_key.objectid == objectid) {
1479 btrfs_set_root_refs(root_item, 0);
1480 memset(&root_item->drop_progress, 0,
1481 sizeof(struct btrfs_disk_key));
1482 root_item->drop_level = 0;
1483 }
1484
1485 btrfs_tree_unlock(eb);
1486 free_extent_buffer(eb);
1487
1488 ret = btrfs_insert_root(trans, fs_info->tree_root,
1489 &root_key, root_item);
1490 BUG_ON(ret);
1491 kfree(root_item);
1492
1493 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
1494 BUG_ON(IS_ERR(reloc_root));
1495 set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
1496 reloc_root->last_trans = trans->transid;
1497 return reloc_root;
1498 }
1499
1500 /*
1501 * create a reloc tree for a given fs tree. a reloc tree is just a
1502 * snapshot of the fs tree with a special root objectid.
1503 */
1504 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1505 struct btrfs_root *root)
1506 {
1507 struct btrfs_fs_info *fs_info = root->fs_info;
1508 struct btrfs_root *reloc_root;
1509 struct reloc_control *rc = fs_info->reloc_ctl;
1510 struct btrfs_block_rsv *rsv;
1511 int clear_rsv = 0;
1512 int ret;
1513
1514 /*
1515 * The subvolume has a reloc tree but the swap is finished, no need to
1516 * create/update the dead reloc tree
1517 */
1518 if (reloc_root_is_dead(root))
1519 return 0;
1520
1521 if (root->reloc_root) {
1522 reloc_root = root->reloc_root;
1523 reloc_root->last_trans = trans->transid;
1524 return 0;
1525 }
1526
1527 if (!rc || !rc->create_reloc_tree ||
1528 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1529 return 0;
1530
1531 if (!trans->reloc_reserved) {
1532 rsv = trans->block_rsv;
1533 trans->block_rsv = rc->block_rsv;
1534 clear_rsv = 1;
1535 }
1536 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1537 if (clear_rsv)
1538 trans->block_rsv = rsv;
1539
1540 ret = __add_reloc_root(reloc_root);
1541 BUG_ON(ret < 0);
1542 root->reloc_root = reloc_root;
1543 return 0;
1544 }
1545
1546 /*
1547 * update root item of reloc tree
1548 */
1549 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1550 struct btrfs_root *root)
1551 {
1552 struct btrfs_fs_info *fs_info = root->fs_info;
1553 struct btrfs_root *reloc_root;
1554 struct btrfs_root_item *root_item;
1555 int ret;
1556
1557 if (!have_reloc_root(root))
1558 goto out;
1559
1560 reloc_root = root->reloc_root;
1561 root_item = &reloc_root->root_item;
1562
1563 /* root->reloc_root will stay until the current relocation is finished */
1564 if (fs_info->reloc_ctl->merge_reloc_tree &&
1565 btrfs_root_refs(root_item) == 0) {
1566 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1567 /*
1568 * Mark the tree as dead before we change reloc_root so
1569 * have_reloc_root will not touch it from now on.
1570 */
1571 smp_wmb();
1572 __del_reloc_root(reloc_root);
1573 }
1574
1575 if (reloc_root->commit_root != reloc_root->node) {
1576 btrfs_set_root_node(root_item, reloc_root->node);
1577 free_extent_buffer(reloc_root->commit_root);
1578 reloc_root->commit_root = btrfs_root_node(reloc_root);
1579 }
1580
1581 ret = btrfs_update_root(trans, fs_info->tree_root,
1582 &reloc_root->root_key, root_item);
1583 BUG_ON(ret);
1584
1585 out:
1586 return 0;
1587 }
1588
1589 /*
1590 * helper to find the first cached inode with inode number >= objectid
1591 * in a subvolume
1592 */
1593 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1594 {
1595 struct rb_node *node;
1596 struct rb_node *prev;
1597 struct btrfs_inode *entry;
1598 struct inode *inode;
1599
1600 spin_lock(&root->inode_lock);
1601 again:
1602 node = root->inode_tree.rb_node;
1603 prev = NULL;
1604 while (node) {
1605 prev = node;
1606 entry = rb_entry(node, struct btrfs_inode, rb_node);
1607
1608 if (objectid < btrfs_ino(entry))
1609 node = node->rb_left;
1610 else if (objectid > btrfs_ino(entry))
1611 node = node->rb_right;
1612 else
1613 break;
1614 }
1615 if (!node) {
1616 while (prev) {
1617 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1618 if (objectid <= btrfs_ino(entry)) {
1619 node = prev;
1620 break;
1621 }
1622 prev = rb_next(prev);
1623 }
1624 }
1625 while (node) {
1626 entry = rb_entry(node, struct btrfs_inode, rb_node);
1627 inode = igrab(&entry->vfs_inode);
1628 if (inode) {
1629 spin_unlock(&root->inode_lock);
1630 return inode;
1631 }
1632
1633 objectid = btrfs_ino(entry) + 1;
1634 if (cond_resched_lock(&root->inode_lock))
1635 goto again;
1636
1637 node = rb_next(node);
1638 }
1639 spin_unlock(&root->inode_lock);
1640 return NULL;
1641 }
1642
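/* Return 1 if @bytenr falls inside @block_group's on-disk range. */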
1643 static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
1644 {
1645 if (bytenr >= block_group->start &&
1646 bytenr < block_group->start + block_group->length)
1647 return 1;
1648 return 0;
1649 }
1650
1651 /*
1652 * get new location of data
1653 */
1654 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1655 u64 bytenr, u64 num_bytes)
1656 {
1657 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1658 struct btrfs_path *path;
1659 struct btrfs_file_extent_item *fi;
1660 struct extent_buffer *leaf;
1661 int ret;
1662
1663 path = btrfs_alloc_path();
1664 if (!path)
1665 return -ENOMEM;
1666
1667 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1668 ret = btrfs_lookup_file_extent(NULL, root, path,
1669 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1670 if (ret < 0)
1671 goto out;
1672 if (ret > 0) {
1673 ret = -ENOENT;
1674 goto out;
1675 }
1676
1677 leaf = path->nodes[0];
1678 fi = btrfs_item_ptr(leaf, path->slots[0],
1679 struct btrfs_file_extent_item);
1680
1681 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1682 btrfs_file_extent_compression(leaf, fi) ||
1683 btrfs_file_extent_encryption(leaf, fi) ||
1684 btrfs_file_extent_other_encoding(leaf, fi));
1685
1686 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1687 ret = -EINVAL;
1688 goto out;
1689 }
1690
1691 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1692 ret = 0;
1693 out:
1694 btrfs_free_path(path);
1695 return ret;
1696 }
1697
1698 /*
1699 * update file extent items in the tree leaf to point to
1700 * the new locations.
1701 */
1702 static noinline_for_stack
1703 int replace_file_extents(struct btrfs_trans_handle *trans,
1704 struct reloc_control *rc,
1705 struct btrfs_root *root,
1706 struct extent_buffer *leaf)
1707 {
1708 struct btrfs_fs_info *fs_info = root->fs_info;
1709 struct btrfs_key key;
1710 struct btrfs_file_extent_item *fi;
1711 struct inode *inode = NULL;
1712 u64 parent;
1713 u64 bytenr;
1714 u64 new_bytenr = 0;
1715 u64 num_bytes;
1716 u64 end;
1717 u32 nritems;
1718 u32 i;
1719 int ret = 0;
1720 int first = 1;
1721 int dirty = 0;
1722
1723 if (rc->stage != UPDATE_DATA_PTRS)
1724 return 0;
1725
1726 /* reloc trees always use full backref */
1727 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1728 parent = leaf->start;
1729 else
1730 parent = 0;
1731
1732 nritems = btrfs_header_nritems(leaf);
1733 for (i = 0; i < nritems; i++) {
1734 struct btrfs_ref ref = { 0 };
1735
1736 cond_resched();
1737 btrfs_item_key_to_cpu(leaf, &key, i);
1738 if (key.type != BTRFS_EXTENT_DATA_KEY)
1739 continue;
1740 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1741 if (btrfs_file_extent_type(leaf, fi) ==
1742 BTRFS_FILE_EXTENT_INLINE)
1743 continue;
1744 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1745 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1746 if (bytenr == 0)
1747 continue;
1748 if (!in_block_group(bytenr, rc->block_group))
1749 continue;
1750
1751 /*
1752 * if we are modifying a block in the fs tree, wait for readpage
1753 * to complete and drop the extent cache
1754 */
1755 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1756 if (first) {
1757 inode = find_next_inode(root, key.objectid);
1758 first = 0;
1759 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1760 btrfs_add_delayed_iput(inode);
1761 inode = find_next_inode(root, key.objectid);
1762 }
1763 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1764 end = key.offset +
1765 btrfs_file_extent_num_bytes(leaf, fi);
1766 WARN_ON(!IS_ALIGNED(key.offset,
1767 fs_info->sectorsize));
1768 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1769 end--;
1770 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1771 key.offset, end);
1772 if (!ret)
1773 continue;
1774
1775 btrfs_drop_extent_cache(BTRFS_I(inode),
1776 key.offset, end, 1);
1777 unlock_extent(&BTRFS_I(inode)->io_tree,
1778 key.offset, end);
1779 }
1780 }
1781
1782 ret = get_new_location(rc->data_inode, &new_bytenr,
1783 bytenr, num_bytes);
1784 if (ret) {
1785 /*
1786 * Don't have to abort since we've not changed anything
1787 * in the file extent yet.
1788 */
1789 break;
1790 }
1791
1792 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1793 dirty = 1;
1794
1795 key.offset -= btrfs_file_extent_offset(leaf, fi);
1796 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1797 num_bytes, parent);
1798 ref.real_root = root->root_key.objectid;
1799 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1800 key.objectid, key.offset);
1801 ret = btrfs_inc_extent_ref(trans, &ref);
1802 if (ret) {
1803 btrfs_abort_transaction(trans, ret);
1804 break;
1805 }
1806
1807 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1808 num_bytes, parent);
1809 ref.real_root = root->root_key.objectid;
1810 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1811 key.objectid, key.offset);
1812 ret = btrfs_free_extent(trans, &ref);
1813 if (ret) {
1814 btrfs_abort_transaction(trans, ret);
1815 break;
1816 }
1817 }
1818 if (dirty)
1819 btrfs_mark_buffer_dirty(leaf);
1820 if (inode)
1821 btrfs_add_delayed_iput(inode);
1822 return ret;
1823 }
1824
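/*
 * Compare the node key at @slot of @eb with the key at the current slot
 * of @path at @level. Returns 0 iff the on-disk keys are identical
 * (plain memcmp() semantics).
 */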
1825 static noinline_for_stack
1826 int memcmp_node_keys(struct extent_buffer *eb, int slot,
1827 struct btrfs_path *path, int level)
1828 {
1829 struct btrfs_disk_key key1;
1830 struct btrfs_disk_key key2;
1831 btrfs_node_key(eb, &key1, slot);
1832 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1833 return memcmp(&key1, &key2, sizeof(key1));
1834 }
1835
1836 /*
1837 * try to replace tree blocks in the fs tree with the new blocks
1838 * in the reloc tree. tree blocks that haven't been modified since
1839 * the reloc tree was created can be replaced.
1840 *
1841 * if a block was replaced, the level of the block + 1 is returned.
1842 * if no block got replaced, 0 is returned. if there are other
1843 * errors, a negative error number is returned.
1844 */
1845 static noinline_for_stack
1846 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1847 struct btrfs_root *dest, struct btrfs_root *src,
1848 struct btrfs_path *path, struct btrfs_key *next_key,
1849 int lowest_level, int max_level)
1850 {
1851 struct btrfs_fs_info *fs_info = dest->fs_info;
1852 struct extent_buffer *eb;
1853 struct extent_buffer *parent;
1854 struct btrfs_ref ref = { 0 };
1855 struct btrfs_key key;
1856 u64 old_bytenr;
1857 u64 new_bytenr;
1858 u64 old_ptr_gen;
1859 u64 new_ptr_gen;
1860 u64 last_snapshot;
1861 u32 blocksize;
1862 int cow = 0;
1863 int level;
1864 int ret;
1865 int slot;
1866
1867 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1868 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1869
1870 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1871 again:
1872 slot = path->slots[lowest_level];
1873 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1874
1875 eb = btrfs_lock_root_node(dest);
1876 btrfs_set_lock_blocking_write(eb);
1877 level = btrfs_header_level(eb);
1878
1879 if (level < lowest_level) {
1880 btrfs_tree_unlock(eb);
1881 free_extent_buffer(eb);
1882 return 0;
1883 }
1884
1885 if (cow) {
1886 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1887 BUG_ON(ret);
1888 }
1889 btrfs_set_lock_blocking_write(eb);
1890
1891 if (next_key) {
1892 next_key->objectid = (u64)-1;
1893 next_key->type = (u8)-1;
1894 next_key->offset = (u64)-1;
1895 }
1896
1897 parent = eb;
1898 while (1) {
1899 struct btrfs_key first_key;
1900
1901 level = btrfs_header_level(parent);
1902 BUG_ON(level < lowest_level);
1903
1904 ret = btrfs_bin_search(parent, &key, level, &slot);
1905 if (ret < 0)
1906 break;
1907 if (ret && slot > 0)
1908 slot--;
1909
1910 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1911 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1912
1913 old_bytenr = btrfs_node_blockptr(parent, slot);
1914 blocksize = fs_info->nodesize;
1915 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1916 btrfs_node_key_to_cpu(parent, &first_key, slot);
1917
1918 if (level <= max_level) {
1919 eb = path->nodes[level];
1920 new_bytenr = btrfs_node_blockptr(eb,
1921 path->slots[level]);
1922 new_ptr_gen = btrfs_node_ptr_generation(eb,
1923 path->slots[level]);
1924 } else {
1925 new_bytenr = 0;
1926 new_ptr_gen = 0;
1927 }
1928
1929 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1930 ret = level;
1931 break;
1932 }
1933
1934 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1935 memcmp_node_keys(parent, slot, path, level)) {
1936 if (level <= lowest_level) {
1937 ret = 0;
1938 break;
1939 }
1940
1941 eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
1942 level - 1, &first_key);
1943 if (IS_ERR(eb)) {
1944 ret = PTR_ERR(eb);
1945 break;
1946 } else if (!extent_buffer_uptodate(eb)) {
1947 ret = -EIO;
1948 free_extent_buffer(eb);
1949 break;
1950 }
1951 btrfs_tree_lock(eb);
1952 if (cow) {
1953 ret = btrfs_cow_block(trans, dest, eb, parent,
1954 slot, &eb);
1955 BUG_ON(ret);
1956 }
1957 btrfs_set_lock_blocking_write(eb);
1958
1959 btrfs_tree_unlock(parent);
1960 free_extent_buffer(parent);
1961
1962 parent = eb;
1963 continue;
1964 }
1965
1966 if (!cow) {
1967 btrfs_tree_unlock(parent);
1968 free_extent_buffer(parent);
1969 cow = 1;
1970 goto again;
1971 }
1972
1973 btrfs_node_key_to_cpu(path->nodes[level], &key,
1974 path->slots[level]);
1975 btrfs_release_path(path);
1976
1977 path->lowest_level = level;
1978 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1979 path->lowest_level = 0;
1980 BUG_ON(ret);
1981
1982 /*
1983 * Inform qgroup to trace both subtrees.
1984 *
1985 * We must trace both trees.
1986 * 1) Tree reloc subtree
1987 * If not traced, we will leak data numbers
1988 * 2) Fs subtree
1989 * If not traced, we will double count old data
1990 *
1991 * We don't scan the subtree right now, but only record
1992 * the swapped tree blocks.
1993 * The real subtree rescan is delayed until we have new
1994 * CoW on the subtree root node before transaction commit.
1995 */
1996 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1997 rc->block_group, parent, slot,
1998 path->nodes[level], path->slots[level],
1999 last_snapshot);
2000 if (ret < 0)
2001 break;
2002 /*
2003 * swap blocks in fs tree and reloc tree.
2004 */
2005 btrfs_set_node_blockptr(parent, slot, new_bytenr);
2006 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
2007 btrfs_mark_buffer_dirty(parent);
2008
2009 btrfs_set_node_blockptr(path->nodes[level],
2010 path->slots[level], old_bytenr);
2011 btrfs_set_node_ptr_generation(path->nodes[level],
2012 path->slots[level], old_ptr_gen);
2013 btrfs_mark_buffer_dirty(path->nodes[level]);
2014
2015 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
2016 blocksize, path->nodes[level]->start);
2017 ref.skip_qgroup = true;
2018 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
2019 ret = btrfs_inc_extent_ref(trans, &ref);
2020 BUG_ON(ret);
2021 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
2022 blocksize, 0);
2023 ref.skip_qgroup = true;
2024 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
2025 ret = btrfs_inc_extent_ref(trans, &ref);
2026 BUG_ON(ret);
2027
2028 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
2029 blocksize, path->nodes[level]->start);
2030 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
2031 ref.skip_qgroup = true;
2032 ret = btrfs_free_extent(trans, &ref);
2033 BUG_ON(ret);
2034
2035 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
2036 blocksize, 0);
2037 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
2038 ref.skip_qgroup = true;
2039 ret = btrfs_free_extent(trans, &ref);
2040 BUG_ON(ret);
2041
2042 btrfs_unlock_up_safe(path, 0);
2043
2044 ret = level;
2045 break;
2046 }
2047 btrfs_tree_unlock(parent);
2048 free_extent_buffer(parent);
2049 return ret;
2050 }
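/*
 * [Editorial sketch, not part of the original file.] After the pointer swap
 * in replace_path() above, both trees transiently reference both subtrees,
 * so four delayed ref updates keep the extent tree consistent:
 *
 *   inc ref(old_bytenr, src)   - src tree now points at the old block
 *   inc ref(new_bytenr, dest)  - dest tree now points at the new block
 *   drop ref(new_bytenr, src)  - src tree no longer points at the new block
 *   drop ref(old_bytenr, dest) - dest tree no longer points at the old block
 *
 * A minimal model of the swap itself; the toy names below are invented for
 * illustration and exist nowhere in btrfs:
 */
#ifdef BTRFS_RELOC_EXAMPLES	/* never defined; illustration only */
struct toy_slot {
	u64 bytenr;	/* block pointer stored in the parent node */
	u64 gen;	/* generation of that pointer */
};

static void toy_swap_slots(struct toy_slot *dest_slot, struct toy_slot *src_slot)
{
	struct toy_slot tmp = *dest_slot;

	*dest_slot = *src_slot;	/* dest parent now points at the new block */
	*src_slot = tmp;	/* src node keeps the old block alive */
}
#endif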
2051
2052 /*
2053 * helper to find next relocated block in reloc tree
2054 */
2055 static noinline_for_stack
2056 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
2057 int *level)
2058 {
2059 struct extent_buffer *eb;
2060 int i;
2061 u64 last_snapshot;
2062 u32 nritems;
2063
2064 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2065
2066 for (i = 0; i < *level; i++) {
2067 free_extent_buffer(path->nodes[i]);
2068 path->nodes[i] = NULL;
2069 }
2070
2071 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
2072 eb = path->nodes[i];
2073 nritems = btrfs_header_nritems(eb);
2074 while (path->slots[i] + 1 < nritems) {
2075 path->slots[i]++;
2076 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
2077 last_snapshot)
2078 continue;
2079
2080 *level = i;
2081 return 0;
2082 }
2083 free_extent_buffer(path->nodes[i]);
2084 path->nodes[i] = NULL;
2085 }
2086 return 1;
2087 }
2088
2089 /*
2090 * walk down the reloc tree to find the relocated block of lowest level
2091 */
2092 static noinline_for_stack
2093 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
2094 int *level)
2095 {
2096 struct btrfs_fs_info *fs_info = root->fs_info;
2097 struct extent_buffer *eb = NULL;
2098 int i;
2099 u64 bytenr;
2100 u64 ptr_gen = 0;
2101 u64 last_snapshot;
2102 u32 nritems;
2103
2104 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2105
2106 for (i = *level; i > 0; i--) {
2107 struct btrfs_key first_key;
2108
2109 eb = path->nodes[i];
2110 nritems = btrfs_header_nritems(eb);
2111 while (path->slots[i] < nritems) {
2112 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
2113 if (ptr_gen > last_snapshot)
2114 break;
2115 path->slots[i]++;
2116 }
2117 if (path->slots[i] >= nritems) {
2118 if (i == *level)
2119 break;
2120 *level = i + 1;
2121 return 0;
2122 }
2123 if (i == 1) {
2124 *level = i;
2125 return 0;
2126 }
2127
2128 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
2129 btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
2130 eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
2131 &first_key);
2132 if (IS_ERR(eb)) {
2133 return PTR_ERR(eb);
2134 } else if (!extent_buffer_uptodate(eb)) {
2135 free_extent_buffer(eb);
2136 return -EIO;
2137 }
2138 BUG_ON(btrfs_header_level(eb) != i - 1);
2139 path->nodes[i - 1] = eb;
2140 path->slots[i - 1] = 0;
2141 }
2142 return 1;
2143 }
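/*
 * [Editorial sketch, not part of the original file.] walk_down_reloc_tree()
 * and walk_up_reloc_tree() are meant to be driven as a pair, as
 * merge_reloc_root() does below: walk down to the lowest block whose pointer
 * generation is newer than last_snapshot (i.e. a relocated block), process
 * it, then walk up to the next candidate. A return of 1 from either walker
 * means the whole tree has been covered. A hypothetical, simplified skeleton
 * of that driver loop:
 */
#ifdef BTRFS_RELOC_EXAMPLES	/* never defined; illustration only */
static int toy_walk_all(struct btrfs_root *reloc_root, struct btrfs_path *path,
			int level)
{
	int ret;

	while (1) {
		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			return ret;	/* read error */
		if (ret > 0)
			break;		/* nothing newer than last_snapshot */

		/* ... process the block at path->nodes[level] here ... */

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;		/* walked off the top of the tree */
	}
	return 0;
}
#endif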
2144
2145 /*
2146 * invalidate the extent cache for file extents whose keys are in the
2147 * range [min_key, max_key)
2148 */
2149 static int invalidate_extent_cache(struct btrfs_root *root,
2150 struct btrfs_key *min_key,
2151 struct btrfs_key *max_key)
2152 {
2153 struct btrfs_fs_info *fs_info = root->fs_info;
2154 struct inode *inode = NULL;
2155 u64 objectid;
2156 u64 start, end;
2157 u64 ino;
2158
2159 objectid = min_key->objectid;
2160 while (1) {
2161 cond_resched();
2162 iput(inode);
2163
2164 if (objectid > max_key->objectid)
2165 break;
2166
2167 inode = find_next_inode(root, objectid);
2168 if (!inode)
2169 break;
2170 ino = btrfs_ino(BTRFS_I(inode));
2171
2172 if (ino > max_key->objectid) {
2173 iput(inode);
2174 break;
2175 }
2176
2177 objectid = ino + 1;
2178 if (!S_ISREG(inode->i_mode))
2179 continue;
2180
2181 if (unlikely(min_key->objectid == ino)) {
2182 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
2183 continue;
2184 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
2185 start = 0;
2186 else {
2187 start = min_key->offset;
2188 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
2189 }
2190 } else {
2191 start = 0;
2192 }
2193
2194 if (unlikely(max_key->objectid == ino)) {
2195 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
2196 continue;
2197 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
2198 end = (u64)-1;
2199 } else {
2200 if (max_key->offset == 0)
2201 continue;
2202 end = max_key->offset;
2203 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
2204 end--;
2205 }
2206 } else {
2207 end = (u64)-1;
2208 }
2209
2210 /* lock_extent() waits for readpage to complete */
2211 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2212 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
2213 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2214 }
2215 return 0;
2216 }
2217
2218 static int find_next_key(struct btrfs_path *path, int level,
2219 struct btrfs_key *key)
2220
2221 {
2222 while (level < BTRFS_MAX_LEVEL) {
2223 if (!path->nodes[level])
2224 break;
2225 if (path->slots[level] + 1 <
2226 btrfs_header_nritems(path->nodes[level])) {
2227 btrfs_node_key_to_cpu(path->nodes[level], key,
2228 path->slots[level] + 1);
2229 return 0;
2230 }
2231 level++;
2232 }
2233 return 1;
2234 }
2235
2236 /*
2237 * Insert current subvolume into reloc_control::dirty_subvol_roots
2238 */
2239 static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
2240 struct reloc_control *rc,
2241 struct btrfs_root *root)
2242 {
2243 struct btrfs_root *reloc_root = root->reloc_root;
2244 struct btrfs_root_item *reloc_root_item;
2245
2246 /* @root must be a subvolume tree root with a valid reloc tree */
2247 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
2248 ASSERT(reloc_root);
2249
2250 reloc_root_item = &reloc_root->root_item;
2251 memset(&reloc_root_item->drop_progress, 0,
2252 sizeof(reloc_root_item->drop_progress));
2253 reloc_root_item->drop_level = 0;
2254 btrfs_set_root_refs(reloc_root_item, 0);
2255 btrfs_update_reloc_root(trans, root);
2256
2257 if (list_empty(&root->reloc_dirty_list)) {
2258 btrfs_grab_root(root);
2259 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
2260 }
2261 }
2262
2263 static int clean_dirty_subvols(struct reloc_control *rc)
2264 {
2265 struct btrfs_root *root;
2266 struct btrfs_root *next;
2267 int ret = 0;
2268 int ret2;
2269
2270 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
2271 reloc_dirty_list) {
2272 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2273 /* Merged subvolume, cleanup its reloc root */
2274 struct btrfs_root *reloc_root = root->reloc_root;
2275
2276 list_del_init(&root->reloc_dirty_list);
2277 root->reloc_root = NULL;
2278 if (reloc_root) {
2279
2280 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
2281 if (ret2 < 0 && !ret)
2282 ret = ret2;
2283 }
2284 /*
2285 * Need barrier to ensure clear_bit() only happens after
2286 * root->reloc_root = NULL. Pairs with have_reloc_root.
2287 */
2288 smp_wmb();
2289 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
2290 btrfs_put_root(root);
2291 } else {
2292 /* Orphan reloc tree, just clean it up */
2293 ret2 = btrfs_drop_snapshot(root, 0, 1);
2294 if (ret2 < 0 && !ret)
2295 ret = ret2;
2296 }
2297 }
2298 return ret;
2299 }
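/*
 * [Editorial sketch, not part of the original file.] The smp_wmb() in
 * clean_dirty_subvols() above orders "root->reloc_root = NULL" before
 * clearing BTRFS_ROOT_DEAD_RELOC_TREE. A reader that checks the bit first
 * and the pointer second relies on that pairing, roughly like this (a
 * simplified model of have_reloc_root(), not its verbatim body):
 */
#ifdef BTRFS_RELOC_EXAMPLES	/* never defined; illustration only */
static bool toy_have_reloc_root(struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return false;
	/* Pairs with the smp_wmb() in clean_dirty_subvols() */
	smp_rmb();
	return root->reloc_root != NULL;
}
#endif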
2300
2301 /*
2302 * merge the relocated tree blocks in reloc tree with corresponding
2303 * fs tree.
2304 */
2305 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2306 struct btrfs_root *root)
2307 {
2308 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2309 struct btrfs_key key;
2310 struct btrfs_key next_key;
2311 struct btrfs_trans_handle *trans = NULL;
2312 struct btrfs_root *reloc_root;
2313 struct btrfs_root_item *root_item;
2314 struct btrfs_path *path;
2315 struct extent_buffer *leaf;
2316 int level;
2317 int max_level;
2318 int replaced = 0;
2319 int ret;
2320 int err = 0;
2321 u32 min_reserved;
2322
2323 path = btrfs_alloc_path();
2324 if (!path)
2325 return -ENOMEM;
2326 path->reada = READA_FORWARD;
2327
2328 reloc_root = root->reloc_root;
2329 root_item = &reloc_root->root_item;
2330
2331 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2332 level = btrfs_root_level(root_item);
2333 atomic_inc(&reloc_root->node->refs);
2334 path->nodes[level] = reloc_root->node;
2335 path->slots[level] = 0;
2336 } else {
2337 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2338
2339 level = root_item->drop_level;
2340 BUG_ON(level == 0);
2341 path->lowest_level = level;
2342 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
2343 path->lowest_level = 0;
2344 if (ret < 0) {
2345 btrfs_free_path(path);
2346 return ret;
2347 }
2348
2349 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2350 path->slots[level]);
2351 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2352
2353 btrfs_unlock_up_safe(path, 0);
2354 }
2355
2356 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2357 memset(&next_key, 0, sizeof(next_key));
2358
2359 while (1) {
2360 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2361 BTRFS_RESERVE_FLUSH_ALL);
2362 if (ret) {
2363 err = ret;
2364 goto out;
2365 }
2366 trans = btrfs_start_transaction(root, 0);
2367 if (IS_ERR(trans)) {
2368 err = PTR_ERR(trans);
2369 trans = NULL;
2370 goto out;
2371 }
2372 trans->block_rsv = rc->block_rsv;
2373
2374 replaced = 0;
2375 max_level = level;
2376
2377 ret = walk_down_reloc_tree(reloc_root, path, &level);
2378 if (ret < 0) {
2379 err = ret;
2380 goto out;
2381 }
2382 if (ret > 0)
2383 break;
2384
2385 if (!find_next_key(path, level, &key) &&
2386 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2387 ret = 0;
2388 } else {
2389 ret = replace_path(trans, rc, root, reloc_root, path,
2390 &next_key, level, max_level);
2391 }
2392 if (ret < 0) {
2393 err = ret;
2394 goto out;
2395 }
2396
2397 if (ret > 0) {
2398 level = ret;
2399 btrfs_node_key_to_cpu(path->nodes[level], &key,
2400 path->slots[level]);
2401 replaced = 1;
2402 }
2403
2404 ret = walk_up_reloc_tree(reloc_root, path, &level);
2405 if (ret > 0)
2406 break;
2407
2408 BUG_ON(level == 0);
2409 /*
2410 * save the merging progress in the drop_progress.
2411 * this is OK since root refs == 1 in this case.
2412 */
2413 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2414 path->slots[level]);
2415 root_item->drop_level = level;
2416
2417 btrfs_end_transaction_throttle(trans);
2418 trans = NULL;
2419
2420 btrfs_btree_balance_dirty(fs_info);
2421
2422 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2423 invalidate_extent_cache(root, &key, &next_key);
2424 }
2425
2426 /*
2427 * handle the case where only one block in the fs tree needs to be
2428 * relocated and that block is the tree root.
2429 */
2430 leaf = btrfs_lock_root_node(root);
2431 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2432 btrfs_tree_unlock(leaf);
2433 free_extent_buffer(leaf);
2434 if (ret < 0)
2435 err = ret;
2436 out:
2437 btrfs_free_path(path);
2438
2439 if (err == 0)
2440 insert_dirty_subvol(trans, rc, root);
2441
2442 if (trans)
2443 btrfs_end_transaction_throttle(trans);
2444
2445 btrfs_btree_balance_dirty(fs_info);
2446
2447 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2448 invalidate_extent_cache(root, &key, &next_key);
2449
2450 return err;
2451 }
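/*
 * [Editorial worked example, not part of the original file.] The
 * per-iteration reservation above is
 * min_reserved = nodesize * (BTRFS_MAX_LEVEL - 1) * 2: enough metadata space
 * to CoW one full-height path in both the fs tree and the reloc tree. With a
 * 16KiB nodesize and BTRFS_MAX_LEVEL == 8 that is 16384 * 7 * 2 = 229376
 * bytes (224KiB) refilled before each pass of the merge loop.
 */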
2452
2453 static noinline_for_stack
2454 int prepare_to_merge(struct reloc_control *rc, int err)
2455 {
2456 struct btrfs_root *root = rc->extent_root;
2457 struct btrfs_fs_info *fs_info = root->fs_info;
2458 struct btrfs_root *reloc_root;
2459 struct btrfs_trans_handle *trans;
2460 LIST_HEAD(reloc_roots);
2461 u64 num_bytes = 0;
2462 int ret;
2463
2464 mutex_lock(&fs_info->reloc_mutex);
2465 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2466 rc->merging_rsv_size += rc->nodes_relocated * 2;
2467 mutex_unlock(&fs_info->reloc_mutex);
2468
2469 again:
2470 if (!err) {
2471 num_bytes = rc->merging_rsv_size;
2472 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2473 BTRFS_RESERVE_FLUSH_ALL);
2474 if (ret)
2475 err = ret;
2476 }
2477
2478 trans = btrfs_join_transaction(rc->extent_root);
2479 if (IS_ERR(trans)) {
2480 if (!err)
2481 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2482 num_bytes, NULL);
2483 return PTR_ERR(trans);
2484 }
2485
2486 if (!err) {
2487 if (num_bytes != rc->merging_rsv_size) {
2488 btrfs_end_transaction(trans);
2489 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2490 num_bytes, NULL);
2491 goto again;
2492 }
2493 }
2494
2495 rc->merge_reloc_tree = 1;
2496
2497 while (!list_empty(&rc->reloc_roots)) {
2498 reloc_root = list_entry(rc->reloc_roots.next,
2499 struct btrfs_root, root_list);
2500 list_del_init(&reloc_root->root_list);
2501
2502 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2503 BUG_ON(IS_ERR(root));
2504 BUG_ON(root->reloc_root != reloc_root);
2505
2506 /*
2507 * set the reference count to 1, so btrfs_recover_relocation
2508 * knows it should resume merging
2509 */
2510 if (!err)
2511 btrfs_set_root_refs(&reloc_root->root_item, 1);
2512 btrfs_update_reloc_root(trans, root);
2513
2514 list_add(&reloc_root->root_list, &reloc_roots);
2515 btrfs_put_root(root);
2516 }
2517
2518 list_splice(&reloc_roots, &rc->reloc_roots);
2519
2520 if (!err)
2521 btrfs_commit_transaction(trans);
2522 else
2523 btrfs_end_transaction(trans);
2524 return err;
2525 }
2526
2527 static noinline_for_stack
2528 void free_reloc_roots(struct list_head *list)
2529 {
2530 struct btrfs_root *reloc_root;
2531
2532 while (!list_empty(list)) {
2533 reloc_root = list_entry(list->next, struct btrfs_root,
2534 root_list);
2535 __del_reloc_root(reloc_root);
2536 free_extent_buffer(reloc_root->node);
2537 free_extent_buffer(reloc_root->commit_root);
2538 reloc_root->node = NULL;
2539 reloc_root->commit_root = NULL;
2540 }
2541 }
2542
2543 static noinline_for_stack
2544 void merge_reloc_roots(struct reloc_control *rc)
2545 {
2546 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2547 struct btrfs_root *root;
2548 struct btrfs_root *reloc_root;
2549 LIST_HEAD(reloc_roots);
2550 int found = 0;
2551 int ret = 0;
2552 again:
2553 root = rc->extent_root;
2554
2555 /*
2556 * this serializes us with btrfs_record_root_in_transaction;
2557 * we have to make sure nobody is in the middle of
2558 * adding their roots to the list while we are
2559 * doing this splice
2560 */
2561 mutex_lock(&fs_info->reloc_mutex);
2562 list_splice_init(&rc->reloc_roots, &reloc_roots);
2563 mutex_unlock(&fs_info->reloc_mutex);
2564
2565 while (!list_empty(&reloc_roots)) {
2566 found = 1;
2567 reloc_root = list_entry(reloc_roots.next,
2568 struct btrfs_root, root_list);
2569
2570 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2571 root = read_fs_root(fs_info,
2572 reloc_root->root_key.offset);
2573 BUG_ON(IS_ERR(root));
2574 BUG_ON(root->reloc_root != reloc_root);
2575
2576 ret = merge_reloc_root(rc, root);
2577 btrfs_put_root(root);
2578 if (ret) {
2579 if (list_empty(&reloc_root->root_list))
2580 list_add_tail(&reloc_root->root_list,
2581 &reloc_roots);
2582 goto out;
2583 }
2584 } else {
2585 list_del_init(&reloc_root->root_list);
2586 /* Don't forget to queue this reloc root for cleanup */
2587 list_add_tail(&reloc_root->reloc_dirty_list,
2588 &rc->dirty_subvol_roots);
2589 }
2590 }
2591
2592 if (found) {
2593 found = 0;
2594 goto again;
2595 }
2596 out:
2597 if (ret) {
2598 btrfs_handle_fs_error(fs_info, ret, NULL);
2599 if (!list_empty(&reloc_roots))
2600 free_reloc_roots(&reloc_roots);
2601
2602 /* new reloc root may be added */
2603 mutex_lock(&fs_info->reloc_mutex);
2604 list_splice_init(&rc->reloc_roots, &reloc_roots);
2605 mutex_unlock(&fs_info->reloc_mutex);
2606 if (!list_empty(&reloc_roots))
2607 free_reloc_roots(&reloc_roots);
2608 }
2609
2610 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2611 }
2612
2613 static void free_block_list(struct rb_root *blocks)
2614 {
2615 struct tree_block *block;
2616 struct rb_node *rb_node;
2617 while ((rb_node = rb_first(blocks))) {
2618 block = rb_entry(rb_node, struct tree_block, rb_node);
2619 rb_erase(rb_node, blocks);
2620 kfree(block);
2621 }
2622 }
2623
2624 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2625 struct btrfs_root *reloc_root)
2626 {
2627 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2628 struct btrfs_root *root;
2629 int ret;
2630
2631 if (reloc_root->last_trans == trans->transid)
2632 return 0;
2633
2634 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2635 BUG_ON(IS_ERR(root));
2636 BUG_ON(root->reloc_root != reloc_root);
2637 ret = btrfs_record_root_in_trans(trans, root);
2638 btrfs_put_root(root);
2639
2640 return ret;
2641 }
2642
2643 static noinline_for_stack
2644 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2645 struct reloc_control *rc,
2646 struct backref_node *node,
2647 struct backref_edge *edges[])
2648 {
2649 struct backref_node *next;
2650 struct btrfs_root *root;
2651 int index = 0;
2652
2653 next = node;
2654 while (1) {
2655 cond_resched();
2656 next = walk_up_backref(next, edges, &index);
2657 root = next->root;
2658 BUG_ON(!root);
2659 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
2660
2661 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2662 record_reloc_root_in_trans(trans, root);
2663 break;
2664 }
2665
2666 btrfs_record_root_in_trans(trans, root);
2667 root = root->reloc_root;
2668
2669 if (next->new_bytenr != root->node->start) {
2670 BUG_ON(next->new_bytenr);
2671 BUG_ON(!list_empty(&next->list));
2672 next->new_bytenr = root->node->start;
2673 btrfs_put_root(next->root);
2674 next->root = btrfs_grab_root(root);
2675 ASSERT(next->root);
2676 list_add_tail(&next->list,
2677 &rc->backref_cache.changed);
2678 __mark_block_processed(rc, next);
2679 break;
2680 }
2681
2682 WARN_ON(1);
2683 root = NULL;
2684 next = walk_down_backref(edges, &index);
2685 if (!next || next->level <= node->level)
2686 break;
2687 }
2688 if (!root)
2689 return NULL;
2690
2691 next = node;
2692 /* setup backref node path for btrfs_reloc_cow_block */
2693 while (1) {
2694 rc->backref_cache.path[next->level] = next;
2695 if (--index < 0)
2696 break;
2697 next = edges[index]->node[UPPER];
2698 }
2699 return root;
2700 }
2701
2702 /*
2703 * select a tree root for relocation. return NULL if the block
2704 * is reference counted; we should use do_relocation() in that
2705 * case. return a tree root pointer if the block isn't reference
2706 * counted. return -ENOENT if the block is the root of a reloc tree.
2707 */
2708 static noinline_for_stack
2709 struct btrfs_root *select_one_root(struct backref_node *node)
2710 {
2711 struct backref_node *next;
2712 struct btrfs_root *root;
2713 struct btrfs_root *fs_root = NULL;
2714 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2715 int index = 0;
2716
2717 next = node;
2718 while (1) {
2719 cond_resched();
2720 next = walk_up_backref(next, edges, &index);
2721 root = next->root;
2722 BUG_ON(!root);
2723
2724 /* no other choice for a non-reference counted tree */
2725 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
2726 return root;
2727
2728 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2729 fs_root = root;
2730
2731 if (next != node)
2732 return NULL;
2733
2734 next = walk_down_backref(edges, &index);
2735 if (!next || next->level <= node->level)
2736 break;
2737 }
2738
2739 if (!fs_root)
2740 return ERR_PTR(-ENOENT);
2741 return fs_root;
2742 }
2743
2744 static noinline_for_stack
2745 u64 calcu_metadata_size(struct reloc_control *rc,
2746 struct backref_node *node, int reserve)
2747 {
2748 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2749 struct backref_node *next = node;
2750 struct backref_edge *edge;
2751 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2752 u64 num_bytes = 0;
2753 int index = 0;
2754
2755 BUG_ON(reserve && node->processed);
2756
2757 while (next) {
2758 cond_resched();
2759 while (1) {
2760 if (next->processed && (reserve || next != node))
2761 break;
2762
2763 num_bytes += fs_info->nodesize;
2764
2765 if (list_empty(&next->upper))
2766 break;
2767
2768 edge = list_entry(next->upper.next,
2769 struct backref_edge, list[LOWER]);
2770 edges[index++] = edge;
2771 next = edge->node[UPPER];
2772 }
2773 next = walk_down_backref(edges, &index);
2774 }
2775 return num_bytes;
2776 }
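/*
 * [Editorial worked example, not part of the original file.]
 * calcu_metadata_size() charges one nodesize per not-yet-processed block
 * reachable upwards from @node. E.g. for a leaf with three unprocessed
 * ancestors on a 16KiB-nodesize filesystem it returns 4 * 16384 = 65536
 * bytes; reserve_metadata_space() below then doubles that, since CoW may
 * touch both the fs tree and the reloc tree copies.
 */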
2777
2778 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2779 struct reloc_control *rc,
2780 struct backref_node *node)
2781 {
2782 struct btrfs_root *root = rc->extent_root;
2783 struct btrfs_fs_info *fs_info = root->fs_info;
2784 u64 num_bytes;
2785 int ret;
2786 u64 tmp;
2787
2788 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2789
2790 trans->block_rsv = rc->block_rsv;
2791 rc->reserved_bytes += num_bytes;
2792
2793 /*
2794 * We are under a transaction here so we can only do limited flushing.
2795 * If we get an ENOSPC just kick back -EAGAIN so we know to drop the
2796 * transaction and try to refill when we can flush all the things.
2797 */
2798 ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
2799 BTRFS_RESERVE_FLUSH_LIMIT);
2800 if (ret) {
2801 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2802 while (tmp <= rc->reserved_bytes)
2803 tmp <<= 1;
2804 /*
2805 * only one thread can access block_rsv at this point,
2806 * so we don't need to hold a lock to protect it.
2807 * we expand the reservation size here to allow enough
2808 * space for relocation, and we will return early in
2809 * the ENOSPC case.
2810 */
2811 rc->block_rsv->size = tmp + fs_info->nodesize *
2812 RELOCATION_RESERVED_NODES;
2813 return -EAGAIN;
2814 }
2815
2816 return 0;
2817 }
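/*
 * [Editorial worked example, not part of the original file.] On -EAGAIN
 * above, the rsv size is grown geometrically: tmp starts at
 * nodesize * RELOCATION_RESERVED_NODES and is doubled until it exceeds
 * rc->reserved_bytes, then the rsv target becomes tmp plus one more batch of
 * reserved nodes. This keeps the number of "grow and retry" round trips
 * logarithmic in the amount of metadata the current backref tree turns out
 * to need.
 */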
2818
2819 /*
2820 * relocate a tree block, and then update pointers in upper level
2821 * blocks that reference the block to point to the new location.
2822 *
2823 * if called by link_to_upper, the block has already been relocated.
2824 * in that case this function just updates pointers.
2825 */
2826 static int do_relocation(struct btrfs_trans_handle *trans,
2827 struct reloc_control *rc,
2828 struct backref_node *node,
2829 struct btrfs_key *key,
2830 struct btrfs_path *path, int lowest)
2831 {
2832 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2833 struct backref_node *upper;
2834 struct backref_edge *edge;
2835 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2836 struct btrfs_root *root;
2837 struct extent_buffer *eb;
2838 u32 blocksize;
2839 u64 bytenr;
2840 u64 generation;
2841 int slot;
2842 int ret;
2843 int err = 0;
2844
2845 BUG_ON(lowest && node->eb);
2846
2847 path->lowest_level = node->level + 1;
2848 rc->backref_cache.path[node->level] = node;
2849 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2850 struct btrfs_key first_key;
2851 struct btrfs_ref ref = { 0 };
2852
2853 cond_resched();
2854
2855 upper = edge->node[UPPER];
2856 root = select_reloc_root(trans, rc, upper, edges);
2857 BUG_ON(!root);
2858
2859 if (upper->eb && !upper->locked) {
2860 if (!lowest) {
2861 ret = btrfs_bin_search(upper->eb, key,
2862 upper->level, &slot);
2863 if (ret < 0) {
2864 err = ret;
2865 goto next;
2866 }
2867 BUG_ON(ret);
2868 bytenr = btrfs_node_blockptr(upper->eb, slot);
2869 if (node->eb->start == bytenr)
2870 goto next;
2871 }
2872 drop_node_buffer(upper);
2873 }
2874
2875 if (!upper->eb) {
2876 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2877 if (ret) {
2878 if (ret < 0)
2879 err = ret;
2880 else
2881 err = -ENOENT;
2882
2883 btrfs_release_path(path);
2884 break;
2885 }
2886
2887 if (!upper->eb) {
2888 upper->eb = path->nodes[upper->level];
2889 path->nodes[upper->level] = NULL;
2890 } else {
2891 BUG_ON(upper->eb != path->nodes[upper->level]);
2892 }
2893
2894 upper->locked = 1;
2895 path->locks[upper->level] = 0;
2896
2897 slot = path->slots[upper->level];
2898 btrfs_release_path(path);
2899 } else {
2900 ret = btrfs_bin_search(upper->eb, key, upper->level,
2901 &slot);
2902 if (ret < 0) {
2903 err = ret;
2904 goto next;
2905 }
2906 BUG_ON(ret);
2907 }
2908
2909 bytenr = btrfs_node_blockptr(upper->eb, slot);
2910 if (lowest) {
2911 if (bytenr != node->bytenr) {
2912 btrfs_err(root->fs_info,
2913 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2914 bytenr, node->bytenr, slot,
2915 upper->eb->start);
2916 err = -EIO;
2917 goto next;
2918 }
2919 } else {
2920 if (node->eb->start == bytenr)
2921 goto next;
2922 }
2923
2924 blocksize = root->fs_info->nodesize;
2925 generation = btrfs_node_ptr_generation(upper->eb, slot);
2926 btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
2927 eb = read_tree_block(fs_info, bytenr, generation,
2928 upper->level - 1, &first_key);
2929 if (IS_ERR(eb)) {
2930 err = PTR_ERR(eb);
2931 goto next;
2932 } else if (!extent_buffer_uptodate(eb)) {
2933 free_extent_buffer(eb);
2934 err = -EIO;
2935 goto next;
2936 }
2937 btrfs_tree_lock(eb);
2938 btrfs_set_lock_blocking_write(eb);
2939
2940 if (!node->eb) {
2941 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2942 slot, &eb);
2943 btrfs_tree_unlock(eb);
2944 free_extent_buffer(eb);
2945 if (ret < 0) {
2946 err = ret;
2947 goto next;
2948 }
2949 BUG_ON(node->eb != eb);
2950 } else {
2951 btrfs_set_node_blockptr(upper->eb, slot,
2952 node->eb->start);
2953 btrfs_set_node_ptr_generation(upper->eb, slot,
2954 trans->transid);
2955 btrfs_mark_buffer_dirty(upper->eb);
2956
2957 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2958 node->eb->start, blocksize,
2959 upper->eb->start);
2960 ref.real_root = root->root_key.objectid;
2961 btrfs_init_tree_ref(&ref, node->level,
2962 btrfs_header_owner(upper->eb));
2963 ret = btrfs_inc_extent_ref(trans, &ref);
2964 BUG_ON(ret);
2965
2966 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2967 BUG_ON(ret);
2968 }
2969 next:
2970 if (!upper->pending)
2971 drop_node_buffer(upper);
2972 else
2973 unlock_node_buffer(upper);
2974 if (err)
2975 break;
2976 }
2977
2978 if (!err && node->pending) {
2979 drop_node_buffer(node);
2980 list_move_tail(&node->list, &rc->backref_cache.changed);
2981 node->pending = 0;
2982 }
2983
2984 path->lowest_level = 0;
2985 BUG_ON(err == -ENOSPC);
2986 return err;
2987 }
2988
2989 static int link_to_upper(struct btrfs_trans_handle *trans,
2990 struct reloc_control *rc,
2991 struct backref_node *node,
2992 struct btrfs_path *path)
2993 {
2994 struct btrfs_key key;
2995
2996 btrfs_node_key_to_cpu(node->eb, &key, 0);
2997 return do_relocation(trans, rc, node, &key, path, 0);
2998 }
2999
3000 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
3001 struct reloc_control *rc,
3002 struct btrfs_path *path, int err)
3003 {
3004 LIST_HEAD(list);
3005 struct backref_cache *cache = &rc->backref_cache;
3006 struct backref_node *node;
3007 int level;
3008 int ret;
3009
3010 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
3011 while (!list_empty(&cache->pending[level])) {
3012 node = list_entry(cache->pending[level].next,
3013 struct backref_node, list);
3014 list_move_tail(&node->list, &list);
3015 BUG_ON(!node->pending);
3016
3017 if (!err) {
3018 ret = link_to_upper(trans, rc, node, path);
3019 if (ret < 0)
3020 err = ret;
3021 }
3022 }
3023 list_splice_init(&list, &cache->pending[level]);
3024 }
3025 return err;
3026 }
3027
3028 static void mark_block_processed(struct reloc_control *rc,
3029 u64 bytenr, u32 blocksize)
3030 {
3031 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
3032 EXTENT_DIRTY);
3033 }
3034
3035 static void __mark_block_processed(struct reloc_control *rc,
3036 struct backref_node *node)
3037 {
3038 u32 blocksize;
3039 if (node->level == 0 ||
3040 in_block_group(node->bytenr, rc->block_group)) {
3041 blocksize = rc->extent_root->fs_info->nodesize;
3042 mark_block_processed(rc, node->bytenr, blocksize);
3043 }
3044 node->processed = 1;
3045 }
3046
3047 /*
3048 * mark a block and all blocks that directly/indirectly reference it
3049 * as processed.
3050 */
3051 static void update_processed_blocks(struct reloc_control *rc,
3052 struct backref_node *node)
3053 {
3054 struct backref_node *next = node;
3055 struct backref_edge *edge;
3056 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
3057 int index = 0;
3058
3059 while (next) {
3060 cond_resched();
3061 while (1) {
3062 if (next->processed)
3063 break;
3064
3065 __mark_block_processed(rc, next);
3066
3067 if (list_empty(&next->upper))
3068 break;
3069
3070 edge = list_entry(next->upper.next,
3071 struct backref_edge, list[LOWER]);
3072 edges[index++] = edge;
3073 next = edge->node[UPPER];
3074 }
3075 next = walk_down_backref(edges, &index);
3076 }
3077 }
3078
3079 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
3080 {
3081 u32 blocksize = rc->extent_root->fs_info->nodesize;
3082
3083 if (test_range_bit(&rc->processed_blocks, bytenr,
3084 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
3085 return 1;
3086 return 0;
3087 }
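/*
 * [Editorial sketch, not part of the original file.] "Processed" state is
 * just the EXTENT_DIRTY bit in rc->processed_blocks, so marking and testing
 * compose as below (a hypothetical helper, shown only to make the round trip
 * explicit):
 */
#ifdef BTRFS_RELOC_EXAMPLES	/* never defined; illustration only */
static int toy_mark_and_check(struct reloc_control *rc, u64 bytenr)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	mark_block_processed(rc, bytenr, blocksize);
	/* returns 1 now: the whole [bytenr, bytenr + blocksize) is dirty */
	return tree_block_processed(bytenr, rc);
}
#endif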
3088
3089 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
3090 struct tree_block *block)
3091 {
3092 struct extent_buffer *eb;
3093
3094 eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
3095 block->level, NULL);
3096 if (IS_ERR(eb)) {
3097 return PTR_ERR(eb);
3098 } else if (!extent_buffer_uptodate(eb)) {
3099 free_extent_buffer(eb);
3100 return -EIO;
3101 }
3102 if (block->level == 0)
3103 btrfs_item_key_to_cpu(eb, &block->key, 0);
3104 else
3105 btrfs_node_key_to_cpu(eb, &block->key, 0);
3106 free_extent_buffer(eb);
3107 block->key_ready = 1;
3108 return 0;
3109 }
3110
3111 /*
3112 * helper function to relocate a tree block
3113 */
3114 static int relocate_tree_block(struct btrfs_trans_handle *trans,
3115 struct reloc_control *rc,
3116 struct backref_node *node,
3117 struct btrfs_key *key,
3118 struct btrfs_path *path)
3119 {
3120 struct btrfs_root *root;
3121 int ret = 0;
3122
3123 if (!node)
3124 return 0;
3125
3126 BUG_ON(node->processed);
3127 root = select_one_root(node);
3128 if (root == ERR_PTR(-ENOENT)) {
3129 update_processed_blocks(rc, node);
3130 goto out;
3131 }
3132
3133 if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
3134 ret = reserve_metadata_space(trans, rc, node);
3135 if (ret)
3136 goto out;
3137 }
3138
3139 if (root) {
3140 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
3141 BUG_ON(node->new_bytenr);
3142 BUG_ON(!list_empty(&node->list));
3143 btrfs_record_root_in_trans(trans, root);
3144 root = root->reloc_root;
3145 node->new_bytenr = root->node->start;
3146 btrfs_put_root(node->root);
3147 node->root = btrfs_grab_root(root);
3148 ASSERT(node->root);
3149 list_add_tail(&node->list, &rc->backref_cache.changed);
3150 } else {
3151 path->lowest_level = node->level;
3152 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
3153 btrfs_release_path(path);
3154 if (ret > 0)
3155 ret = 0;
3156 }
3157 if (!ret)
3158 update_processed_blocks(rc, node);
3159 } else {
3160 ret = do_relocation(trans, rc, node, key, path, 1);
3161 }
3162 out:
3163 if (ret || node->level == 0 || node->cowonly)
3164 remove_backref_node(&rc->backref_cache, node);
3165 return ret;
3166 }
3167
3168 /*
3169 * relocate a list of blocks
3170 */
3171 static noinline_for_stack
3172 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
3173 struct reloc_control *rc, struct rb_root *blocks)
3174 {
3175 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3176 struct backref_node *node;
3177 struct btrfs_path *path;
3178 struct tree_block *block;
3179 struct tree_block *next;
3180 int ret;
3181 int err = 0;
3182
3183 path = btrfs_alloc_path();
3184 if (!path) {
3185 err = -ENOMEM;
3186 goto out_free_blocks;
3187 }
3188
3189 /* Kick in readahead for tree blocks with missing keys */
3190 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3191 if (!block->key_ready)
3192 readahead_tree_block(fs_info, block->bytenr);
3193 }
3194
3195 /* Get first keys */
3196 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3197 if (!block->key_ready) {
3198 err = get_tree_block_key(fs_info, block);
3199 if (err)
3200 goto out_free_path;
3201 }
3202 }
3203
3204 /* Do tree relocation */
3205 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3206 node = build_backref_tree(rc, &block->key,
3207 block->level, block->bytenr);
3208 if (IS_ERR(node)) {
3209 err = PTR_ERR(node);
3210 goto out;
3211 }
3212
3213 ret = relocate_tree_block(trans, rc, node, &block->key,
3214 path);
3215 if (ret < 0) {
3216 if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
3217 err = ret;
3218 goto out;
3219 }
3220 }
3221 out:
3222 err = finish_pending_nodes(trans, rc, path, err);
3223
3224 out_free_path:
3225 btrfs_free_path(path);
3226 out_free_blocks:
3227 free_block_list(blocks);
3228 return err;
3229 }
3230
3231 static noinline_for_stack
3232 int prealloc_file_extent_cluster(struct inode *inode,
3233 struct file_extent_cluster *cluster)
3234 {
3235 u64 alloc_hint = 0;
3236 u64 start;
3237 u64 end;
3238 u64 offset = BTRFS_I(inode)->index_cnt;
3239 u64 num_bytes;
3240 int nr = 0;
3241 int ret = 0;
3242 u64 prealloc_start = cluster->start - offset;
3243 u64 prealloc_end = cluster->end - offset;
3244 u64 cur_offset;
3245 struct extent_changeset *data_reserved = NULL;
3246
3247 BUG_ON(cluster->start != cluster->boundary[0]);
3248 inode_lock(inode);
3249
3250 ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
3251 prealloc_end + 1 - prealloc_start);
3252 if (ret)
3253 goto out;
3254
3255 cur_offset = prealloc_start;
3256 while (nr < cluster->nr) {
3257 start = cluster->boundary[nr] - offset;
3258 if (nr + 1 < cluster->nr)
3259 end = cluster->boundary[nr + 1] - 1 - offset;
3260 else
3261 end = cluster->end - offset;
3262
3263 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3264 num_bytes = end + 1 - start;
3265 if (cur_offset < start)
3266 btrfs_free_reserved_data_space(inode, data_reserved,
3267 cur_offset, start - cur_offset);
3268 ret = btrfs_prealloc_file_range(inode, 0, start,
3269 num_bytes, num_bytes,
3270 end + 1, &alloc_hint);
3271 cur_offset = end + 1;
3272 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3273 if (ret)
3274 break;
3275 nr++;
3276 }
3277 if (cur_offset < prealloc_end)
3278 btrfs_free_reserved_data_space(inode, data_reserved,
3279 cur_offset, prealloc_end + 1 - cur_offset);
3280 out:
3281 inode_unlock(inode);
3282 extent_changeset_free(data_reserved);
3283 return ret;
3284 }
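/*
 * [Editorial worked example, not part of the original file.] The data reloc
 * inode is addressed by block group offset: BTRFS_I(inode)->index_cnt holds
 * the block group's start bytenr, so "file offset = extent bytenr -
 * index_cnt". For a block group starting at 1GiB and a cluster covering
 * bytenrs [1GiB + 4MiB, 1GiB + 8MiB - 1], the code above preallocates the
 * inode range [4MiB, 8MiB - 1].
 */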
3285
3286 static noinline_for_stack
3287 int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
3288 u64 block_start)
3289 {
3290 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3291 struct extent_map *em;
3292 int ret = 0;
3293
3294 em = alloc_extent_map();
3295 if (!em)
3296 return -ENOMEM;
3297
3298 em->start = start;
3299 em->len = end + 1 - start;
3300 em->block_len = em->len;
3301 em->block_start = block_start;
3302 set_bit(EXTENT_FLAG_PINNED, &em->flags);
3303
3304 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3305 while (1) {
3306 write_lock(&em_tree->lock);
3307 ret = add_extent_mapping(em_tree, em, 0);
3308 write_unlock(&em_tree->lock);
3309 if (ret != -EEXIST) {
3310 free_extent_map(em);
3311 break;
3312 }
3313 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3314 }
3315 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3316 return ret;
3317 }
3318
3319 /*
3320 * Allow error injection to test balance cancellation
3321 */
3322 int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
3323 {
3324 return atomic_read(&fs_info->balance_cancel_req);
3325 }
3326 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
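/*
 * [Editorial note, not part of the original file.] ALLOW_ERROR_INJECTION(...,
 * TRUE) whitelists the function for the kernel's function error injection
 * framework (requires CONFIG_FUNCTION_ERROR_INJECTION): a kprobe-attached
 * BPF program may use bpf_override_return() to force a non-zero ("true")
 * return, making balance act as if a cancellation request were pending,
 * which lets tests exercise the -ECANCELED paths deterministically.
 */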
3327
3328 static int relocate_file_extent_cluster(struct inode *inode,
3329 struct file_extent_cluster *cluster)
3330 {
3331 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3332 u64 page_start;
3333 u64 page_end;
3334 u64 offset = BTRFS_I(inode)->index_cnt;
3335 unsigned long index;
3336 unsigned long last_index;
3337 struct page *page;
3338 struct file_ra_state *ra;
3339 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
3340 int nr = 0;
3341 int ret = 0;
3342
3343 if (!cluster->nr)
3344 return 0;
3345
3346 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3347 if (!ra)
3348 return -ENOMEM;
3349
3350 ret = prealloc_file_extent_cluster(inode, cluster);
3351 if (ret)
3352 goto out;
3353
3354 file_ra_state_init(ra, inode->i_mapping);
3355
3356 ret = setup_extent_mapping(inode, cluster->start - offset,
3357 cluster->end - offset, cluster->start);
3358 if (ret)
3359 goto out;
3360
3361 index = (cluster->start - offset) >> PAGE_SHIFT;
3362 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3363 while (index <= last_index) {
3364 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3365 PAGE_SIZE);
3366 if (ret)
3367 goto out;
3368
3369 page = find_lock_page(inode->i_mapping, index);
3370 if (!page) {
3371 page_cache_sync_readahead(inode->i_mapping,
3372 ra, NULL, index,
3373 last_index + 1 - index);
3374 page = find_or_create_page(inode->i_mapping, index,
3375 mask);
3376 if (!page) {
3377 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3378 PAGE_SIZE, true);
3379 btrfs_delalloc_release_extents(BTRFS_I(inode),
3380 PAGE_SIZE);
3381 ret = -ENOMEM;
3382 goto out;
3383 }
3384 }
3385
3386 if (PageReadahead(page)) {
3387 page_cache_async_readahead(inode->i_mapping,
3388 ra, NULL, page, index,
3389 last_index + 1 - index);
3390 }
3391
3392 if (!PageUptodate(page)) {
3393 btrfs_readpage(NULL, page);
3394 lock_page(page);
3395 if (!PageUptodate(page)) {
3396 unlock_page(page);
3397 put_page(page);
3398 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3399 PAGE_SIZE, true);
3400 btrfs_delalloc_release_extents(BTRFS_I(inode),
3401 PAGE_SIZE);
3402 ret = -EIO;
3403 goto out;
3404 }
3405 }
3406
3407 page_start = page_offset(page);
3408 page_end = page_start + PAGE_SIZE - 1;
3409
3410 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3411
3412 set_page_extent_mapped(page);
3413
3414 if (nr < cluster->nr &&
3415 page_start + offset == cluster->boundary[nr]) {
3416 set_extent_bits(&BTRFS_I(inode)->io_tree,
3417 page_start, page_end,
3418 EXTENT_BOUNDARY);
3419 nr++;
3420 }
3421
3422 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
3423 NULL);
3424 if (ret) {
3425 unlock_page(page);
3426 put_page(page);
3427 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3428 PAGE_SIZE, true);
3429 btrfs_delalloc_release_extents(BTRFS_I(inode),
3430 PAGE_SIZE);
3431
3432 clear_extent_bits(&BTRFS_I(inode)->io_tree,
3433 page_start, page_end,
3434 EXTENT_LOCKED | EXTENT_BOUNDARY);
3435 goto out;
3436
3437 }
3438 set_page_dirty(page);
3439
3440 unlock_extent(&BTRFS_I(inode)->io_tree,
3441 page_start, page_end);
3442 unlock_page(page);
3443 put_page(page);
3444
3445 index++;
3446 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
3447 balance_dirty_pages_ratelimited(inode->i_mapping);
3448 btrfs_throttle(fs_info);
3449 if (btrfs_should_cancel_balance(fs_info)) {
3450 ret = -ECANCELED;
3451 goto out;
3452 }
3453 }
3454 WARN_ON(nr != cluster->nr);
3455 out:
3456 kfree(ra);
3457 return ret;
3458 }
3459
3460 static noinline_for_stack
3461 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3462 struct file_extent_cluster *cluster)
3463 {
3464 int ret;
3465
3466 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3467 ret = relocate_file_extent_cluster(inode, cluster);
3468 if (ret)
3469 return ret;
3470 cluster->nr = 0;
3471 }
3472
3473 if (!cluster->nr)
3474 cluster->start = extent_key->objectid;
3475 else
3476 BUG_ON(cluster->nr >= MAX_EXTENTS);
3477 cluster->end = extent_key->objectid + extent_key->offset - 1;
3478 cluster->boundary[cluster->nr] = extent_key->objectid;
3479 cluster->nr++;
3480
3481 if (cluster->nr >= MAX_EXTENTS) {
3482 ret = relocate_file_extent_cluster(inode, cluster);
3483 if (ret)
3484 return ret;
3485 cluster->nr = 0;
3486 }
3487 return 0;
3488 }
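/*
 * [Editorial worked example, not part of the original file.] Clusters batch
 * adjacent data extents so pages are relocated in large contiguous chunks.
 * Given extents keyed as (objectid = bytenr, offset = length) arriving in
 * order: (0, 4K) starts a cluster; (4K, 8K) extends it because its objectid
 * equals cluster->end + 1 (end becomes 12K - 1); (16K, 4K) does not match
 * cluster->end + 1, so the existing cluster is relocated first and (16K, 4K)
 * starts a fresh one. A cluster is also flushed once it holds MAX_EXTENTS
 * boundaries.
 */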
3489
3490 /*
3491 * helper to add a tree block to the list.
3492 * the main work is getting the generation and level of the block
3493 */
3494 static int add_tree_block(struct reloc_control *rc,
3495 struct btrfs_key *extent_key,
3496 struct btrfs_path *path,
3497 struct rb_root *blocks)
3498 {
3499 struct extent_buffer *eb;
3500 struct btrfs_extent_item *ei;
3501 struct btrfs_tree_block_info *bi;
3502 struct tree_block *block;
3503 struct rb_node *rb_node;
3504 u32 item_size;
3505 int level = -1;
3506 u64 generation;
3507
3508 eb = path->nodes[0];
3509 item_size = btrfs_item_size_nr(eb, path->slots[0]);
3510
3511 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3512 item_size >= sizeof(*ei) + sizeof(*bi)) {
3513 ei = btrfs_item_ptr(eb, path->slots[0],
3514 struct btrfs_extent_item);
3515 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3516 bi = (struct btrfs_tree_block_info *)(ei + 1);
3517 level = btrfs_tree_block_level(eb, bi);
3518 } else {
3519 level = (int)extent_key->offset;
3520 }
3521 generation = btrfs_extent_generation(eb, ei);
3522 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3523 btrfs_print_v0_err(eb->fs_info);
3524 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3525 return -EINVAL;
3526 } else {
3527 BUG();
3528 }
3529
3530 btrfs_release_path(path);
3531
3532 BUG_ON(level == -1);
3533
3534 block = kmalloc(sizeof(*block), GFP_NOFS);
3535 if (!block)
3536 return -ENOMEM;
3537
3538 block->bytenr = extent_key->objectid;
3539 block->key.objectid = rc->extent_root->fs_info->nodesize;
3540 block->key.offset = generation;
3541 block->level = level;
3542 block->key_ready = 0;
3543
3544 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3545 if (rb_node)
3546 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
3547
3548 return 0;
3549 }
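/*
 * [Editorial note, not part of the original file.] block->key is used as
 * scratch space here: objectid temporarily holds the nodesize and offset
 * holds the generation, until get_tree_block_key() above reads the block
 * (using key.offset as the expected generation) and overwrites block->key
 * with the real first key. block->key_ready = 0 is what signals that the
 * key still needs this fixup.
 */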
3550
3551 /*
3552 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3553 */
3554 static int __add_tree_block(struct reloc_control *rc,
3555 u64 bytenr, u32 blocksize,
3556 struct rb_root *blocks)
3557 {
3558 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3559 struct btrfs_path *path;
3560 struct btrfs_key key;
3561 int ret;
3562 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3563
3564 if (tree_block_processed(bytenr, rc))
3565 return 0;
3566
3567 if (tree_search(blocks, bytenr))
3568 return 0;
3569
3570 path = btrfs_alloc_path();
3571 if (!path)
3572 return -ENOMEM;
3573 again:
3574 key.objectid = bytenr;
3575 if (skinny) {
3576 key.type = BTRFS_METADATA_ITEM_KEY;
3577 key.offset = (u64)-1;
3578 } else {
3579 key.type = BTRFS_EXTENT_ITEM_KEY;
3580 key.offset = blocksize;
3581 }
3582
3583 path->search_commit_root = 1;
3584 path->skip_locking = 1;
3585 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3586 if (ret < 0)
3587 goto out;
3588
3589 if (ret > 0 && skinny) {
3590 if (path->slots[0]) {
3591 path->slots[0]--;
3592 btrfs_item_key_to_cpu(path->nodes[0], &key,
3593 path->slots[0]);
3594 if (key.objectid == bytenr &&
3595 (key.type == BTRFS_METADATA_ITEM_KEY ||
3596 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3597 key.offset == blocksize)))
3598 ret = 0;
3599 }
3600
3601 if (ret) {
3602 skinny = false;
3603 btrfs_release_path(path);
3604 goto again;
3605 }
3606 }
3607 if (ret) {
3608 ASSERT(ret == 1);
3609 btrfs_print_leaf(path->nodes[0]);
3610 btrfs_err(fs_info,
3611 "tree block extent item (%llu) is not found in extent tree",
3612 bytenr);
3613 WARN_ON(1);
3614 ret = -EINVAL;
3615 goto out;
3616 }
3617
3618 ret = add_tree_block(rc, &key, path, blocks);
3619 out:
3620 btrfs_free_path(path);
3621 return ret;
3622 }
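/*
 * [Editorial worked example, not part of the original file.] The same tree
 * block extent can be keyed two ways in the extent tree:
 *
 *   skinny:  (bytenr, BTRFS_METADATA_ITEM_KEY, level)
 *   classic: (bytenr, BTRFS_EXTENT_ITEM_KEY, blocksize)
 *
 * __add_tree_block() first searches with the skinny form (offset (u64)-1
 * lands the search just past the item, hence the slot-- fixup) and, if the
 * filesystem still has classic items from before SKINNY_METADATA was
 * enabled, falls back to the classic form before giving up.
 */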
3623
3624 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3625 struct btrfs_block_group *block_group,
3626 struct inode *inode,
3627 u64 ino)
3628 {
3629 struct btrfs_key key;
3630 struct btrfs_root *root = fs_info->tree_root;
3631 struct btrfs_trans_handle *trans;
3632 int ret = 0;
3633
3634 if (inode)
3635 goto truncate;
3636
3637 key.objectid = ino;
3638 key.type = BTRFS_INODE_ITEM_KEY;
3639 key.offset = 0;
3640
3641 inode = btrfs_iget(fs_info->sb, &key, root);
3642 if (IS_ERR(inode))
3643 return -ENOENT;
3644
3645 truncate:
3646 ret = btrfs_check_trunc_cache_free_space(fs_info,
3647 &fs_info->global_block_rsv);
3648 if (ret)
3649 goto out;
3650
3651 trans = btrfs_join_transaction(root);
3652 if (IS_ERR(trans)) {
3653 ret = PTR_ERR(trans);
3654 goto out;
3655 }
3656
3657 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3658
3659 btrfs_end_transaction(trans);
3660 btrfs_btree_balance_dirty(fs_info);
3661 out:
3662 iput(inode);
3663 return ret;
3664 }
3665
3666 /*
3667 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete
3668 * the cache inode, so its data extent does not block data relocation.
3669 */
3670 static int delete_v1_space_cache(struct extent_buffer *leaf,
3671 struct btrfs_block_group *block_group,
3672 u64 data_bytenr)
3673 {
3674 u64 space_cache_ino;
3675 struct btrfs_file_extent_item *ei;
3676 struct btrfs_key key;
3677 bool found = false;
3678 int i;
3679 int ret;
3680
3681 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3682 return 0;
3683
3684 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3685 btrfs_item_key_to_cpu(leaf, &key, i);
3686 if (key.type != BTRFS_EXTENT_DATA_KEY)
3687 continue;
3688 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3689 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
3690 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3691 found = true;
3692 space_cache_ino = key.objectid;
3693 break;
3694 }
3695 }
3696 if (!found)
3697 return -ENOENT;
3698 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3699 space_cache_ino);
3700 return ret;
3701 }
3702
3703 /*
3704 * helper to find all tree blocks that reference a given data extent
3705 */
3706 static noinline_for_stack
3707 int add_data_references(struct reloc_control *rc,
3708 struct btrfs_key *extent_key,
3709 struct btrfs_path *path,
3710 struct rb_root *blocks)
3711 {
3712 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3713 struct ulist *leaves = NULL;
3714 struct ulist_iterator leaf_uiter;
3715 struct ulist_node *ref_node = NULL;
3716 const u32 blocksize = fs_info->nodesize;
3717 int ret = 0;
3718
3719 btrfs_release_path(path);
3720 ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
3721 0, &leaves, NULL, true);
3722 if (ret < 0)
3723 return ret;
3724
3725 ULIST_ITER_INIT(&leaf_uiter);
3726 while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
3727 struct extent_buffer *eb;
3728
3729 eb = read_tree_block(fs_info, ref_node->val, 0, 0, NULL);
3730 if (IS_ERR(eb)) {
3731 ret = PTR_ERR(eb);
3732 break;
3733 }
3734 ret = delete_v1_space_cache(eb, rc->block_group,
3735 extent_key->objectid);
3736 free_extent_buffer(eb);
3737 if (ret < 0)
3738 break;
3739 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3740 if (ret < 0)
3741 break;
3742 }
3743 if (ret < 0)
3744 free_block_list(blocks);
3745 ulist_free(leaves);
3746 return ret;
3747 }
3748
3749 /*
3750 * helper to find next unprocessed extent
3751 */
3752 static noinline_for_stack
3753 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3754 struct btrfs_key *extent_key)
3755 {
3756 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3757 struct btrfs_key key;
3758 struct extent_buffer *leaf;
3759 u64 start, end, last;
3760 int ret;
3761
3762 last = rc->block_group->start + rc->block_group->length;
3763 while (1) {
3764 cond_resched();
3765 if (rc->search_start >= last) {
3766 ret = 1;
3767 break;
3768 }
3769
3770 key.objectid = rc->search_start;
3771 key.type = BTRFS_EXTENT_ITEM_KEY;
3772 key.offset = 0;
3773
3774 path->search_commit_root = 1;
3775 path->skip_locking = 1;
3776 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3777 0, 0);
3778 if (ret < 0)
3779 break;
3780 next:
3781 leaf = path->nodes[0];
3782 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3783 ret = btrfs_next_leaf(rc->extent_root, path);
3784 if (ret != 0)
3785 break;
3786 leaf = path->nodes[0];
3787 }
3788
3789 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3790 if (key.objectid >= last) {
3791 ret = 1;
3792 break;
3793 }
3794
3795 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3796 key.type != BTRFS_METADATA_ITEM_KEY) {
3797 path->slots[0]++;
3798 goto next;
3799 }
3800
3801 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3802 key.objectid + key.offset <= rc->search_start) {
3803 path->slots[0]++;
3804 goto next;
3805 }
3806
3807 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3808 key.objectid + fs_info->nodesize <=
3809 rc->search_start) {
3810 path->slots[0]++;
3811 goto next;
3812 }
3813
3814 ret = find_first_extent_bit(&rc->processed_blocks,
3815 key.objectid, &start, &end,
3816 EXTENT_DIRTY, NULL);
3817
3818 if (ret == 0 && start <= key.objectid) {
3819 btrfs_release_path(path);
3820 rc->search_start = end + 1;
3821 } else {
3822 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3823 rc->search_start = key.objectid + key.offset;
3824 else
3825 rc->search_start = key.objectid +
3826 fs_info->nodesize;
3827 memcpy(extent_key, &key, sizeof(key));
3828 return 0;
3829 }
3830 }
3831 btrfs_release_path(path);
3832 return ret;
3833 }
3834
3835 static void set_reloc_control(struct reloc_control *rc)
3836 {
3837 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3838
3839 mutex_lock(&fs_info->reloc_mutex);
3840 fs_info->reloc_ctl = rc;
3841 mutex_unlock(&fs_info->reloc_mutex);
3842 }
3843
3844 static void unset_reloc_control(struct reloc_control *rc)
3845 {
3846 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3847
3848 mutex_lock(&fs_info->reloc_mutex);
3849 fs_info->reloc_ctl = NULL;
3850 mutex_unlock(&fs_info->reloc_mutex);
3851 }
3852
3853 static int check_extent_flags(u64 flags)
3854 {
3855 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3856 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3857 return 1;
3858 if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3859 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3860 return 1;
3861 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3862 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
3863 return 1;
3864 return 0;
3865 }
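/*
 * [Editorial worked example, not part of the original file.]
 * check_extent_flags() returns 1 (bad) for the nonsensical combinations and
 * 0 for the valid ones:
 *
 *   DATA                          -> 0
 *   TREE_BLOCK                    -> 0
 *   TREE_BLOCK | FULL_BACKREF     -> 0
 *   DATA | TREE_BLOCK             -> 1  (can't be both)
 *   (neither DATA nor TREE_BLOCK) -> 1
 *   DATA | FULL_BACKREF           -> 1  (full backref is metadata-only)
 */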
3866
3867 static noinline_for_stack
3868 int prepare_to_relocate(struct reloc_control *rc)
3869 {
3870 struct btrfs_trans_handle *trans;
3871 int ret;
3872
3873 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3874 BTRFS_BLOCK_RSV_TEMP);
3875 if (!rc->block_rsv)
3876 return -ENOMEM;
3877
3878 memset(&rc->cluster, 0, sizeof(rc->cluster));
3879 rc->search_start = rc->block_group->start;
3880 rc->extents_found = 0;
3881 rc->nodes_relocated = 0;
3882 rc->merging_rsv_size = 0;
3883 rc->reserved_bytes = 0;
3884 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3885 RELOCATION_RESERVED_NODES;
3886 ret = btrfs_block_rsv_refill(rc->extent_root,
3887 rc->block_rsv, rc->block_rsv->size,
3888 BTRFS_RESERVE_FLUSH_ALL);
3889 if (ret)
3890 return ret;
3891
3892 rc->create_reloc_tree = 1;
3893 set_reloc_control(rc);
3894
3895 trans = btrfs_join_transaction(rc->extent_root);
3896 if (IS_ERR(trans)) {
3897 unset_reloc_control(rc);
3898 /*
3899 * the extent tree is not a ref_cow tree and has no reloc_root to
3900 * clean up. Callers are responsible for freeing the above
3901 * block rsv.
3902 */
3903 return PTR_ERR(trans);
3904 }
3905 btrfs_commit_transaction(trans);
3906 return 0;
3907 }
3908
3909 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3910 {
3911 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3912 struct rb_root blocks = RB_ROOT;
3913 struct btrfs_key key;
3914 struct btrfs_trans_handle *trans = NULL;
3915 struct btrfs_path *path;
3916 struct btrfs_extent_item *ei;
3917 u64 flags;
3918 u32 item_size;
3919 int ret;
3920 int err = 0;
3921 int progress = 0;
3922
3923 path = btrfs_alloc_path();
3924 if (!path)
3925 return -ENOMEM;
3926 path->reada = READA_FORWARD;
3927
3928 ret = prepare_to_relocate(rc);
3929 if (ret) {
3930 err = ret;
3931 goto out_free;
3932 }
3933
3934 while (1) {
3935 rc->reserved_bytes = 0;
3936 ret = btrfs_block_rsv_refill(rc->extent_root,
3937 rc->block_rsv, rc->block_rsv->size,
3938 BTRFS_RESERVE_FLUSH_ALL);
3939 if (ret) {
3940 err = ret;
3941 break;
3942 }
3943 progress++;
3944 trans = btrfs_start_transaction(rc->extent_root, 0);
3945 if (IS_ERR(trans)) {
3946 err = PTR_ERR(trans);
3947 trans = NULL;
3948 break;
3949 }
3950 restart:
3951 if (update_backref_cache(trans, &rc->backref_cache)) {
3952 btrfs_end_transaction(trans);
3953 trans = NULL;
3954 continue;
3955 }
3956
3957 ret = find_next_extent(rc, path, &key);
3958 if (ret < 0)
3959 err = ret;
3960 if (ret != 0)
3961 break;
3962
3963 rc->extents_found++;
3964
3965 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3966 struct btrfs_extent_item);
3967 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
3968 if (item_size >= sizeof(*ei)) {
3969 flags = btrfs_extent_flags(path->nodes[0], ei);
3970 ret = check_extent_flags(flags);
3971 BUG_ON(ret);
3972 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3973 err = -EINVAL;
3974 btrfs_print_v0_err(trans->fs_info);
3975 btrfs_abort_transaction(trans, err);
3976 break;
3977 } else {
3978 BUG();
3979 }
3980
3981 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3982 ret = add_tree_block(rc, &key, path, &blocks);
3983 } else if (rc->stage == UPDATE_DATA_PTRS &&
3984 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3985 ret = add_data_references(rc, &key, path, &blocks);
3986 } else {
3987 btrfs_release_path(path);
3988 ret = 0;
3989 }
3990 if (ret < 0) {
3991 err = ret;
3992 break;
3993 }
3994
3995 if (!RB_EMPTY_ROOT(&blocks)) {
3996 ret = relocate_tree_blocks(trans, rc, &blocks);
3997 if (ret < 0) {
3998 /*
3999 * if we fail to relocate tree blocks, force an update of the
4000 * backref cache when committing the transaction.
4001 */
4002 rc->backref_cache.last_trans = trans->transid - 1;
4003
4004 if (ret != -EAGAIN) {
4005 err = ret;
4006 break;
4007 }
4008 rc->extents_found--;
4009 rc->search_start = key.objectid;
4010 }
4011 }
4012
4013 btrfs_end_transaction_throttle(trans);
4014 btrfs_btree_balance_dirty(fs_info);
4015 trans = NULL;
4016
4017 if (rc->stage == MOVE_DATA_EXTENTS &&
4018 (flags & BTRFS_EXTENT_FLAG_DATA)) {
4019 rc->found_file_extent = 1;
4020 ret = relocate_data_extent(rc->data_inode,
4021 &key, &rc->cluster);
4022 if (ret < 0) {
4023 err = ret;
4024 break;
4025 }
4026 }
4027 if (btrfs_should_cancel_balance(fs_info)) {
4028 err = -ECANCELED;
4029 break;
4030 }
4031 }
4032 if (trans && progress && err == -ENOSPC) {
4033 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
4034 if (ret == 1) {
4035 err = 0;
4036 progress = 0;
4037 goto restart;
4038 }
4039 }
4040
4041 btrfs_release_path(path);
4042 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
4043
4044 if (trans) {
4045 btrfs_end_transaction_throttle(trans);
4046 btrfs_btree_balance_dirty(fs_info);
4047 }
4048
4049 if (!err) {
4050 ret = relocate_file_extent_cluster(rc->data_inode,
4051 &rc->cluster);
4052 if (ret < 0)
4053 err = ret;
4054 }
4055
4056 rc->create_reloc_tree = 0;
4057 set_reloc_control(rc);
4058
4059 backref_cache_cleanup(&rc->backref_cache);
4060 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
4061
4062 /*
4063 * Even in the case when the relocation is cancelled, we should still go
4064 * through prepare_to_merge() and merge_reloc_roots().
4065 *
4066 * On error (including a cancelled balance), prepare_to_merge() will
4067 * mark all reloc trees orphan, then queue them for cleanup in
4068 * merge_reloc_roots().
4069 */
4070 err = prepare_to_merge(rc, err);
4071
4072 merge_reloc_roots(rc);
4073
4074 rc->merge_reloc_tree = 0;
4075 unset_reloc_control(rc);
4076 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
4077
4078 /* get rid of pinned extents */
4079 trans = btrfs_join_transaction(rc->extent_root);
4080 if (IS_ERR(trans)) {
4081 err = PTR_ERR(trans);
4082 goto out_free;
4083 }
4084 btrfs_commit_transaction(trans);
4085 out_free:
4086 ret = clean_dirty_subvols(rc);
4087 if (ret < 0 && !err)
4088 err = ret;
4089 btrfs_free_block_rsv(fs_info, rc->block_rsv);
4090 btrfs_free_path(path);
4091 return err;
4092 }
4093
4094 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
4095 struct btrfs_root *root, u64 objectid)
4096 {
4097 struct btrfs_path *path;
4098 struct btrfs_inode_item *item;
4099 struct extent_buffer *leaf;
4100 int ret;
4101
4102 path = btrfs_alloc_path();
4103 if (!path)
4104 return -ENOMEM;
4105
4106 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
4107 if (ret)
4108 goto out;
4109
4110 leaf = path->nodes[0];
4111 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4112 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
4113 btrfs_set_inode_generation(leaf, item, 1);
4114 btrfs_set_inode_size(leaf, item, 0);
4115 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
4116 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
4117 BTRFS_INODE_PREALLOC);
4118 btrfs_mark_buffer_dirty(leaf);
4119 out:
4120 btrfs_free_path(path);
4121 return ret;
4122 }
4123
4124 /*
4125 * Helper to create an inode for data relocation.
4126 * The inode is in the data relocation tree and its link count is 0.
4127 */
4128 static noinline_for_stack
4129 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
4130 struct btrfs_block_group *group)
4131 {
4132 struct inode *inode = NULL;
4133 struct btrfs_trans_handle *trans;
4134 struct btrfs_root *root;
4135 struct btrfs_key key;
4136 u64 objectid;
4137 int err = 0;
4138
4139 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4140 if (IS_ERR(root))
4141 return ERR_CAST(root);
4142
4143 trans = btrfs_start_transaction(root, 6);
4144 if (IS_ERR(trans)) {
4145 btrfs_put_root(root);
4146 return ERR_CAST(trans);
4147 }
4148
4149 err = btrfs_find_free_objectid(root, &objectid);
4150 if (err)
4151 goto out;
4152
4153 err = __insert_orphan_inode(trans, root, objectid);
4154 BUG_ON(err);
4155
4156 key.objectid = objectid;
4157 key.type = BTRFS_INODE_ITEM_KEY;
4158 key.offset = 0;
4159 inode = btrfs_iget(fs_info->sb, &key, root);
4160 BUG_ON(IS_ERR(inode));
4161 BTRFS_I(inode)->index_cnt = group->start;
4162
4163 err = btrfs_orphan_add(trans, BTRFS_I(inode));
4164 out:
4165 btrfs_put_root(root);
4166 btrfs_end_transaction(trans);
4167 btrfs_btree_balance_dirty(fs_info);
4168 if (err) {
4169 if (inode)
4170 iput(inode);
4171 inode = ERR_PTR(err);
4172 }
4173 return inode;
4174 }
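
Editor's note: the assignment of group->start to index_cnt above means the data reloc inode addresses the target block group linearly, so a file offset maps to a disk bytenr by simple addition (the same relation btrfs_reloc_clone_csums() relies on further down). A tiny sketch of that mapping with hypothetical values:

	#include <stdint.h>
	#include <assert.h>

	/* Mirrors: disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt */
	static uint64_t file_pos_to_disk_bytenr(uint64_t index_cnt,
						uint64_t file_pos)
	{
		return file_pos + index_cnt;
	}

	int main(void)
	{
		uint64_t group_start = 1ULL << 30;	/* hypothetical BG at 1 GiB */
		uint64_t file_pos = 4ULL << 20;		/* 4 MiB into the group */

		assert(file_pos_to_disk_bytenr(group_start, file_pos) ==
		       (1ULL << 30) + (4ULL << 20));
		return 0;
	}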
4175
4176 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
4177 {
4178 struct reloc_control *rc;
4179
4180 rc = kzalloc(sizeof(*rc), GFP_NOFS);
4181 if (!rc)
4182 return NULL;
4183
4184 INIT_LIST_HEAD(&rc->reloc_roots);
4185 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
4186 backref_cache_init(&rc->backref_cache);
4187 mapping_tree_init(&rc->reloc_root_tree);
4188 extent_io_tree_init(fs_info, &rc->processed_blocks,
4189 IO_TREE_RELOC_BLOCKS, NULL);
4190 return rc;
4191 }
4192
4193 /*
4194 * Print the block group being relocated
4195 */
4196 static void describe_relocation(struct btrfs_fs_info *fs_info,
4197 struct btrfs_block_group *block_group)
4198 {
4199 char buf[128] = {'\0'};
4200
4201 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
4202
4203 btrfs_info(fs_info,
4204 "relocating block group %llu flags %s",
4205 block_group->start, buf);
4206 }
4207
4208 static const char *stage_to_string(int stage)
4209 {
4210 if (stage == MOVE_DATA_EXTENTS)
4211 return "move data extents";
4212 if (stage == UPDATE_DATA_PTRS)
4213 return "update data pointers";
4214 return "unknown";
4215 }
4216
4217 /*
4218 * Relocate all extents in a block group.
4219 */
4220 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
4221 {
4222 struct btrfs_block_group *bg;
4223 struct btrfs_root *extent_root = fs_info->extent_root;
4224 struct reloc_control *rc;
4225 struct inode *inode;
4226 struct btrfs_path *path;
4227 int ret;
4228 int rw = 0;
4229 int err = 0;
4230
4231 bg = btrfs_lookup_block_group(fs_info, group_start);
4232 if (!bg)
4233 return -ENOENT;
4234
4235 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4236 btrfs_put_block_group(bg);
4237 return -ETXTBSY;
4238 }
4239
4240 rc = alloc_reloc_control(fs_info);
4241 if (!rc) {
4242 btrfs_put_block_group(bg);
4243 return -ENOMEM;
4244 }
4245
4246 rc->extent_root = extent_root;
4247 rc->block_group = bg;
4248
4249 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4250 if (ret) {
4251 err = ret;
4252 goto out;
4253 }
4254 rw = 1;
4255
4256 path = btrfs_alloc_path();
4257 if (!path) {
4258 err = -ENOMEM;
4259 goto out;
4260 }
4261
4262 inode = lookup_free_space_inode(rc->block_group, path);
4263 btrfs_free_path(path);
4264
4265 if (!IS_ERR(inode))
4266 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4267 else
4268 ret = PTR_ERR(inode);
4269
4270 if (ret && ret != -ENOENT) {
4271 err = ret;
4272 goto out;
4273 }
4274
4275 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4276 if (IS_ERR(rc->data_inode)) {
4277 err = PTR_ERR(rc->data_inode);
4278 rc->data_inode = NULL;
4279 goto out;
4280 }
4281
4282 describe_relocation(fs_info, rc->block_group);
4283
4284 btrfs_wait_block_group_reservations(rc->block_group);
4285 btrfs_wait_nocow_writers(rc->block_group);
4286 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4287 rc->block_group->start,
4288 rc->block_group->length);
4289
4290 while (1) {
4291 int finishes_stage;
4292
4293 mutex_lock(&fs_info->cleaner_mutex);
4294 ret = relocate_block_group(rc);
4295 mutex_unlock(&fs_info->cleaner_mutex);
4296 if (ret < 0)
4297 err = ret;
4298
4299 finishes_stage = rc->stage;
4300 /*
4301 * We may have gotten ENOSPC after we already dirtied some
4302 * extents. If writeout happens while we're relocating a
4303 * different block group we could end up hitting the
4304 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4305 * btrfs_reloc_cow_block. Make sure we write everything out
4306 * properly so we don't trip over this problem, and then break
4307 * out of the loop if we hit an error.
4308 */
4309 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4310 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4311 (u64)-1);
4312 if (ret)
4313 err = ret;
4314 invalidate_mapping_pages(rc->data_inode->i_mapping,
4315 0, -1);
4316 rc->stage = UPDATE_DATA_PTRS;
4317 }
4318
4319 if (err < 0)
4320 goto out;
4321
4322 if (rc->extents_found == 0)
4323 break;
4324
4325 btrfs_info(fs_info, "found %llu extents, stage: %s",
4326 rc->extents_found, stage_to_string(finishes_stage));
4327 }
4328
4329 WARN_ON(rc->block_group->pinned > 0);
4330 WARN_ON(rc->block_group->reserved > 0);
4331 WARN_ON(rc->block_group->used > 0);
4332 out:
4333 if (err && rw)
4334 btrfs_dec_block_group_ro(rc->block_group);
4335 iput(rc->data_inode);
4336 btrfs_put_block_group(rc->block_group);
4337 kfree(rc);
4338 return err;
4339 }
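
Editor's note: the retry loop above is effectively a two-stage state machine. Passes run in MOVE_DATA_EXTENTS until file extents have been found and flushed, then in UPDATE_DATA_PTRS, and the loop exits once a pass finds no extents at all. A hedged, self-contained sketch of that control flow; the *_stub names are invented and real error handling is omitted.

	enum { STAGE_MOVE_DATA_EXTENTS, STAGE_UPDATE_DATA_PTRS };

	static unsigned long long passes_left_stub = 2;

	/* pretend the first two passes each find extents, then none */
	static unsigned long long relocate_pass_stub(int stage,
						     int *found_file_extent)
	{
		(void)stage;
		*found_file_extent = 1;
		return passes_left_stub ? passes_left_stub-- : 0;
	}

	static void flush_and_drop_pages_stub(void)
	{
		/* stands in for btrfs_wait_ordered_range() +
		 * invalidate_mapping_pages() above */
	}

	int main(void)
	{
		int stage = STAGE_MOVE_DATA_EXTENTS;
		int found_file_extent = 0;

		for (;;) {
			unsigned long long extents_found =
				relocate_pass_stub(stage, &found_file_extent);

			if (stage == STAGE_MOVE_DATA_EXTENTS && found_file_extent) {
				/* write everything out before rewriting pointers */
				flush_and_drop_pages_stub();
				stage = STAGE_UPDATE_DATA_PTRS;
			}
			if (extents_found == 0)
				break;	/* a full pass found nothing left */
		}
		return stage == STAGE_UPDATE_DATA_PTRS ? 0 : 1;
	}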
4340
4341 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4342 {
4343 struct btrfs_fs_info *fs_info = root->fs_info;
4344 struct btrfs_trans_handle *trans;
4345 int ret, err;
4346
4347 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4348 if (IS_ERR(trans))
4349 return PTR_ERR(trans);
4350
4351 memset(&root->root_item.drop_progress, 0,
4352 sizeof(root->root_item.drop_progress));
4353 root->root_item.drop_level = 0;
4354 btrfs_set_root_refs(&root->root_item, 0);
4355 ret = btrfs_update_root(trans, fs_info->tree_root,
4356 &root->root_key, &root->root_item);
4357
4358 err = btrfs_end_transaction(trans);
4359 if (err)
4360 return err;
4361 return ret;
4362 }
4363
4364 /*
4365 * Recover a relocation interrupted by a system crash.
4366 *
4367 * This function resumes merging reloc trees with the corresponding fs
4368 * trees, which is important for preserving the sharing of tree blocks.
4369 */
4370 int btrfs_recover_relocation(struct btrfs_root *root)
4371 {
4372 struct btrfs_fs_info *fs_info = root->fs_info;
4373 LIST_HEAD(reloc_roots);
4374 struct btrfs_key key;
4375 struct btrfs_root *fs_root;
4376 struct btrfs_root *reloc_root;
4377 struct btrfs_path *path;
4378 struct extent_buffer *leaf;
4379 struct reloc_control *rc = NULL;
4380 struct btrfs_trans_handle *trans;
4381 int ret;
4382 int err = 0;
4383
4384 path = btrfs_alloc_path();
4385 if (!path)
4386 return -ENOMEM;
4387 path->reada = READA_BACK;
4388
4389 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4390 key.type = BTRFS_ROOT_ITEM_KEY;
4391 key.offset = (u64)-1;
4392
4393 while (1) {
4394 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4395 path, 0, 0);
4396 if (ret < 0) {
4397 err = ret;
4398 goto out;
4399 }
4400 if (ret > 0) {
4401 if (path->slots[0] == 0)
4402 break;
4403 path->slots[0]--;
4404 }
4405 leaf = path->nodes[0];
4406 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4407 btrfs_release_path(path);
4408
4409 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4410 key.type != BTRFS_ROOT_ITEM_KEY)
4411 break;
4412
4413 reloc_root = btrfs_read_tree_root(root, &key);
4414 if (IS_ERR(reloc_root)) {
4415 err = PTR_ERR(reloc_root);
4416 goto out;
4417 }
4418
4419 set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
4420 list_add(&reloc_root->root_list, &reloc_roots);
4421
4422 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4423 fs_root = read_fs_root(fs_info,
4424 reloc_root->root_key.offset);
4425 if (IS_ERR(fs_root)) {
4426 ret = PTR_ERR(fs_root);
4427 if (ret != -ENOENT) {
4428 err = ret;
4429 goto out;
4430 }
4431 ret = mark_garbage_root(reloc_root);
4432 if (ret < 0) {
4433 err = ret;
4434 goto out;
4435 }
4436 } else {
4437 btrfs_put_root(fs_root);
4438 }
4439 }
4440
4441 if (key.offset == 0)
4442 break;
4443
4444 key.offset--;
4445 }
4446 btrfs_release_path(path);
4447
4448 if (list_empty(&reloc_roots))
4449 goto out;
4450
4451 rc = alloc_reloc_control(fs_info);
4452 if (!rc) {
4453 err = -ENOMEM;
4454 goto out;
4455 }
4456
4457 rc->extent_root = fs_info->extent_root;
4458
4459 set_reloc_control(rc);
4460
4461 trans = btrfs_join_transaction(rc->extent_root);
4462 if (IS_ERR(trans)) {
4463 err = PTR_ERR(trans);
4464 goto out_unset;
4465 }
4466
4467 rc->merge_reloc_tree = 1;
4468
4469 while (!list_empty(&reloc_roots)) {
4470 reloc_root = list_entry(reloc_roots.next,
4471 struct btrfs_root, root_list);
4472 list_del(&reloc_root->root_list);
4473
4474 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4475 list_add_tail(&reloc_root->root_list,
4476 &rc->reloc_roots);
4477 continue;
4478 }
4479
4480 fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
4481 if (IS_ERR(fs_root)) {
4482 err = PTR_ERR(fs_root);
4483 list_add_tail(&reloc_root->root_list, &reloc_roots);
4484 goto out_unset;
4485 }
4486
4487 err = __add_reloc_root(reloc_root);
4488 BUG_ON(err < 0); /* -ENOMEM or logic error */
4489 fs_root->reloc_root = reloc_root;
4490 btrfs_put_root(fs_root);
4491 }
4492
4493 err = btrfs_commit_transaction(trans);
4494 if (err)
4495 goto out_unset;
4496
4497 merge_reloc_roots(rc);
4498
4499 unset_reloc_control(rc);
4500
4501 trans = btrfs_join_transaction(rc->extent_root);
4502 if (IS_ERR(trans)) {
4503 err = PTR_ERR(trans);
4504 goto out_clean;
4505 }
4506 err = btrfs_commit_transaction(trans);
4507 out_clean:
4508 ret = clean_dirty_subvols(rc);
4509 if (ret < 0 && !err)
4510 err = ret;
4511 out_unset:
4512 unset_reloc_control(rc);
4513 kfree(rc);
4514 out:
4515 if (!list_empty(&reloc_roots))
4516 free_reloc_roots(&reloc_roots);
4517
4518 btrfs_free_path(path);
4519
4520 if (err == 0) {
4521 /* clean up the orphan inode in the data relocation tree */
4522 fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4523 if (IS_ERR(fs_root)) {
4524 err = PTR_ERR(fs_root);
4525 } else {
4526 err = btrfs_orphan_cleanup(fs_root);
4527 btrfs_put_root(fs_root);
4528 }
4529 }
4530 return err;
4531 }
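
Editor's note: the search loop at the top of this function enumerates all BTRFS_TREE_RELOC_OBJECTID root items in descending key order. It starts at offset (u64)-1; when btrfs_search_slot() returns "not found" (ret > 0) it steps back one slot to land on the last key at or below the target, then continues at key.offset - 1. A hedged sketch of the same idiom over a plain sorted array standing in for the btree:

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical root item offsets, sorted ascending as in a btree */
	static const uint64_t offsets[] = { 5, 9, 42 };
	#define N (sizeof(offsets) / sizeof(offsets[0]))

	int main(void)
	{
		uint64_t key = UINT64_MAX;

		for (;;) {
			size_t slot = 0;

			/* find the first entry > key, then back up one slot */
			while (slot < N && offsets[slot] <= key)
				slot++;
			if (slot == 0)
				break;		/* nothing at or below key */
			slot--;

			printf("visit offset %llu\n",
			       (unsigned long long)offsets[slot]);

			if (offsets[slot] == 0)
				break;
			key = offsets[slot] - 1;	/* continue below this item */
		}
		return 0;
	}

Run against the array above, this visits 42, 9, 5: the same descending walk the recovery loop performs over reloc root items.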
4532
4533 /*
4534 * Helper to add ordered checksums for data relocation.
4535 *
4536 * Cloning the checksums properly handles nodatasum extents, and it
4537 * also saves the CPU time of re-calculating them.
4538 */
4539 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4540 {
4541 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4542 struct btrfs_ordered_sum *sums;
4543 struct btrfs_ordered_extent *ordered;
4544 int ret;
4545 u64 disk_bytenr;
4546 u64 new_bytenr;
4547 LIST_HEAD(list);
4548
4549 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4550 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4551
4552 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4553 ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
4554 disk_bytenr + len - 1, &list, 0);
4555 if (ret)
4556 goto out;
4557
4558 while (!list_empty(&list)) {
4559 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4560 list_del_init(&sums->list);
4561
4562 /*
4563 * We need to offset the new_bytenr based on where the csum is.
4564 * We need to do this because we will read in entire prealloc
4565 * extents, but we may have written to, say, the middle of a
4566 * prealloc extent, so we need to make sure the csum goes with
4567 * the right disk offset.
4568 *
4569 * We can do this because the data reloc inode refers strictly
4570 * to the on disk bytes, so we don't have to worry about
4571 * disk_len vs real len as we do with regular inodes, since
4572 * it's all disk length.
4573 */
4574 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4575 sums->bytenr = new_bytenr;
4576
4577 btrfs_add_ordered_sum(ordered, sums);
4578 }
4579 out:
4580 btrfs_put_ordered_extent(ordered);
4581 return ret;
4582 }
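
Editor's note: the new_bytenr computation above rebases each checksum from the source extent onto the freshly allocated ordered extent while preserving its offset within the extent. A worked example with hypothetical bytenrs:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t disk_bytenr = 0x40000000;	/* old extent location */
		uint64_t new_disk    = 0x80000000;	/* ordered->disk_bytenr */
		uint64_t sum_bytenr  = 0x40001000;	/* csum 4 KiB into the extent */

		/* Mirrors: new_bytenr = ordered->disk_bytenr +
		 *                       sums->bytenr - disk_bytenr */
		uint64_t new_bytenr = new_disk + sum_bytenr - disk_bytenr;

		assert(new_bytenr == 0x80001000);	/* same 4 KiB in, new base */
		return 0;
	}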
4583
4584 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4585 struct btrfs_root *root, struct extent_buffer *buf,
4586 struct extent_buffer *cow)
4587 {
4588 struct btrfs_fs_info *fs_info = root->fs_info;
4589 struct reloc_control *rc;
4590 struct backref_node *node;
4591 int first_cow = 0;
4592 int level;
4593 int ret = 0;
4594
4595 rc = fs_info->reloc_ctl;
4596 if (!rc)
4597 return 0;
4598
4599 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4600 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4601
4602 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
4603 if (buf == root->node)
4604 __update_reloc_root(root, cow->start);
4605 }
4606
4607 level = btrfs_header_level(buf);
4608 if (btrfs_header_generation(buf) <=
4609 btrfs_root_last_snapshot(&root->root_item))
4610 first_cow = 1;
4611
4612 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4613 rc->create_reloc_tree) {
4614 WARN_ON(!first_cow && level == 0);
4615
4616 node = rc->backref_cache.path[level];
4617 BUG_ON(node->bytenr != buf->start &&
4618 node->new_bytenr != buf->start);
4619
4620 drop_node_buffer(node);
4621 atomic_inc(&cow->refs);
4622 node->eb = cow;
4623 node->new_bytenr = cow->start;
4624
4625 if (!node->pending) {
4626 list_move_tail(&node->list,
4627 &rc->backref_cache.pending[level]);
4628 node->pending = 1;
4629 }
4630
4631 if (first_cow)
4632 __mark_block_processed(rc, node);
4633
4634 if (first_cow && level > 0)
4635 rc->nodes_relocated += buf->len;
4636 }
4637
4638 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4639 ret = replace_file_extents(trans, rc, root, cow);
4640 return ret;
4641 }
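
Editor's note: the first_cow test above relies on the generation stored in the block header. A block whose generation is not newer than the root's last_snapshot has not been COWed since that snapshot was taken, so the current COW is the first one. A minimal sketch of the comparison with hypothetical generation numbers:

	#include <stdint.h>
	#include <assert.h>

	/* Mirrors: btrfs_header_generation(buf) <=
	 *          btrfs_root_last_snapshot(&root->root_item) */
	static int is_first_cow(uint64_t header_gen, uint64_t last_snapshot)
	{
		return header_gen <= last_snapshot;
	}

	int main(void)
	{
		assert(is_first_cow(100, 105));	 /* untouched since snapshot */
		assert(!is_first_cow(106, 105)); /* already cowed afterwards */
		return 0;
	}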
4642
4643 /*
4644 * Called before creating a snapshot. It calculates the metadata
4645 * reservation required for relocating tree blocks in the snapshot.
4646 */
4647 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4648 u64 *bytes_to_reserve)
4649 {
4650 struct btrfs_root *root = pending->root;
4651 struct reloc_control *rc = root->fs_info->reloc_ctl;
4652
4653 if (!rc || !have_reloc_root(root))
4654 return;
4655
4656 if (!rc->merge_reloc_tree)
4657 return;
4658
4659 root = root->reloc_root;
4660 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4661 /*
4662 * Relocation is in the stage of merging trees. In the worst
4663 * case, the space used by merging a reloc tree is twice the
4664 * size of the relocated tree nodes: half for cowing the
4665 * reloc tree, half for cowing the fs tree. The space used
4666 * by cowing the reloc tree will be freed after the tree is
4667 * dropped, but if we create a snapshot, cowing the fs tree
4668 * may use more space than it frees, so we need to reserve
4669 * extra space.
4670 */
4671 *bytes_to_reserve += rc->nodes_relocated;
4672 }
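
Editor's note: concretely, per the comment above, with nodes_relocated bytes of relocated tree nodes the merge may dirty up to twice that amount (one COW of the reloc tree plus one of the fs tree), and a pending snapshot adds one more nodes_relocated worth of reserve because the fs tree COW may free less than it uses. A small arithmetic sketch with a hypothetical size:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t nodes_relocated = 64ULL << 20;	/* hypothetical: 64 MiB */
		uint64_t bytes_to_reserve = 0;

		/* worst case while merging: cow the reloc tree + cow the fs tree */
		uint64_t merge_peak = 2 * nodes_relocated;

		/* Mirrors: *bytes_to_reserve += rc->nodes_relocated */
		bytes_to_reserve += nodes_relocated;

		assert(merge_peak == 128ULL << 20);
		assert(bytes_to_reserve == 64ULL << 20);
		return 0;
	}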
4673
4674 /*
4675 * Called after a snapshot is created. It migrates the block
4676 * reservation and creates a reloc root for the new snapshot.
4677 */
4678 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4679 struct btrfs_pending_snapshot *pending)
4680 {
4681 struct btrfs_root *root = pending->root;
4682 struct btrfs_root *reloc_root;
4683 struct btrfs_root *new_root;
4684 struct reloc_control *rc = root->fs_info->reloc_ctl;
4685 int ret;
4686
4687 if (!rc || !have_reloc_root(root))
4688 return 0;
4689
4691 rc->merging_rsv_size += rc->nodes_relocated;
4692
4693 if (rc->merge_reloc_tree) {
4694 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4695 rc->block_rsv,
4696 rc->nodes_relocated, true);
4697 if (ret)
4698 return ret;
4699 }
4700
4701 new_root = pending->snap;
4702 reloc_root = create_reloc_root(trans, root->reloc_root,
4703 new_root->root_key.objectid);
4704 if (IS_ERR(reloc_root))
4705 return PTR_ERR(reloc_root);
4706
4707 ret = __add_reloc_root(reloc_root);
4708 BUG_ON(ret < 0);
4709 new_root->reloc_root = reloc_root;
4710
4711 if (rc->create_reloc_tree)
4712 ret = clone_backref_node(trans, rc, root, reloc_root);
4713 return ret;
4714 }