/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};
struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
static int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
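/*
 * Usage note (a summary of the callers later in this file, not new
 * behaviour): a reader first pins a sequence number with
 * btrfs_get_tree_mod_seq(), then __tree_mod_log_oldest_root() uses
 * tree_mod_log_search_oldest() to find the oldest logged state of a root,
 * and get_old_root()/tree_mod_log_rewind() use tree_mod_log_search() to
 * find the newest entry at or after that sequence and rewind from there.
 */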
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}
static noinline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
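/*
 * Worked example for the helper above (illustration only): with a 16KiB
 * nodesize, blocknr = 0 and other = 40960 gives other - (blocknr + 16384) =
 * 24576, which is below 32768, so the blocks count as close; other = 65536
 * gives 49152 and is not close. In other words, "close" means within 32KiB
 * of the end of the earlier block.
 */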
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
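/*
 * Worked example for the comparison above (illustration only): keys are
 * ordered by objectid first, then type, then offset. So {objectid=257,
 * type=X, offset=0} sorts before {objectid=257, type=X, offset=4096}
 * (return -1), and any key with objectid=256 sorts before any key with
 * objectid=257 regardless of type or offset.
 */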
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
	       int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(fs_info, mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = read_node_slot(fs_info, parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = read_node_slot(fs_info, parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, fs_info, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, fs_info, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(fs_info, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, fs_info, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, fs_info, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(fs_info, parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret) {
				wret = 1;
			} else {
				wret = push_node_left(trans, fs_info,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(fs_info, parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret) {
				wret = 1;
			} else {
				wret = balance_node_right(trans, fs_info,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	eb = find_extent_buffer(fs_info, search);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(fs_info, search);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = find_extent_buffer(fs_info, block1);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = find_extent_buffer(fs_info, block2);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}

	if (block1)
		readahead_tree_block(fs_info, block1);
	if (block2)
		readahead_tree_block(fs_info, block2);
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
2445 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2448 struct extent_buffer
*b
= *eb_ret
;
2449 struct extent_buffer
*tmp
;
2452 blocknr
= btrfs_node_blockptr(b
, slot
);
2453 gen
= btrfs_node_ptr_generation(b
, slot
);
2455 tmp
= find_extent_buffer(fs_info
, blocknr
);
2457 /* first we do an atomic uptodate check */
2458 if (btrfs_buffer_uptodate(tmp
, gen
, 1) > 0) {
2463 /* the pages were up to date, but we failed
2464 * the generation number check. Do a full
2465 * read for the generation number that is correct.
2466 * We must do this without dropping locks so
2467 * we can trust our generation number
2469 btrfs_set_path_blocking(p
);
2471 /* now we're allowed to do a blocking uptodate check */
2472 ret
= btrfs_read_buffer(tmp
, gen
);
2477 free_extent_buffer(tmp
);
2478 btrfs_release_path(p
);
2483 * reduce lock contention at high levels
2484 * of the btree by dropping locks before
2485 * we read. Don't release the lock on the current
2486 * level because we need to walk this node to figure
2487 * out which blocks to read.
2489 btrfs_unlock_up_safe(p
, level
+ 1);
2490 btrfs_set_path_blocking(p
);
2492 free_extent_buffer(tmp
);
2493 if (p
->reada
!= READA_NONE
)
2494 reada_for_search(fs_info
, p
, level
, slot
, key
->objectid
);
2496 btrfs_release_path(p
);
2499 tmp
= read_tree_block(fs_info
, blocknr
, 0);
2502 * If the read above didn't mark this buffer up to date,
2503 * it will never end up being up to date. Set ret to EIO now
2504 * and give up so that our caller doesn't loop forever
2507 if (!btrfs_buffer_uptodate(tmp
, 0, 0))
2509 free_extent_buffer(tmp
);
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * loop.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
2531 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2534 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
2535 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3) {
2538 if (*write_lock_level
< level
+ 1) {
2539 *write_lock_level
= level
+ 1;
2540 btrfs_release_path(p
);
2544 btrfs_set_path_blocking(p
);
2545 reada_for_balance(fs_info
, p
, level
);
2546 sret
= split_node(trans
, root
, p
, level
);
2547 btrfs_clear_path_blocking(p
, NULL
, 0);
2554 b
= p
->nodes
[level
];
2555 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
2556 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) / 2) {
2559 if (*write_lock_level
< level
+ 1) {
2560 *write_lock_level
= level
+ 1;
2561 btrfs_release_path(p
);
2565 btrfs_set_path_blocking(p
);
2566 reada_for_balance(fs_info
, p
, level
);
2567 sret
= balance_level(trans
, root
, p
, level
);
2568 btrfs_clear_path_blocking(p
, NULL
, 0);
2574 b
= p
->nodes
[level
];
2576 btrfs_release_path(p
);
2579 BUG_ON(btrfs_header_nritems(b
) == 1);
static void key_search_validate(struct extent_buffer *b,
				const struct btrfs_key *key,
				int level)
{
#ifdef CONFIG_BTRFS_ASSERT
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, key);

	if (level == 0)
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_leaf, items[0].key),
		    sizeof(disk_key)));
	else
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_node, ptrs[0].key),
		    sizeof(disk_key)));
#endif
}

static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp != 0) {
		*prev_cmp = bin_search(b, key, level, slot);
		return *prev_cmp;
	}

	key_search_validate(b, key, level);
	*slot = 0;

	return 0;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}
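/*
 * Illustrative sketch (not part of the original file): a typical
 * btrfs_find_item() call.  It searches for (objectid, key_type, offset),
 * follows to the next leaf when needed and fills 'found_key' on success.
 * The example_* name and the use of BTRFS_INODE_ITEM_KEY are hypothetical.
 */
static int example_find_item(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_find_item(root, path, objectid, 0, BTRFS_INODE_ITEM_KEY,
			      &found_key);
	if (ret == 0) {
		/* path->nodes[0] / path->slots[0] now point at the item */
	}

	btrfs_free_path(path);
	return ret;
}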
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
2675 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2676 struct extent_buffer
*b
;
2681 int lowest_unlock
= 1;
2683 /* everything at write_lock_level or lower must be write locked */
2684 int write_lock_level
= 0;
2685 u8 lowest_level
= 0;
2686 int min_write_lock_level
;
2689 lowest_level
= p
->lowest_level
;
2690 WARN_ON(lowest_level
&& ins_len
> 0);
2691 WARN_ON(p
->nodes
[0] != NULL
);
2692 BUG_ON(!cow
&& ins_len
);
2697 /* when we are removing items, we might have to go up to level
2698 * two as we update tree pointers Make sure we keep write
2699 * for those levels as well
2701 write_lock_level
= 2;
2702 } else if (ins_len
> 0) {
2704 * for inserting items, make sure we have a write lock on
2705 * level 1 so we can update keys
2707 write_lock_level
= 1;
2711 write_lock_level
= -1;
2713 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
2714 write_lock_level
= BTRFS_MAX_LEVEL
;
2716 min_write_lock_level
= write_lock_level
;
2721 * we try very hard to do read locks on the root
2723 root_lock
= BTRFS_READ_LOCK
;
2725 if (p
->search_commit_root
) {
2727 * the commit roots are read only
2728 * so we always do read locks
2730 if (p
->need_commit_sem
)
2731 down_read(&fs_info
->commit_root_sem
);
2732 b
= root
->commit_root
;
2733 extent_buffer_get(b
);
2734 level
= btrfs_header_level(b
);
2735 if (p
->need_commit_sem
)
2736 up_read(&fs_info
->commit_root_sem
);
2737 if (!p
->skip_locking
)
2738 btrfs_tree_read_lock(b
);
2740 if (p
->skip_locking
) {
2741 b
= btrfs_root_node(root
);
2742 level
= btrfs_header_level(b
);
2744 /* we don't know the level of the root node
2745 * until we actually have it read locked
2747 b
= btrfs_read_lock_root_node(root
);
2748 level
= btrfs_header_level(b
);
2749 if (level
<= write_lock_level
) {
2750 /* whoops, must trade for write lock */
2751 btrfs_tree_read_unlock(b
);
2752 free_extent_buffer(b
);
2753 b
= btrfs_lock_root_node(root
);
2754 root_lock
= BTRFS_WRITE_LOCK
;
2756 /* the level might have changed, check again */
2757 level
= btrfs_header_level(b
);
2761 p
->nodes
[level
] = b
;
2762 if (!p
->skip_locking
)
2763 p
->locks
[level
] = root_lock
;
2766 level
= btrfs_header_level(b
);
2769 * setup the path here so we can release it under lock
2770 * contention with the cow code
2774 * if we don't really need to cow this block
2775 * then we don't want to set the path blocking,
2776 * so we test it here
2778 if (!should_cow_block(trans
, root
, b
)) {
2779 trans
->dirty
= true;
2784 * must have write locks on this node and the
2787 if (level
> write_lock_level
||
2788 (level
+ 1 > write_lock_level
&&
2789 level
+ 1 < BTRFS_MAX_LEVEL
&&
2790 p
->nodes
[level
+ 1])) {
2791 write_lock_level
= level
+ 1;
2792 btrfs_release_path(p
);
2796 btrfs_set_path_blocking(p
);
2797 err
= btrfs_cow_block(trans
, root
, b
,
2798 p
->nodes
[level
+ 1],
2799 p
->slots
[level
+ 1], &b
);
2806 p
->nodes
[level
] = b
;
2807 btrfs_clear_path_blocking(p
, NULL
, 0);
2810 * we have a lock on b and as long as we aren't changing
2811 * the tree, there is no way to for the items in b to change.
2812 * It is safe to drop the lock on our parent before we
2813 * go through the expensive btree search on b.
2815 * If we're inserting or deleting (ins_len != 0), then we might
2816 * be changing slot zero, which may require changing the parent.
2817 * So, we can't drop the lock until after we know which slot
2818 * we're operating on.
2820 if (!ins_len
&& !p
->keep_locks
) {
2823 if (u
< BTRFS_MAX_LEVEL
&& p
->locks
[u
]) {
2824 btrfs_tree_unlock_rw(p
->nodes
[u
], p
->locks
[u
]);
2829 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
2835 if (ret
&& slot
> 0) {
2839 p
->slots
[level
] = slot
;
2840 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
2841 ins_len
, &write_lock_level
);
2848 b
= p
->nodes
[level
];
2849 slot
= p
->slots
[level
];
2852 * slot 0 is special, if we change the key
2853 * we have to update the parent pointer
2854 * which means we must have a write lock
2857 if (slot
== 0 && ins_len
&&
2858 write_lock_level
< level
+ 1) {
2859 write_lock_level
= level
+ 1;
2860 btrfs_release_path(p
);
2864 unlock_up(p
, level
, lowest_unlock
,
2865 min_write_lock_level
, &write_lock_level
);
2867 if (level
== lowest_level
) {
2873 err
= read_block_for_search(root
, p
, &b
, level
,
2882 if (!p
->skip_locking
) {
2883 level
= btrfs_header_level(b
);
2884 if (level
<= write_lock_level
) {
2885 err
= btrfs_try_tree_write_lock(b
);
2887 btrfs_set_path_blocking(p
);
2889 btrfs_clear_path_blocking(p
, b
,
2892 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
2894 err
= btrfs_tree_read_lock_atomic(b
);
2896 btrfs_set_path_blocking(p
);
2897 btrfs_tree_read_lock(b
);
2898 btrfs_clear_path_blocking(p
, b
,
2901 p
->locks
[level
] = BTRFS_READ_LOCK
;
2903 p
->nodes
[level
] = b
;
2906 p
->slots
[level
] = slot
;
2908 btrfs_leaf_free_space(fs_info
, b
) < ins_len
) {
2909 if (write_lock_level
< 1) {
2910 write_lock_level
= 1;
2911 btrfs_release_path(p
);
2915 btrfs_set_path_blocking(p
);
2916 err
= split_leaf(trans
, root
, key
,
2917 p
, ins_len
, ret
== 0);
2918 btrfs_clear_path_blocking(p
, NULL
, 0);
2926 if (!p
->search_for_split
)
2927 unlock_up(p
, level
, lowest_unlock
,
2928 min_write_lock_level
, &write_lock_level
);
2935 * we don't really know what they plan on doing with the path
2936 * from here on, so for now just mark it as blocking
2938 if (!p
->leave_spinning
)
2939 btrfs_set_path_blocking(p
);
2940 if (ret
< 0 && !p
->skip_release_on_error
)
2941 btrfs_release_path(p
);
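/*
 * Illustrative sketch (not part of the original file): the common read-only
 * lookup pattern built on btrfs_search_slot().  A NULL transaction with
 * ins_len == 0 and cow == 0 modifies nothing; 0 means an exact match,
 * > 0 means the path points at the slot where the key would be inserted.
 * The example_* name and the key fields are hypothetical.
 */
static int example_lookup(struct btrfs_root *root, u64 objectid, u8 type,
			  u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* exact match at path->nodes[0], path->slots[0] */
	} else if (ret > 0) {
		/* not found; the path points at the insertion position */
	}

	btrfs_free_path(path);
	return ret;
}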
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
2959 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2960 struct extent_buffer
*b
;
2965 int lowest_unlock
= 1;
2966 u8 lowest_level
= 0;
2969 lowest_level
= p
->lowest_level
;
2970 WARN_ON(p
->nodes
[0] != NULL
);
2972 if (p
->search_commit_root
) {
2974 return btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2978 b
= get_old_root(root
, time_seq
);
2979 level
= btrfs_header_level(b
);
2980 p
->locks
[level
] = BTRFS_READ_LOCK
;
2983 level
= btrfs_header_level(b
);
2984 p
->nodes
[level
] = b
;
2985 btrfs_clear_path_blocking(p
, NULL
, 0);
2988 * we have a lock on b and as long as we aren't changing
2989 * the tree, there is no way to for the items in b to change.
2990 * It is safe to drop the lock on our parent before we
2991 * go through the expensive btree search on b.
2993 btrfs_unlock_up_safe(p
, level
+ 1);
2996 * Since we can unwind ebs we want to do a real search every
3000 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
3004 if (ret
&& slot
> 0) {
3008 p
->slots
[level
] = slot
;
3009 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
3011 if (level
== lowest_level
) {
3017 err
= read_block_for_search(root
, p
, &b
, level
,
3026 level
= btrfs_header_level(b
);
3027 err
= btrfs_tree_read_lock_atomic(b
);
3029 btrfs_set_path_blocking(p
);
3030 btrfs_tree_read_lock(b
);
3031 btrfs_clear_path_blocking(p
, b
,
3034 b
= tree_mod_log_rewind(fs_info
, p
, b
, time_seq
);
3039 p
->locks
[level
] = BTRFS_READ_LOCK
;
3040 p
->nodes
[level
] = b
;
3042 p
->slots
[level
] = slot
;
3043 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
3049 if (!p
->leave_spinning
)
3050 btrfs_set_path_blocking(p
);
3052 btrfs_release_path(p
);
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error.
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
3075 struct extent_buffer
*leaf
;
3078 ret
= btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
3082 * a return value of 1 means the path is at the position where the
3083 * item should be inserted. Normally this is the next bigger item,
3084 * but in case the previous item is the last in a leaf, path points
3085 * to the first free slot in the previous leaf, i.e. at an invalid
3091 if (p
->slots
[0] >= btrfs_header_nritems(leaf
)) {
3092 ret
= btrfs_next_leaf(root
, p
);
3098 * no higher item found, return the next
3103 btrfs_release_path(p
);
3107 if (p
->slots
[0] == 0) {
3108 ret
= btrfs_prev_leaf(root
, p
);
3113 if (p
->slots
[0] == btrfs_header_nritems(leaf
))
3120 * no lower item found, return the next
3125 btrfs_release_path(p
);
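/*
 * Illustrative sketch (not part of the original file): walking forward from
 * a starting key with btrfs_search_slot_for_read().  find_higher = 1 asks
 * for the next item >= the search key, return_any = 1 falls back to the
 * closest lower item when nothing higher exists.  The example_* name is
 * hypothetical.
 */
static int example_find_next(struct btrfs_root *root,
			     const struct btrfs_key *start,
			     struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, start, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	btrfs_free_path(path);
	return ret;
}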
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(fs_info, path, &disk_key, 1);
}
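/*
 * Illustrative sketch (not part of the original file): the ordering rule
 * btrfs_set_item_key_safe() enforces.  A caller may only move the key of
 * the item the path points to if it still sorts after the previous item
 * and before the next one, e.g. bumping only the offset.  The example_*
 * name is hypothetical.
 */
static void example_bump_key_offset(struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	/* new_offset must stay below the next item's key in this leaf */
	key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &key);
}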
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
3211 src_nritems
= btrfs_header_nritems(src
);
3212 dst_nritems
= btrfs_header_nritems(dst
);
3213 push_items
= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - dst_nritems
;
3214 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
3215 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
3217 if (!empty
&& src_nritems
<= 8)
3220 if (push_items
<= 0)
3224 push_items
= min(src_nritems
, push_items
);
3225 if (push_items
< src_nritems
) {
3226 /* leave at least 8 pointers in the node if
3227 * we aren't going to empty it
3229 if (src_nritems
- push_items
< 8) {
3230 if (push_items
<= 8)
3236 push_items
= min(src_nritems
- 8, push_items
);
3238 ret
= tree_mod_log_eb_copy(fs_info
, dst
, src
, dst_nritems
, 0,
3241 btrfs_abort_transaction(trans
, ret
);
3244 copy_extent_buffer(dst
, src
,
3245 btrfs_node_key_ptr_offset(dst_nritems
),
3246 btrfs_node_key_ptr_offset(0),
3247 push_items
* sizeof(struct btrfs_key_ptr
));
3249 if (push_items
< src_nritems
) {
3251 * don't call tree_mod_log_eb_move here, key removal was already
3252 * fully logged by tree_mod_log_eb_copy above.
3254 memmove_extent_buffer(src
, btrfs_node_key_ptr_offset(0),
3255 btrfs_node_key_ptr_offset(push_items
),
3256 (src_nritems
- push_items
) *
3257 sizeof(struct btrfs_key_ptr
));
3259 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
3260 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
3261 btrfs_mark_buffer_dirty(src
);
3262 btrfs_mark_buffer_dirty(dst
);
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
3287 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
3288 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
3290 src_nritems
= btrfs_header_nritems(src
);
3291 dst_nritems
= btrfs_header_nritems(dst
);
3292 push_items
= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - dst_nritems
;
3293 if (push_items
<= 0)
3296 if (src_nritems
< 4)
3299 max_push
= src_nritems
/ 2 + 1;
3300 /* don't try to empty the node */
3301 if (max_push
>= src_nritems
)
3304 if (max_push
< push_items
)
3305 push_items
= max_push
;
3307 tree_mod_log_eb_move(fs_info
, dst
, push_items
, 0, dst_nritems
);
3308 memmove_extent_buffer(dst
, btrfs_node_key_ptr_offset(push_items
),
3309 btrfs_node_key_ptr_offset(0),
3311 sizeof(struct btrfs_key_ptr
));
3313 ret
= tree_mod_log_eb_copy(fs_info
, dst
, src
, 0,
3314 src_nritems
- push_items
, push_items
);
3316 btrfs_abort_transaction(trans
, ret
);
3319 copy_extent_buffer(dst
, src
,
3320 btrfs_node_key_ptr_offset(0),
3321 btrfs_node_key_ptr_offset(src_nritems
- push_items
),
3322 push_items
* sizeof(struct btrfs_key_ptr
));
3324 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
3325 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
3327 btrfs_mark_buffer_dirty(src
);
3328 btrfs_mark_buffer_dirty(dst
);
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level)
{
3344 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3346 struct extent_buffer
*lower
;
3347 struct extent_buffer
*c
;
3348 struct extent_buffer
*old
;
3349 struct btrfs_disk_key lower_key
;
3351 BUG_ON(path
->nodes
[level
]);
3352 BUG_ON(path
->nodes
[level
-1] != root
->node
);
3354 lower
= path
->nodes
[level
-1];
3356 btrfs_item_key(lower
, &lower_key
, 0);
3358 btrfs_node_key(lower
, &lower_key
, 0);
3360 c
= btrfs_alloc_tree_block(trans
, root
, 0, root
->root_key
.objectid
,
3361 &lower_key
, level
, root
->node
->start
, 0);
3365 root_add_used(root
, fs_info
->nodesize
);
3367 memzero_extent_buffer(c
, 0, sizeof(struct btrfs_header
));
3368 btrfs_set_header_nritems(c
, 1);
3369 btrfs_set_header_level(c
, level
);
3370 btrfs_set_header_bytenr(c
, c
->start
);
3371 btrfs_set_header_generation(c
, trans
->transid
);
3372 btrfs_set_header_backref_rev(c
, BTRFS_MIXED_BACKREF_REV
);
3373 btrfs_set_header_owner(c
, root
->root_key
.objectid
);
3375 write_extent_buffer_fsid(c
, fs_info
->fsid
);
3376 write_extent_buffer_chunk_tree_uuid(c
, fs_info
->chunk_tree_uuid
);
3378 btrfs_set_node_key(c
, &lower_key
, 0);
3379 btrfs_set_node_blockptr(c
, 0, lower
->start
);
3380 lower_gen
= btrfs_header_generation(lower
);
3381 WARN_ON(lower_gen
!= trans
->transid
);
3383 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
3385 btrfs_mark_buffer_dirty(c
);
3388 tree_mod_log_set_root_pointer(root
, c
, 0);
3389 rcu_assign_pointer(root
->node
, c
);
3391 /* the super has an extra ref to root->node */
3392 free_extent_buffer(old
);
3394 add_root_to_dirty_list(root
);
3395 extent_buffer_get(c
);
3396 path
->nodes
[level
] = c
;
3397 path
->locks
[level
] = BTRFS_WRITE_LOCK_BLOCKING
;
3398 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
3414 struct extent_buffer
*lower
;
3418 BUG_ON(!path
->nodes
[level
]);
3419 btrfs_assert_tree_locked(path
->nodes
[level
]);
3420 lower
= path
->nodes
[level
];
3421 nritems
= btrfs_header_nritems(lower
);
3422 BUG_ON(slot
> nritems
);
3423 BUG_ON(nritems
== BTRFS_NODEPTRS_PER_BLOCK(fs_info
));
3424 if (slot
!= nritems
) {
3426 tree_mod_log_eb_move(fs_info
, lower
, slot
+ 1,
3427 slot
, nritems
- slot
);
3428 memmove_extent_buffer(lower
,
3429 btrfs_node_key_ptr_offset(slot
+ 1),
3430 btrfs_node_key_ptr_offset(slot
),
3431 (nritems
- slot
) * sizeof(struct btrfs_key_ptr
));
3434 ret
= tree_mod_log_insert_key(fs_info
, lower
, slot
,
3435 MOD_LOG_KEY_ADD
, GFP_NOFS
);
3438 btrfs_set_node_key(lower
, key
, slot
);
3439 btrfs_set_node_blockptr(lower
, slot
, bytenr
);
3440 WARN_ON(trans
->transid
== 0);
3441 btrfs_set_node_ptr_generation(lower
, slot
, trans
->transid
);
3442 btrfs_set_header_nritems(lower
, nritems
+ 1);
3443 btrfs_mark_buffer_dirty(lower
);
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
3459 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3460 struct extent_buffer
*c
;
3461 struct extent_buffer
*split
;
3462 struct btrfs_disk_key disk_key
;
3467 c
= path
->nodes
[level
];
3468 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
3469 if (c
== root
->node
) {
3471 * trying to split the root, lets make a new one
3473 * tree mod log: We don't log_removal old root in
3474 * insert_new_root, because that root buffer will be kept as a
3475 * normal node. We are going to log removal of half of the
3476 * elements below with tree_mod_log_eb_copy. We're holding a
3477 * tree lock on the buffer, which is why we cannot race with
3478 * other tree_mod_log users.
3480 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
3484 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
3485 c
= path
->nodes
[level
];
3486 if (!ret
&& btrfs_header_nritems(c
) <
3487 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3)
3493 c_nritems
= btrfs_header_nritems(c
);
3494 mid
= (c_nritems
+ 1) / 2;
3495 btrfs_node_key(c
, &disk_key
, mid
);
3497 split
= btrfs_alloc_tree_block(trans
, root
, 0, root
->root_key
.objectid
,
3498 &disk_key
, level
, c
->start
, 0);
3500 return PTR_ERR(split
);
3502 root_add_used(root
, fs_info
->nodesize
);
3504 memzero_extent_buffer(split
, 0, sizeof(struct btrfs_header
));
3505 btrfs_set_header_level(split
, btrfs_header_level(c
));
3506 btrfs_set_header_bytenr(split
, split
->start
);
3507 btrfs_set_header_generation(split
, trans
->transid
);
3508 btrfs_set_header_backref_rev(split
, BTRFS_MIXED_BACKREF_REV
);
3509 btrfs_set_header_owner(split
, root
->root_key
.objectid
);
3510 write_extent_buffer_fsid(split
, fs_info
->fsid
);
3511 write_extent_buffer_chunk_tree_uuid(split
, fs_info
->chunk_tree_uuid
);
3513 ret
= tree_mod_log_eb_copy(fs_info
, split
, c
, 0, mid
, c_nritems
- mid
);
3515 btrfs_abort_transaction(trans
, ret
);
3518 copy_extent_buffer(split
, c
,
3519 btrfs_node_key_ptr_offset(0),
3520 btrfs_node_key_ptr_offset(mid
),
3521 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
3522 btrfs_set_header_nritems(split
, c_nritems
- mid
);
3523 btrfs_set_header_nritems(c
, mid
);
3526 btrfs_mark_buffer_dirty(c
);
3527 btrfs_mark_buffer_dirty(split
);
3529 insert_ptr(trans
, fs_info
, path
, &disk_key
, split
->start
,
3530 path
->slots
[level
+ 1] + 1, level
+ 1);
3532 if (path
->slots
[level
] >= mid
) {
3533 path
->slots
[level
] -= mid
;
3534 btrfs_tree_unlock(c
);
3535 free_extent_buffer(c
);
3536 path
->nodes
[level
] = split
;
3537 path
->slots
[level
+ 1] += 1;
3539 btrfs_tree_unlock(split
);
3540 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		btrfs_crit(fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			   ret,
			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
			   leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
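/*
 * Illustrative sketch (not part of the original file): the space check a
 * caller performs before inserting into a leaf.  An insertion needs room
 * for the item data plus one struct btrfs_item header; when the leaf is
 * too full, btrfs_search_slot() splits it via split_leaf() below.  The
 * example_* name is hypothetical.
 */
static int example_leaf_has_room(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *leaf, u32 data_size)
{
	int needed = data_size + sizeof(struct btrfs_item);

	return btrfs_leaf_free_space(fs_info, leaf) >= needed;
}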
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
3605 struct extent_buffer
*left
= path
->nodes
[0];
3606 struct extent_buffer
*upper
= path
->nodes
[1];
3607 struct btrfs_map_token token
;
3608 struct btrfs_disk_key disk_key
;
3613 struct btrfs_item
*item
;
3619 btrfs_init_map_token(&token
);
3624 nr
= max_t(u32
, 1, min_slot
);
3626 if (path
->slots
[0] >= left_nritems
)
3627 push_space
+= data_size
;
3629 slot
= path
->slots
[1];
3630 i
= left_nritems
- 1;
3632 item
= btrfs_item_nr(i
);
3634 if (!empty
&& push_items
> 0) {
3635 if (path
->slots
[0] > i
)
3637 if (path
->slots
[0] == i
) {
3638 int space
= btrfs_leaf_free_space(fs_info
, left
);
3639 if (space
+ push_space
* 2 > free_space
)
3644 if (path
->slots
[0] == i
)
3645 push_space
+= data_size
;
3647 this_item_size
= btrfs_item_size(left
, item
);
3648 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3652 push_space
+= this_item_size
+ sizeof(*item
);
3658 if (push_items
== 0)
3661 WARN_ON(!empty
&& push_items
== left_nritems
);
3663 /* push left to right */
3664 right_nritems
= btrfs_header_nritems(right
);
3666 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
3667 push_space
-= leaf_data_end(fs_info
, left
);
3669 /* make room in the right data area */
3670 data_end
= leaf_data_end(fs_info
, right
);
3671 memmove_extent_buffer(right
,
3672 btrfs_leaf_data(right
) + data_end
- push_space
,
3673 btrfs_leaf_data(right
) + data_end
,
3674 BTRFS_LEAF_DATA_SIZE(fs_info
) - data_end
);
3676 /* copy from the left data area */
3677 copy_extent_buffer(right
, left
, btrfs_leaf_data(right
) +
3678 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3679 btrfs_leaf_data(left
) + leaf_data_end(fs_info
, left
),
3682 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
3683 btrfs_item_nr_offset(0),
3684 right_nritems
* sizeof(struct btrfs_item
));
3686 /* copy the items from left to right */
3687 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
3688 btrfs_item_nr_offset(left_nritems
- push_items
),
3689 push_items
* sizeof(struct btrfs_item
));
3691 /* update the item pointers */
3692 right_nritems
+= push_items
;
3693 btrfs_set_header_nritems(right
, right_nritems
);
3694 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3695 for (i
= 0; i
< right_nritems
; i
++) {
3696 item
= btrfs_item_nr(i
);
3697 push_space
-= btrfs_token_item_size(right
, item
, &token
);
3698 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3701 left_nritems
-= push_items
;
3702 btrfs_set_header_nritems(left
, left_nritems
);
3705 btrfs_mark_buffer_dirty(left
);
3707 clean_tree_block(fs_info
, left
);
3709 btrfs_mark_buffer_dirty(right
);
3711 btrfs_item_key(right
, &disk_key
, 0);
3712 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
3713 btrfs_mark_buffer_dirty(upper
);
3715 /* then fixup the leaf pointer in the path */
3716 if (path
->slots
[0] >= left_nritems
) {
3717 path
->slots
[0] -= left_nritems
;
3718 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
3719 clean_tree_block(fs_info
, path
->nodes
[0]);
3720 btrfs_tree_unlock(path
->nodes
[0]);
3721 free_extent_buffer(path
->nodes
[0]);
3722 path
->nodes
[0] = right
;
3723 path
->slots
[1] += 1;
3725 btrfs_tree_unlock(right
);
3726 free_extent_buffer(right
);
3731 btrfs_tree_unlock(right
);
3732 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
3751 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3752 struct extent_buffer
*left
= path
->nodes
[0];
3753 struct extent_buffer
*right
;
3754 struct extent_buffer
*upper
;
3760 if (!path
->nodes
[1])
3763 slot
= path
->slots
[1];
3764 upper
= path
->nodes
[1];
3765 if (slot
>= btrfs_header_nritems(upper
) - 1)
3768 btrfs_assert_tree_locked(path
->nodes
[1]);
3770 right
= read_node_slot(fs_info
, upper
, slot
+ 1);
3772 * slot + 1 is not valid or we fail to read the right node,
3773 * no big deal, just return.
3778 btrfs_tree_lock(right
);
3779 btrfs_set_lock_blocking(right
);
3781 free_space
= btrfs_leaf_free_space(fs_info
, right
);
3782 if (free_space
< data_size
)
3785 /* cow and double check */
3786 ret
= btrfs_cow_block(trans
, root
, right
, upper
,
3791 free_space
= btrfs_leaf_free_space(fs_info
, right
);
3792 if (free_space
< data_size
)
3795 left_nritems
= btrfs_header_nritems(left
);
3796 if (left_nritems
== 0)
3799 if (path
->slots
[0] == left_nritems
&& !empty
) {
3800 /* Key greater than all keys in the leaf, right neighbor has
3801 * enough room for it and we're not emptying our leaf to delete
3802 * it, therefore use right neighbor to insert the new item and
3803 * no need to touch/dirty our left leaft. */
3804 btrfs_tree_unlock(left
);
3805 free_extent_buffer(left
);
3806 path
->nodes
[0] = right
;
3812 return __push_leaf_right(fs_info
, path
, min_data_size
, empty
,
3813 right
, free_space
, left_nritems
, min_slot
);
3815 btrfs_tree_unlock(right
);
3816 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
3834 struct btrfs_disk_key disk_key
;
3835 struct extent_buffer
*right
= path
->nodes
[0];
3839 struct btrfs_item
*item
;
3840 u32 old_left_nritems
;
3844 u32 old_left_item_size
;
3845 struct btrfs_map_token token
;
3847 btrfs_init_map_token(&token
);
3850 nr
= min(right_nritems
, max_slot
);
3852 nr
= min(right_nritems
- 1, max_slot
);
3854 for (i
= 0; i
< nr
; i
++) {
3855 item
= btrfs_item_nr(i
);
3857 if (!empty
&& push_items
> 0) {
3858 if (path
->slots
[0] < i
)
3860 if (path
->slots
[0] == i
) {
3861 int space
= btrfs_leaf_free_space(fs_info
, right
);
3862 if (space
+ push_space
* 2 > free_space
)
3867 if (path
->slots
[0] == i
)
3868 push_space
+= data_size
;
3870 this_item_size
= btrfs_item_size(right
, item
);
3871 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3875 push_space
+= this_item_size
+ sizeof(*item
);
3878 if (push_items
== 0) {
3882 WARN_ON(!empty
&& push_items
== btrfs_header_nritems(right
));
3884 /* push data from right to left */
3885 copy_extent_buffer(left
, right
,
3886 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3887 btrfs_item_nr_offset(0),
3888 push_items
* sizeof(struct btrfs_item
));
3890 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
) -
3891 btrfs_item_offset_nr(right
, push_items
- 1);
3893 copy_extent_buffer(left
, right
, btrfs_leaf_data(left
) +
3894 leaf_data_end(fs_info
, left
) - push_space
,
3895 btrfs_leaf_data(right
) +
3896 btrfs_item_offset_nr(right
, push_items
- 1),
3898 old_left_nritems
= btrfs_header_nritems(left
);
3899 BUG_ON(old_left_nritems
<= 0);
3901 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3902 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3905 item
= btrfs_item_nr(i
);
3907 ioff
= btrfs_token_item_offset(left
, item
, &token
);
3908 btrfs_set_token_item_offset(left
, item
,
3909 ioff
- (BTRFS_LEAF_DATA_SIZE(fs_info
) - old_left_item_size
),
3912 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3914 /* fixup right node */
3915 if (push_items
> right_nritems
)
3916 WARN(1, KERN_CRIT
"push items %d nr %u\n", push_items
,
3919 if (push_items
< right_nritems
) {
3920 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3921 leaf_data_end(fs_info
, right
);
3922 memmove_extent_buffer(right
, btrfs_leaf_data(right
) +
3923 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3924 btrfs_leaf_data(right
) +
3925 leaf_data_end(fs_info
, right
), push_space
);
3927 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3928 btrfs_item_nr_offset(push_items
),
3929 (btrfs_header_nritems(right
) - push_items
) *
3930 sizeof(struct btrfs_item
));
3932 right_nritems
-= push_items
;
3933 btrfs_set_header_nritems(right
, right_nritems
);
3934 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3935 for (i
= 0; i
< right_nritems
; i
++) {
3936 item
= btrfs_item_nr(i
);
3938 push_space
= push_space
- btrfs_token_item_size(right
,
3940 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3943 btrfs_mark_buffer_dirty(left
);
3945 btrfs_mark_buffer_dirty(right
);
3947 clean_tree_block(fs_info
, right
);
3949 btrfs_item_key(right
, &disk_key
, 0);
3950 fixup_low_keys(fs_info
, path
, &disk_key
, 1);
3952 /* then fixup the leaf pointer in the path */
3953 if (path
->slots
[0] < push_items
) {
3954 path
->slots
[0] += old_left_nritems
;
3955 btrfs_tree_unlock(path
->nodes
[0]);
3956 free_extent_buffer(path
->nodes
[0]);
3957 path
->nodes
[0] = left
;
3958 path
->slots
[1] -= 1;
3960 btrfs_tree_unlock(left
);
3961 free_extent_buffer(left
);
3962 path
->slots
[0] -= push_items
;
3964 BUG_ON(path
->slots
[0] < 0);
3967 btrfs_tree_unlock(left
);
3968 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
3984 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3985 struct extent_buffer
*right
= path
->nodes
[0];
3986 struct extent_buffer
*left
;
3992 slot
= path
->slots
[1];
3995 if (!path
->nodes
[1])
3998 right_nritems
= btrfs_header_nritems(right
);
3999 if (right_nritems
== 0)
4002 btrfs_assert_tree_locked(path
->nodes
[1]);
4004 left
= read_node_slot(fs_info
, path
->nodes
[1], slot
- 1);
4006 * slot - 1 is not valid or we fail to read the left node,
4007 * no big deal, just return.
4012 btrfs_tree_lock(left
);
4013 btrfs_set_lock_blocking(left
);
4015 free_space
= btrfs_leaf_free_space(fs_info
, left
);
4016 if (free_space
< data_size
) {
4021 /* cow and double check */
4022 ret
= btrfs_cow_block(trans
, root
, left
,
4023 path
->nodes
[1], slot
- 1, &left
);
4025 /* we hit -ENOSPC, but it isn't fatal here */
4031 free_space
= btrfs_leaf_free_space(fs_info
, left
);
4032 if (free_space
< data_size
) {
4037 return __push_leaf_left(fs_info
, path
, min_data_size
,
4038 empty
, left
, free_space
, right_nritems
,
4041 btrfs_tree_unlock(left
);
4042 free_extent_buffer(left
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
4060 struct btrfs_disk_key disk_key
;
4061 struct btrfs_map_token token
;
4063 btrfs_init_map_token(&token
);
4065 nritems
= nritems
- mid
;
4066 btrfs_set_header_nritems(right
, nritems
);
4067 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(fs_info
, l
);
4069 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
4070 btrfs_item_nr_offset(mid
),
4071 nritems
* sizeof(struct btrfs_item
));
4073 copy_extent_buffer(right
, l
,
4074 btrfs_leaf_data(right
) + BTRFS_LEAF_DATA_SIZE(fs_info
) -
4075 data_copy_size
, btrfs_leaf_data(l
) +
4076 leaf_data_end(fs_info
, l
), data_copy_size
);
4078 rt_data_off
= BTRFS_LEAF_DATA_SIZE(fs_info
) - btrfs_item_end_nr(l
, mid
);
4080 for (i
= 0; i
< nritems
; i
++) {
4081 struct btrfs_item
*item
= btrfs_item_nr(i
);
4084 ioff
= btrfs_token_item_offset(right
, item
, &token
);
4085 btrfs_set_token_item_offset(right
, item
,
4086 ioff
+ rt_data_off
, &token
);
4089 btrfs_set_header_nritems(l
, mid
);
4090 btrfs_item_key(right
, &disk_key
, 0);
4091 insert_ptr(trans
, fs_info
, path
, &disk_key
, right
->start
,
4092 path
->slots
[1] + 1, 1);
4094 btrfs_mark_buffer_dirty(right
);
4095 btrfs_mark_buffer_dirty(l
);
4096 BUG_ON(path
->slots
[0] != slot
);
4099 btrfs_tree_unlock(path
->nodes
[0]);
4100 free_extent_buffer(path
->nodes
[0]);
4101 path
->nodes
[0] = right
;
4102 path
->slots
[0] -= mid
;
4103 path
->slots
[1] += 1;
4105 btrfs_tree_unlock(right
);
4106 free_extent_buffer(right
);
4109 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
4127 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4132 int space_needed
= data_size
;
4134 slot
= path
->slots
[0];
4135 if (slot
< btrfs_header_nritems(path
->nodes
[0]))
4136 space_needed
-= btrfs_leaf_free_space(fs_info
, path
->nodes
[0]);
4139 * try to push all the items after our slot into the
4142 ret
= push_leaf_right(trans
, root
, path
, 1, space_needed
, 0, slot
);
4149 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4151 * our goal is to get our slot at the start or end of a leaf. If
4152 * we've done so we're done
4154 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
4157 if (btrfs_leaf_free_space(fs_info
, path
->nodes
[0]) >= data_size
)
4160 /* try to push all the items before our slot into the next leaf */
4161 slot
= path
->slots
[0];
4162 ret
= push_leaf_left(trans
, root
, path
, 1, space_needed
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
4186 struct btrfs_disk_key disk_key
;
4187 struct extent_buffer
*l
;
4191 struct extent_buffer
*right
;
4192 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4196 int num_doubles
= 0;
4197 int tried_avoid_double
= 0;
4200 slot
= path
->slots
[0];
4201 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
4202 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(fs_info
))
4205 /* first try to make some room by pushing left and right */
4206 if (data_size
&& path
->nodes
[1]) {
4207 int space_needed
= data_size
;
4209 if (slot
< btrfs_header_nritems(l
))
4210 space_needed
-= btrfs_leaf_free_space(fs_info
, l
);
4212 wret
= push_leaf_right(trans
, root
, path
, space_needed
,
4213 space_needed
, 0, 0);
4217 wret
= push_leaf_left(trans
, root
, path
, space_needed
,
4218 space_needed
, 0, (u32
)-1);
4224 /* did the pushes work? */
4225 if (btrfs_leaf_free_space(fs_info
, l
) >= data_size
)
4229 if (!path
->nodes
[1]) {
4230 ret
= insert_new_root(trans
, root
, path
, 1);
4237 slot
= path
->slots
[0];
4238 nritems
= btrfs_header_nritems(l
);
4239 mid
= (nritems
+ 1) / 2;
4243 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
4244 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4245 if (slot
>= nritems
) {
4249 if (mid
!= nritems
&&
4250 leaf_space_used(l
, mid
, nritems
- mid
) +
4251 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4252 if (data_size
&& !tried_avoid_double
)
4253 goto push_for_double
;
4259 if (leaf_space_used(l
, 0, mid
) + data_size
>
4260 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4261 if (!extend
&& data_size
&& slot
== 0) {
4263 } else if ((extend
|| !data_size
) && slot
== 0) {
4267 if (mid
!= nritems
&&
4268 leaf_space_used(l
, mid
, nritems
- mid
) +
4269 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4270 if (data_size
&& !tried_avoid_double
)
4271 goto push_for_double
;
4279 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
4281 btrfs_item_key(l
, &disk_key
, mid
);
4283 right
= btrfs_alloc_tree_block(trans
, root
, 0, root
->root_key
.objectid
,
4284 &disk_key
, 0, l
->start
, 0);
4286 return PTR_ERR(right
);
4288 root_add_used(root
, fs_info
->nodesize
);
4290 memzero_extent_buffer(right
, 0, sizeof(struct btrfs_header
));
4291 btrfs_set_header_bytenr(right
, right
->start
);
4292 btrfs_set_header_generation(right
, trans
->transid
);
4293 btrfs_set_header_backref_rev(right
, BTRFS_MIXED_BACKREF_REV
);
4294 btrfs_set_header_owner(right
, root
->root_key
.objectid
);
4295 btrfs_set_header_level(right
, 0);
4296 write_extent_buffer_fsid(right
, fs_info
->fsid
);
4297 write_extent_buffer_chunk_tree_uuid(right
, fs_info
->chunk_tree_uuid
);
4301 btrfs_set_header_nritems(right
, 0);
4302 insert_ptr(trans
, fs_info
, path
, &disk_key
,
4303 right
->start
, path
->slots
[1] + 1, 1);
4304 btrfs_tree_unlock(path
->nodes
[0]);
4305 free_extent_buffer(path
->nodes
[0]);
4306 path
->nodes
[0] = right
;
4308 path
->slots
[1] += 1;
4310 btrfs_set_header_nritems(right
, 0);
4311 insert_ptr(trans
, fs_info
, path
, &disk_key
,
4312 right
->start
, path
->slots
[1], 1);
4313 btrfs_tree_unlock(path
->nodes
[0]);
4314 free_extent_buffer(path
->nodes
[0]);
4315 path
->nodes
[0] = right
;
4317 if (path
->slots
[1] == 0)
4318 fixup_low_keys(fs_info
, path
, &disk_key
, 1);
4321 * We create a new leaf 'right' for the required ins_len and
4322 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4323 * the content of ins_len to 'right'.
4328 copy_for_split(trans
, fs_info
, path
, l
, right
, slot
, mid
, nritems
);
4331 BUG_ON(num_doubles
!= 0);
4339 push_for_double_split(trans
, root
, path
, data_size
);
4340 tried_avoid_double
= 1;
4341 if (btrfs_leaf_free_space(fs_info
, path
->nodes
[0]) >= data_size
)
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
4350 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4351 struct btrfs_key key
;
4352 struct extent_buffer
*leaf
;
4353 struct btrfs_file_extent_item
*fi
;
4358 leaf
= path
->nodes
[0];
4359 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4361 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
4362 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
4364 if (btrfs_leaf_free_space(fs_info
, leaf
) >= ins_len
)
4367 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4368 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4369 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4370 struct btrfs_file_extent_item
);
4371 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
4373 btrfs_release_path(path
);
4375 path
->keep_locks
= 1;
4376 path
->search_for_split
= 1;
4377 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4378 path
->search_for_split
= 0;
4385 leaf
= path
->nodes
[0];
4386 /* if our item isn't there, return now */
4387 if (item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
4390 /* the leaf has changed, it now has room. return now */
4391 if (btrfs_leaf_free_space(fs_info
, path
->nodes
[0]) >= ins_len
)
4394 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4395 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4396 struct btrfs_file_extent_item
);
4397 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
4401 btrfs_set_path_blocking(path
);
4402 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
4406 path
->keep_locks
= 0;
4407 btrfs_unlock_up_safe(path
, 1);
4410 path
->keep_locks
= 0;
static noinline int split_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path,
			       const struct btrfs_key *new_key,
			       unsigned long split_offset)
{
4419 struct extent_buffer
*leaf
;
4420 struct btrfs_item
*item
;
4421 struct btrfs_item
*new_item
;
4427 struct btrfs_disk_key disk_key
;
4429 leaf
= path
->nodes
[0];
4430 BUG_ON(btrfs_leaf_free_space(fs_info
, leaf
) < sizeof(struct btrfs_item
));
4432 btrfs_set_path_blocking(path
);
4434 item
= btrfs_item_nr(path
->slots
[0]);
4435 orig_offset
= btrfs_item_offset(leaf
, item
);
4436 item_size
= btrfs_item_size(leaf
, item
);
4438 buf
= kmalloc(item_size
, GFP_NOFS
);
4442 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
4443 path
->slots
[0]), item_size
);
4445 slot
= path
->slots
[0] + 1;
4446 nritems
= btrfs_header_nritems(leaf
);
4447 if (slot
!= nritems
) {
4448 /* shift the items */
4449 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
4450 btrfs_item_nr_offset(slot
),
4451 (nritems
- slot
) * sizeof(struct btrfs_item
));
4454 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
4455 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4457 new_item
= btrfs_item_nr(slot
);
4459 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
4460 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
4462 btrfs_set_item_offset(leaf
, item
,
4463 orig_offset
+ item_size
- split_offset
);
4464 btrfs_set_item_size(leaf
, item
, split_offset
);
4466 btrfs_set_header_nritems(leaf
, nritems
+ 1);
4468 /* write the data for the start of the original item */
4469 write_extent_buffer(leaf
, buf
,
4470 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4473 /* write the data for the new item */
4474 write_extent_buffer(leaf
, buf
+ split_offset
,
4475 btrfs_item_ptr_offset(leaf
, slot
),
4476 item_size
- split_offset
);
4477 btrfs_mark_buffer_dirty(leaf
);
4479 BUG_ON(btrfs_leaf_free_space(fs_info
, leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(root->fs_info, path, new_key, split_offset);
	return ret;
}
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
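/*
 * Illustrative sketch (not part of the original file): how a caller would
 * use btrfs_duplicate_item() when splitting a file extent in place.  The
 * new key keeps the same objectid and type and only changes the offset;
 * the example_* name and the split offset are hypothetical.
 */
static int example_split_extent_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, u64 split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = split_offset;

	/* both items end up next to each other in the same leaf */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}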
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
			 struct btrfs_path *path, u32 new_size, int from_end)
{
4561 struct extent_buffer
*leaf
;
4562 struct btrfs_item
*item
;
4564 unsigned int data_end
;
4565 unsigned int old_data_start
;
4566 unsigned int old_size
;
4567 unsigned int size_diff
;
4569 struct btrfs_map_token token
;
4571 btrfs_init_map_token(&token
);
4573 leaf
= path
->nodes
[0];
4574 slot
= path
->slots
[0];
4576 old_size
= btrfs_item_size_nr(leaf
, slot
);
4577 if (old_size
== new_size
)
4580 nritems
= btrfs_header_nritems(leaf
);
4581 data_end
= leaf_data_end(fs_info
, leaf
);
4583 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
4585 size_diff
= old_size
- new_size
;
4588 BUG_ON(slot
>= nritems
);
4591 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4593 /* first correct the data pointers */
4594 for (i
= slot
; i
< nritems
; i
++) {
4596 item
= btrfs_item_nr(i
);
4598 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4599 btrfs_set_token_item_offset(leaf
, item
,
4600 ioff
+ size_diff
, &token
);
4603 /* shift the data */
4605 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4606 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
4607 data_end
, old_data_start
+ new_size
- data_end
);
4609 struct btrfs_disk_key disk_key
;
4612 btrfs_item_key(leaf
, &disk_key
, slot
);
4614 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
4616 struct btrfs_file_extent_item
*fi
;
4618 fi
= btrfs_item_ptr(leaf
, slot
,
4619 struct btrfs_file_extent_item
);
4620 fi
= (struct btrfs_file_extent_item
*)(
4621 (unsigned long)fi
- size_diff
);
4623 if (btrfs_file_extent_type(leaf
, fi
) ==
4624 BTRFS_FILE_EXTENT_INLINE
) {
4625 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
4626 memmove_extent_buffer(leaf
, ptr
,
4628 BTRFS_FILE_EXTENT_INLINE_DATA_START
);
4632 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4633 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
4634 data_end
, old_data_start
- data_end
);
4636 offset
= btrfs_disk_key_offset(&disk_key
);
4637 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
4638 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4640 fixup_low_keys(fs_info
, path
, &disk_key
, 1);
4643 item
= btrfs_item_nr(slot
);
4644 btrfs_set_item_size(leaf
, item
, new_size
);
4645 btrfs_mark_buffer_dirty(leaf
);
4647 if (btrfs_leaf_free_space(fs_info
, leaf
) < 0) {
4648 btrfs_print_leaf(fs_info
, leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		       u32 data_size)
{
4660 struct extent_buffer
*leaf
;
4661 struct btrfs_item
*item
;
4663 unsigned int data_end
;
4664 unsigned int old_data
;
4665 unsigned int old_size
;
4667 struct btrfs_map_token token
;
4669 btrfs_init_map_token(&token
);
4671 leaf
= path
->nodes
[0];
4673 nritems
= btrfs_header_nritems(leaf
);
4674 data_end
= leaf_data_end(fs_info
, leaf
);
4676 if (btrfs_leaf_free_space(fs_info
, leaf
) < data_size
) {
4677 btrfs_print_leaf(fs_info
, leaf
);
4680 slot
= path
->slots
[0];
4681 old_data
= btrfs_item_end_nr(leaf
, slot
);
4684 if (slot
>= nritems
) {
4685 btrfs_print_leaf(fs_info
, leaf
);
4686 btrfs_crit(fs_info
, "slot %d too large, nritems %d",
4692 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4694 /* first correct the data pointers */
4695 for (i
= slot
; i
< nritems
; i
++) {
4697 item
= btrfs_item_nr(i
);
4699 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4700 btrfs_set_token_item_offset(leaf
, item
,
4701 ioff
- data_size
, &token
);
4704 /* shift the data */
4705 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4706 data_end
- data_size
, btrfs_leaf_data(leaf
) +
4707 data_end
, old_data
- data_end
);
4709 data_end
= old_data
;
4710 old_size
= btrfs_item_size_nr(leaf
, slot
);
4711 item
= btrfs_item_nr(slot
);
4712 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
4713 btrfs_mark_buffer_dirty(leaf
);
4715 if (btrfs_leaf_free_space(fs_info
, leaf
) < 0) {
4716 btrfs_print_leaf(fs_info
, leaf
);
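/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * growing an existing item with btrfs_extend_item().  A real caller must
 * guarantee the leaf has room for the extra bytes, typically by passing the
 * extra size as ins_len to btrfs_search_slot() so the tree is split as
 * needed on the way down.  The key and the 16 byte growth are assumptions.
 */
static int __maybe_unused example_grow_item(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            const struct btrfs_key *key)
{
        struct btrfs_path *path;
        const u32 extra = 16;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* reserve room for 'extra' more bytes while walking down, cow = 1 */
        ret = btrfs_search_slot(trans, root, key, path, extra, 1);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }

        /* the new bytes appear at the end of the item, uninitialized */
        btrfs_extend_item(root->fs_info, path, extra);
out:
        btrfs_free_path(path);
        return ret;
}
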
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
                            const struct btrfs_key *cpu_key, u32 *data_size,
                            u32 total_data, u32 total_size, int nr)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_item *item;
        int i;
        u32 nritems;
        unsigned int data_end;
        struct btrfs_disk_key disk_key;
        struct extent_buffer *leaf;
        int slot;
        struct btrfs_map_token token;

        if (path->slots[0] == 0) {
                btrfs_cpu_key_to_disk(&disk_key, cpu_key);
                fixup_low_keys(fs_info, path, &disk_key, 1);
        }
        btrfs_unlock_up_safe(path, 1);

        btrfs_init_map_token(&token);

        leaf = path->nodes[0];
        slot = path->slots[0];

        nritems = btrfs_header_nritems(leaf);
        data_end = leaf_data_end(fs_info, leaf);

        if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
                btrfs_print_leaf(fs_info, leaf);
                btrfs_crit(fs_info, "not enough freespace need %u have %d",
                           total_size, btrfs_leaf_free_space(fs_info, leaf));
                BUG();
        }

        if (slot != nritems) {
                unsigned int old_data = btrfs_item_end_nr(leaf, slot);

                if (old_data < data_end) {
                        btrfs_print_leaf(fs_info, leaf);
                        btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
                                   slot, old_data, data_end);
                        BUG_ON(1);
                }
                /*
                 * item0..itemN ... dataN.offset..dataN.size .. data0.size
                 */
                /* first correct the data pointers */
                for (i = slot; i < nritems; i++) {
                        u32 ioff;

                        item = btrfs_item_nr(i);
                        ioff = btrfs_token_item_offset(leaf, item, &token);
                        btrfs_set_token_item_offset(leaf, item,
                                                    ioff - total_data, &token);
                }
                /* shift the items */
                memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
                              btrfs_item_nr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_item));

                /* shift the data */
                memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
                              data_end - total_data, btrfs_leaf_data(leaf) +
                              data_end, old_data - data_end);
                data_end = old_data;
        }

        /* setup the item for the new data */
        for (i = 0; i < nr; i++) {
                btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
                btrfs_set_item_key(leaf, &disk_key, slot + i);
                item = btrfs_item_nr(slot + i);
                btrfs_set_token_item_offset(leaf, item,
                                            data_end - data_size[i], &token);
                data_end -= data_size[i];
                btrfs_set_token_item_size(leaf, item, data_size[i], &token);
        }

        btrfs_set_header_nritems(leaf, nritems + nr);
        btrfs_mark_buffer_dirty(leaf);

        if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
                btrfs_print_leaf(fs_info, leaf);
                BUG();
        }
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path,
                             const struct btrfs_key *cpu_key, u32 *data_size,
                             int nr)
{
        int ret = 0;
        int slot;
        int i;
        u32 total_size = 0;
        u32 total_data = 0;

        for (i = 0; i < nr; i++)
                total_data += data_size[i];

        total_size = total_data + (nr * sizeof(struct btrfs_item));
        ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
        if (ret == 0)
                return -EEXIST;
        if (ret < 0)
                return ret;

        slot = path->slots[0];
        BUG_ON(slot < 0);

        setup_items_for_insert(root, path, cpu_key, data_size,
                               total_data, total_size, nr);
        return 0;
}

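/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * reserving room for two items with a single tree search and then filling
 * both payloads through the returned path.  Keys, sizes and payload buffers
 * are supplied by the caller; both items land in the same leaf.
 */
static int __maybe_unused example_insert_two_items(struct btrfs_trans_handle *trans,
                                                   struct btrfs_root *root,
                                                   const struct btrfs_key *keys,
                                                   void *data0, u32 len0,
                                                   void *data1, u32 len1)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        u32 sizes[2] = { len0, len1 };
        unsigned long ptr;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
        if (ret)
                goto out;

        /* path->slots[0] points at the first of the two new items */
        leaf = path->nodes[0];
        ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, data0, ptr, len0);
        ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + 1);
        write_extent_buffer(leaf, data1, ptr, len1);
        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}
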
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *cpu_key, void *data,
                      u32 data_size)
{
        int ret = 0;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        unsigned long ptr;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
        if (!ret) {
                leaf = path->nodes[0];
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                write_extent_buffer(leaf, data, ptr, data_size);
                btrfs_mark_buffer_dirty(leaf);
        }
        btrfs_free_path(path);
        return ret;
}

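/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * the one-shot helper above is all that is needed when the payload is
 * already in memory.  The key fields below are assumptions; a real caller
 * uses the key type that matches the payload it stores.
 */
static int __maybe_unused example_insert_blob(struct btrfs_trans_handle *trans,
                                              struct btrfs_root *root,
                                              u64 objectid, u8 type, u64 offset,
                                              void *blob, u32 blob_len)
{
        struct btrfs_key key;

        key.objectid = objectid;
        key.type = type;
        key.offset = offset;

        /* returns -EEXIST if an item with this key is already in the tree */
        return btrfs_insert_item(trans, root, &key, blob, blob_len);
}
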
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
                    int level, int slot)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *parent = path->nodes[level];
        u32 nritems;
        int ret;

        nritems = btrfs_header_nritems(parent);
        if (slot != nritems - 1) {
                if (level)
                        tree_mod_log_eb_move(fs_info, parent, slot,
                                             slot + 1, nritems - slot - 1);
                memmove_extent_buffer(parent,
                              btrfs_node_key_ptr_offset(slot),
                              btrfs_node_key_ptr_offset(slot + 1),
                              sizeof(struct btrfs_key_ptr) *
                              (nritems - slot - 1));
        } else if (level) {
                ret = tree_mod_log_insert_key(fs_info, parent, slot,
                                              MOD_LOG_KEY_REMOVE, GFP_NOFS);
                BUG_ON(ret < 0);
        }

        nritems--;
        btrfs_set_header_nritems(parent, nritems);
        if (nritems == 0 && parent == root->node) {
                BUG_ON(btrfs_header_level(root->node) != 1);
                /* just turn the root into a leaf and break */
                btrfs_set_header_level(root->node, 0);
        } else if (slot == 0) {
                struct btrfs_disk_key disk_key;

                btrfs_node_key(parent, &disk_key, 0);
                fixup_low_keys(fs_info, path, &disk_key, level + 1);
        }
        btrfs_mark_buffer_dirty(parent);
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct extent_buffer *leaf)
{
        WARN_ON(btrfs_header_generation(leaf) != trans->transid);
        del_ptr(root, path, 1, path->slots[1]);

        /*
         * btrfs_free_extent is expensive, we want to make sure we
         * aren't holding any locks when we call it
         */
        btrfs_unlock_up_safe(path, 0);

        root_sub_used(root, leaf->len);

        extent_buffer_get(leaf);
        btrfs_free_tree_block(trans, root, leaf, 0, 1);
        free_extent_buffer_stale(leaf);
}

/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                    struct btrfs_path *path, int slot, int nr)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *leaf;
        struct btrfs_item *item;
        u32 last_off;
        u32 dsize = 0;
        int ret = 0;
        int wret;
        int i;
        u32 nritems;
        struct btrfs_map_token token;

        btrfs_init_map_token(&token);

        leaf = path->nodes[0];
        last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

        for (i = 0; i < nr; i++)
                dsize += btrfs_item_size_nr(leaf, slot + i);

        nritems = btrfs_header_nritems(leaf);

        if (slot + nr != nritems) {
                int data_end = leaf_data_end(fs_info, leaf);

                memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
                              data_end + dsize,
                              btrfs_leaf_data(leaf) + data_end,
                              last_off - data_end);

                for (i = slot + nr; i < nritems; i++) {
                        u32 ioff;

                        item = btrfs_item_nr(i);
                        ioff = btrfs_token_item_offset(leaf, item, &token);
                        btrfs_set_token_item_offset(leaf, item,
                                                    ioff + dsize, &token);
                }

                memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
                              btrfs_item_nr_offset(slot + nr),
                              sizeof(struct btrfs_item) *
                              (nritems - slot - nr));
        }
        btrfs_set_header_nritems(leaf, nritems - nr);
        nritems -= nr;

        /* delete the leaf if we've emptied it */
        if (nritems == 0) {
                if (leaf == root->node) {
                        btrfs_set_header_level(leaf, 0);
                } else {
                        btrfs_set_path_blocking(path);
                        clean_tree_block(fs_info, leaf);
                        btrfs_del_leaf(trans, root, path, leaf);
                }
        } else {
                int used = leaf_space_used(leaf, 0, nritems);
                if (slot == 0) {
                        struct btrfs_disk_key disk_key;

                        btrfs_item_key(leaf, &disk_key, 0);
                        fixup_low_keys(fs_info, path, &disk_key, 1);
                }

                /* delete the leaf if it is mostly empty */
                if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
                        /* push_leaf_left fixes the path.
                         * make sure the path still points to our leaf
                         * for a possible call to del_ptr below
                         */
                        slot = path->slots[1];
                        extent_buffer_get(leaf);

                        btrfs_set_path_blocking(path);
                        wret = push_leaf_left(trans, root, path, 1, 1,
                                              1, (u32)-1);
                        if (wret < 0 && wret != -ENOSPC)
                                ret = wret;

                        if (path->nodes[0] == leaf &&
                            btrfs_header_nritems(leaf)) {
                                wret = push_leaf_right(trans, root, path, 1,
                                                       1, 1, 0);
                                if (wret < 0 && wret != -ENOSPC)
                                        ret = wret;
                        }

                        if (btrfs_header_nritems(leaf) == 0) {
                                path->slots[1] = slot;
                                btrfs_del_leaf(trans, root, path, leaf);
                                free_extent_buffer(leaf);
                                ret = 0;
                        } else {
                                /* if we're still in the path, make sure
                                 * we're dirty.  Otherwise, one of the
                                 * push_leaf functions must have already
                                 * dirtied this buffer
                                 */
                                if (path->nodes[0] == leaf)
                                        btrfs_mark_buffer_dirty(leaf);
                                free_extent_buffer(leaf);
                        }
                } else {
                        btrfs_mark_buffer_dirty(leaf);
                }
        }
        return ret;
}

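/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * deleting a single item by key.  The search must cow (last argument 1) and
 * passes -1 as ins_len so btrfs_search_slot() may rebalance for a deletion
 * on the way down; btrfs_del_items() then removes the slot the path points
 * at.
 */
static int __maybe_unused example_delete_one_item(struct btrfs_trans_handle *trans,
                                                  struct btrfs_root *root,
                                                  const struct btrfs_key *key)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, key, path, -1, 1);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
out:
        btrfs_free_path(path);
        return ret;
}
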
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
        struct btrfs_key key;
        struct btrfs_disk_key found_key;
        int ret;

        btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

        if (key.offset > 0) {
                key.offset--;
        } else if (key.type > 0) {
                key.type--;
                key.offset = (u64)-1;
        } else if (key.objectid > 0) {
                key.objectid--;
                key.type = (u8)-1;
                key.offset = (u64)-1;
        } else {
                return 1;
        }

        btrfs_release_path(path);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ret;
        btrfs_item_key(path->nodes[0], &found_key, 0);
        ret = comp_keys(&found_key, &key);
        /*
         * We might have had an item with the previous key in the tree right
         * before we released our path. And after we released our path, that
         * item might have been pushed to the first slot (0) of the leaf we
         * were holding due to a tree balance. Alternatively, an item with the
         * previous key can exist as the only element of a leaf (big fat item).
         * Therefore account for these 2 cases, so that our callers (like
         * btrfs_previous_item) don't miss an existing item with a key matching
         * the previous key we computed above.
         */
        if (ret <= 0)
                return 0;
        return 1;
}

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
                         struct btrfs_path *path,
                         u64 min_trans)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *cur;
        struct btrfs_key found_key;
        int slot;
        int sret;
        u32 nritems;
        int level;
        int ret = 1;
        int keep_locks = path->keep_locks;

        path->keep_locks = 1;
again:
        cur = btrfs_read_lock_root_node(root);
        level = btrfs_header_level(cur);
        WARN_ON(path->nodes[level]);
        path->nodes[level] = cur;
        path->locks[level] = BTRFS_READ_LOCK;

        if (btrfs_header_generation(cur) < min_trans) {
                ret = 1;
                goto out;
        }
        while (1) {
                nritems = btrfs_header_nritems(cur);
                level = btrfs_header_level(cur);
                sret = bin_search(cur, min_key, level, &slot);

                /* at the lowest level, we're done, setup the path and exit */
                if (level == path->lowest_level) {
                        if (slot >= nritems)
                                goto find_next_key;
                        ret = 0;
                        path->slots[level] = slot;
                        btrfs_item_key_to_cpu(cur, &found_key, slot);
                        goto out;
                }
                if (sret && slot > 0)
                        slot--;
                /*
                 * check this node pointer against the min_trans parameters.
                 * If it is too old, skip to the next one.
                 */
                while (slot < nritems) {
                        u64 gen;

                        gen = btrfs_node_ptr_generation(cur, slot);
                        if (gen < min_trans) {
                                slot++;
                                continue;
                        }
                        break;
                }
find_next_key:
                /*
                 * we didn't find a candidate key in this node, walk forward
                 * and find another one
                 */
                if (slot >= nritems) {
                        path->slots[level] = slot;
                        btrfs_set_path_blocking(path);
                        sret = btrfs_find_next_key(root, path, min_key, level,
                                                   min_trans);
                        if (sret == 0) {
                                btrfs_release_path(path);
                                goto again;
                        } else {
                                goto out;
                        }
                }
                /* save our key for returning back */
                btrfs_node_key_to_cpu(cur, &found_key, slot);
                path->slots[level] = slot;
                if (level == path->lowest_level) {
                        ret = 0;
                        goto out;
                }
                btrfs_set_path_blocking(path);
                cur = read_node_slot(fs_info, cur, slot);
                if (IS_ERR(cur)) {
                        ret = PTR_ERR(cur);
                        goto out;
                }

                btrfs_tree_read_lock(cur);

                path->locks[level - 1] = BTRFS_READ_LOCK;
                path->nodes[level - 1] = cur;
                unlock_up(path, level, 1, 0, NULL);
                btrfs_clear_path_blocking(path, NULL, 0);
        }
out:
        path->keep_locks = keep_locks;
        if (ret == 0) {
                btrfs_unlock_up_safe(path, path->lowest_level + 1);
                btrfs_set_path_blocking(path);
                memcpy(min_key, &found_key, sizeof(found_key));
        }
        return ret;
}

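/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * a typical btrfs_search_forward() scan in the style of defrag/tree-log.
 * Only tree blocks whose generation is >= min_trans are visited, and the
 * walk restarts just past the key that was handed back.  The key-advance
 * logic below is an assumption for illustration; processing of each item is
 * left as a comment.
 */
static int __maybe_unused example_scan_newer_than(struct btrfs_root *root,
                                                  u64 min_trans)
{
        struct btrfs_path *path;
        struct btrfs_key min_key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        min_key.objectid = 0;
        min_key.type = 0;
        min_key.offset = 0;

        while (1) {
                ret = btrfs_search_forward(root, &min_key, path, min_trans);
                if (ret) {
                        /* 1 means nothing newer than min_trans, < 0 is an error */
                        if (ret > 0)
                                ret = 0;
                        break;
                }

                /* min_key now holds the key that was found; process it here */

                btrfs_release_path(path);

                /* continue the scan right after the key we just handled */
                if (min_key.offset < (u64)-1) {
                        min_key.offset++;
                } else if (min_key.type < (u8)-1) {
                        min_key.type++;
                        min_key.offset = 0;
                } else if (min_key.objectid < (u64)-1) {
                        min_key.objectid++;
                        min_key.type = 0;
                        min_key.offset = 0;
                } else {
                        break;
                }
        }
        btrfs_free_path(path);
        return ret;
}
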
static int tree_move_down(struct btrfs_fs_info *fs_info,
                          struct btrfs_path *path,
                          int *level)
{
        struct extent_buffer *eb;

        BUG_ON(*level == 0);
        eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
        if (IS_ERR(eb))
                return PTR_ERR(eb);

        path->nodes[*level - 1] = eb;
        path->slots[*level - 1] = 0;
        (*level)--;
        return 0;
}

static int tree_move_next_or_upnext(struct btrfs_path *path,
                                    int *level, int root_level)
{
        int ret = 0;
        int nritems;
        nritems = btrfs_header_nritems(path->nodes[*level]);

        path->slots[*level]++;

        while (path->slots[*level] >= nritems) {
                if (*level == root_level)
                        return -1;

                /* move upnext */
                path->slots[*level] = 0;
                free_extent_buffer(path->nodes[*level]);
                path->nodes[*level] = NULL;
                (*level)++;
                path->slots[*level]++;

                nritems = btrfs_header_nritems(path->nodes[*level]);
                ret = 1;
        }
        return ret;
}

/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_fs_info *fs_info,
                        struct btrfs_path *path,
                        int *level, int root_level,
                        int allow_down,
                        struct btrfs_key *key)
{
        int ret;

        if (*level == 0 || !allow_down) {
                ret = tree_move_next_or_upnext(path, level, root_level);
        } else {
                ret = tree_move_down(fs_info, path, level);
        }
        if (ret >= 0) {
                if (*level == 0)
                        btrfs_item_key_to_cpu(path->nodes[*level], key,
                                              path->slots[*level]);
                else
                        btrfs_node_key_to_cpu(path->nodes[*level], key,
                                              path->slots[*level]);
        }
        return ret;
}

static int tree_compare_item(struct btrfs_path *left_path,
                             struct btrfs_path *right_path,
                             char *tmp_buf)
{
        int cmp;
        int len1, len2;
        unsigned long off1, off2;

        len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
        len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
        if (len1 != len2)
                return 1;

        off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
        off2 = btrfs_item_ptr_offset(right_path->nodes[0],
                                     right_path->slots[0]);

        read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

        cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
        if (cmp)
                return 1;
        return 0;
}

#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
                        struct btrfs_root *right_root,
                        btrfs_changed_cb_t changed_cb, void *ctx)
{
        struct btrfs_fs_info *fs_info = left_root->fs_info;
        int ret;
        int cmp;
        struct btrfs_path *left_path = NULL;
        struct btrfs_path *right_path = NULL;
        struct btrfs_key left_key;
        struct btrfs_key right_key;
        char *tmp_buf = NULL;
        int left_root_level;
        int right_root_level;
        int left_level;
        int right_level;
        int left_end_reached;
        int right_end_reached;
        int advance_left;
        int advance_right;
        u64 left_blockptr;
        u64 right_blockptr;
        u64 left_gen;
        u64 right_gen;

        left_path = btrfs_alloc_path();
        if (!left_path) {
                ret = -ENOMEM;
                goto out;
        }
        right_path = btrfs_alloc_path();
        if (!right_path) {
                ret = -ENOMEM;
                goto out;
        }

        tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
        if (!tmp_buf) {
                tmp_buf = vmalloc(fs_info->nodesize);
                if (!tmp_buf) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        left_path->search_commit_root = 1;
        left_path->skip_locking = 1;
        right_path->search_commit_root = 1;
        right_path->skip_locking = 1;

        /*
         * Strategy: Go to the first items of both trees. Then do
         *
         * If both trees are at level 0
         *   Compare keys of current items
         *     If left < right treat left item as new, advance left tree
         *       and repeat
         *     If left > right treat right item as deleted, advance right tree
         *       and repeat
         *     If left == right do deep compare of items, treat as changed if
         *       needed, advance both trees and repeat
         * If both trees are at the same level but not at level 0
         *   Compare keys of current nodes/leaves
         *     If left < right advance left tree and repeat
         *     If left > right advance right tree and repeat
         *     If left == right compare blockptrs of the next nodes/leaves
         *       If they match advance both trees but stay at the same level
         *         and repeat
         *       If they don't match advance both trees while allowing to go
         *         deeper and repeat
         * If tree levels are different
         *   Advance the tree that needs it and repeat
         *
         * Advancing a tree means:
         *   If we are at level 0, try to go to the next slot. If that's not
         *   possible, go one level up and repeat. Stop when we found a level
         *   where we could go to the next slot. We may at this point be on a
         *   node or a leaf.
         *
         *   If we are not at level 0 and not on shared tree blocks, go one
         *   level deeper.
         *
         *   If we are not at level 0 and on shared tree blocks, go one slot to
         *   the right if possible or go up and right.
         */

        down_read(&fs_info->commit_root_sem);
        left_level = btrfs_header_level(left_root->commit_root);
        left_root_level = left_level;
        left_path->nodes[left_level] = left_root->commit_root;
        extent_buffer_get(left_path->nodes[left_level]);

        right_level = btrfs_header_level(right_root->commit_root);
        right_root_level = right_level;
        right_path->nodes[right_level] = right_root->commit_root;
        extent_buffer_get(right_path->nodes[right_level]);
        up_read(&fs_info->commit_root_sem);

        if (left_level == 0)
                btrfs_item_key_to_cpu(left_path->nodes[left_level],
                                      &left_key, left_path->slots[left_level]);
        else
                btrfs_node_key_to_cpu(left_path->nodes[left_level],
                                      &left_key, left_path->slots[left_level]);
        if (right_level == 0)
                btrfs_item_key_to_cpu(right_path->nodes[right_level],
                                      &right_key, right_path->slots[right_level]);
        else
                btrfs_node_key_to_cpu(right_path->nodes[right_level],
                                      &right_key, right_path->slots[right_level]);

        left_end_reached = right_end_reached = 0;
        advance_left = advance_right = 0;

        while (1) {
                if (advance_left && !left_end_reached) {
                        ret = tree_advance(fs_info, left_path, &left_level,
                                           left_root_level,
                                           advance_left != ADVANCE_ONLY_NEXT,
                                           &left_key);
                        if (ret == -1)
                                left_end_reached = ADVANCE;
                        else if (ret < 0)
                                goto out;
                        advance_left = 0;
                }
                if (advance_right && !right_end_reached) {
                        ret = tree_advance(fs_info, right_path, &right_level,
                                           right_root_level,
                                           advance_right != ADVANCE_ONLY_NEXT,
                                           &right_key);
                        if (ret == -1)
                                right_end_reached = ADVANCE;
                        else if (ret < 0)
                                goto out;
                        advance_right = 0;
                }

                if (left_end_reached && right_end_reached) {
                        ret = 0;
                        goto out;
                } else if (left_end_reached) {
                        if (right_level == 0) {
                                ret = changed_cb(left_root, right_root,
                                                 left_path, right_path,
                                                 &right_key,
                                                 BTRFS_COMPARE_TREE_DELETED,
                                                 ctx);
                                if (ret < 0)
                                        goto out;
                        }
                        advance_right = ADVANCE;
                        continue;
                } else if (right_end_reached) {
                        if (left_level == 0) {
                                ret = changed_cb(left_root, right_root,
                                                 left_path, right_path,
                                                 &left_key,
                                                 BTRFS_COMPARE_TREE_NEW,
                                                 ctx);
                                if (ret < 0)
                                        goto out;
                        }
                        advance_left = ADVANCE;
                        continue;
                }

                if (left_level == 0 && right_level == 0) {
                        cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
                        if (cmp < 0) {
                                ret = changed_cb(left_root, right_root,
                                                 left_path, right_path,
                                                 &left_key,
                                                 BTRFS_COMPARE_TREE_NEW,
                                                 ctx);
                                if (ret < 0)
                                        goto out;
                                advance_left = ADVANCE;
                        } else if (cmp > 0) {
                                ret = changed_cb(left_root, right_root,
                                                 left_path, right_path,
                                                 &right_key,
                                                 BTRFS_COMPARE_TREE_DELETED,
                                                 ctx);
                                if (ret < 0)
                                        goto out;
                                advance_right = ADVANCE;
                        } else {
                                enum btrfs_compare_tree_result result;

                                WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
                                ret = tree_compare_item(left_path, right_path,
                                                        tmp_buf);
                                if (ret)
                                        result = BTRFS_COMPARE_TREE_CHANGED;
                                else
                                        result = BTRFS_COMPARE_TREE_SAME;
                                ret = changed_cb(left_root, right_root,
                                                 left_path, right_path,
                                                 &left_key, result, ctx);
                                if (ret < 0)
                                        goto out;
                                advance_left = ADVANCE;
                                advance_right = ADVANCE;
                        }
                } else if (left_level == right_level) {
                        cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
                        if (cmp < 0) {
                                advance_left = ADVANCE;
                        } else if (cmp > 0) {
                                advance_right = ADVANCE;
                        } else {
                                left_blockptr = btrfs_node_blockptr(
                                                left_path->nodes[left_level],
                                                left_path->slots[left_level]);
                                right_blockptr = btrfs_node_blockptr(
                                                right_path->nodes[right_level],
                                                right_path->slots[right_level]);
                                left_gen = btrfs_node_ptr_generation(
                                                left_path->nodes[left_level],
                                                left_path->slots[left_level]);
                                right_gen = btrfs_node_ptr_generation(
                                                right_path->nodes[right_level],
                                                right_path->slots[right_level]);
                                if (left_blockptr == right_blockptr &&
                                    left_gen == right_gen) {
                                        /*
                                         * As we're on a shared block, don't
                                         * allow to go deeper.
                                         */
                                        advance_left = ADVANCE_ONLY_NEXT;
                                        advance_right = ADVANCE_ONLY_NEXT;
                                } else {
                                        advance_left = ADVANCE;
                                        advance_right = ADVANCE;
                                }
                        }
                } else if (left_level < right_level) {
                        advance_right = ADVANCE;
                } else {
                        advance_left = ADVANCE;
                }
        }

out:
        btrfs_free_path(left_path);
        btrfs_free_path(right_path);
        kvfree(tmp_buf);
        return ret;
}

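/*
 * A minimal sketch (hypothetical, not part of the btrfs API) of a
 * btrfs_compare_trees() callback.  Send uses this interface to diff two
 * snapshot commit roots; the counters in the made-up ctx struct below just
 * tally what kind of difference was reported.  A caller would pass
 * example_changed_cb and a struct example_diff_stats to btrfs_compare_trees().
 */
struct example_diff_stats {
        u64 new_items;
        u64 deleted_items;
        u64 changed_items;
};

static int __maybe_unused example_changed_cb(struct btrfs_root *left_root,
                                             struct btrfs_root *right_root,
                                             struct btrfs_path *left_path,
                                             struct btrfs_path *right_path,
                                             struct btrfs_key *key,
                                             enum btrfs_compare_tree_result result,
                                             void *ctx)
{
        struct example_diff_stats *stats = ctx;

        switch (result) {
        case BTRFS_COMPARE_TREE_NEW:
                stats->new_items++;
                break;
        case BTRFS_COMPARE_TREE_DELETED:
                stats->deleted_items++;
                break;
        case BTRFS_COMPARE_TREE_CHANGED:
                stats->changed_items++;
                break;
        case BTRFS_COMPARE_TREE_SAME:
                /* only reported for identical keys with identical payloads */
                break;
        }
        /* returning < 0 makes btrfs_compare_trees() abort and propagate it */
        return 0;
}
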
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
                        struct btrfs_key *key, int level, u64 min_trans)
{
        int slot;
        struct extent_buffer *c;

        WARN_ON(!path->keep_locks);
        while (level < BTRFS_MAX_LEVEL) {
                if (!path->nodes[level])
                        return 1;

                slot = path->slots[level] + 1;
                c = path->nodes[level];
next:
                if (slot >= btrfs_header_nritems(c)) {
                        int ret;
                        int orig_lowest;
                        struct btrfs_key cur_key;
                        if (level + 1 >= BTRFS_MAX_LEVEL ||
                            !path->nodes[level + 1])
                                return 1;

                        if (path->locks[level + 1]) {
                                level++;
                                continue;
                        }

                        slot = btrfs_header_nritems(c) - 1;
                        if (level == 0)
                                btrfs_item_key_to_cpu(c, &cur_key, slot);
                        else
                                btrfs_node_key_to_cpu(c, &cur_key, slot);

                        orig_lowest = path->lowest_level;
                        btrfs_release_path(path);
                        path->lowest_level = level;
                        ret = btrfs_search_slot(NULL, root, &cur_key, path,
                                                0, 0);
                        path->lowest_level = orig_lowest;
                        if (ret < 0)
                                return ret;

                        c = path->nodes[level];
                        slot = path->slots[level];
                        if (ret == 0)
                                slot++;
                        goto next;
                }

                if (level == 0)
                        btrfs_item_key_to_cpu(c, key, slot);
                else {
                        u64 gen = btrfs_node_ptr_generation(c, slot);

                        if (gen < min_trans) {
                                slot++;
                                goto next;
                        }
                        btrfs_node_key_to_cpu(c, key, slot);
                }
                return 0;
        }
        return 1;
}

/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
        return btrfs_next_old_leaf(root, path, 0);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                        u64 time_seq)
{
        int slot;
        int level;
        struct extent_buffer *c;
        struct extent_buffer *next;
        struct btrfs_key key;
        u32 nritems;
        int ret;
        int old_spinning = path->leave_spinning;
        int next_rw_lock = 0;

        nritems = btrfs_header_nritems(path->nodes[0]);
        if (nritems == 0)
                return 1;

        btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
        level = 1;
        next = NULL;
        next_rw_lock = 0;
        btrfs_release_path(path);

        path->keep_locks = 1;
        path->leave_spinning = 1;

        if (time_seq)
                ret = btrfs_search_old_slot(root, &key, path, time_seq);
        else
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        path->keep_locks = 0;

        if (ret < 0)
                return ret;

        nritems = btrfs_header_nritems(path->nodes[0]);
        /*
         * by releasing the path above we dropped all our locks.  A balance
         * could have added more items next to the key that used to be
         * at the very end of the block.  So, check again here and
         * advance the path if there are now more items available.
         */
        if (nritems > 0 && path->slots[0] < nritems - 1) {
                if (ret == 0)
                        path->slots[0]++;
                ret = 0;
                goto done;
        }
        /*
         * So the above check misses one case:
         * - after releasing the path above, someone has removed the item that
         *   used to be at the very end of the block, and balance between leaves
         *   gets another one with bigger key.offset to replace it.
         *
         * This one should be returned as well, or we can get leaf corruption
         * later (esp. in __btrfs_drop_extents()).
         *
         * And a bit more explanation about this check:
         * with ret > 0, the key isn't found, the path points to the slot
         * where it should be inserted, so the path->slots[0] item must be the
         * bigger one.
         */
        if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
                ret = 0;
                goto done;
        }

        while (level < BTRFS_MAX_LEVEL) {
                if (!path->nodes[level]) {
                        ret = 1;
                        goto done;
                }

                slot = path->slots[level] + 1;
                c = path->nodes[level];
                if (slot >= btrfs_header_nritems(c)) {
                        level++;
                        if (level == BTRFS_MAX_LEVEL) {
                                ret = 1;
                                goto done;
                        }
                        continue;
                }

                if (next) {
                        btrfs_tree_unlock_rw(next, next_rw_lock);
                        free_extent_buffer(next);
                }

                next_rw_lock = path->locks[level];
                ret = read_block_for_search(root, path, &next, level,
                                            slot, &key);
                if (ret == -EAGAIN)
                        goto again;

                if (ret < 0) {
                        btrfs_release_path(path);
                        goto done;
                }

                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
                        if (!ret && time_seq) {
                                /*
                                 * If we don't get the lock, we may be racing
                                 * with push_leaf_left, holding that lock while
                                 * itself waiting for the leaf we've currently
                                 * locked. To solve this situation, we give up
                                 * on our lock and cycle.
                                 */
                                free_extent_buffer(next);
                                btrfs_release_path(path);
                                cond_resched();
                                goto again;
                        }
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
                                btrfs_clear_path_blocking(path, next,
                                                          BTRFS_READ_LOCK);
                        }
                        next_rw_lock = BTRFS_READ_LOCK;
                }
                break;
        }
        path->slots[level] = slot;
        while (1) {
                level--;
                c = path->nodes[level];
                if (path->locks[level])
                        btrfs_tree_unlock_rw(c, path->locks[level]);

                free_extent_buffer(c);
                path->nodes[level] = next;
                path->slots[level] = 0;
                if (!path->skip_locking)
                        path->locks[level] = next_rw_lock;
                if (!level)
                        break;

                ret = read_block_for_search(root, path, &next, level,
                                            0, &key);
                if (ret == -EAGAIN)
                        goto again;

                if (ret < 0) {
                        btrfs_release_path(path);
                        goto done;
                }

                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
                                btrfs_clear_path_blocking(path, next,
                                                          BTRFS_READ_LOCK);
                        }
                        next_rw_lock = BTRFS_READ_LOCK;
                }
        }
        ret = 0;
done:
        unlock_up(path, 0, 1, 0, NULL);
        path->leave_spinning = old_spinning;
        if (!old_spinning)
                btrfs_set_path_blocking(path);

        return ret;
}

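/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * the standard "walk every item in a tree" loop built on btrfs_search_slot()
 * plus btrfs_next_leaf().  The starting key (0, 0, 0) simply means "begin at
 * the smallest key"; what to do with each item is left as a comment.
 */
static int __maybe_unused example_walk_all_items(struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = 0;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret) {
                                /* 1 means no more leaves, < 0 is an error */
                                if (ret > 0)
                                        ret = 0;
                                break;
                        }
                        continue;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                /* look at the item behind 'key' here */

                path->slots[0]++;
        }
out:
        btrfs_free_path(path);
        return ret;
}
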
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
                        struct btrfs_path *path, u64 min_objectid,
                        int type)
{
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;

        while (1) {
                if (path->slots[0] == 0) {
                        btrfs_set_path_blocking(path);
                        ret = btrfs_prev_leaf(root, path);
                        if (ret != 0)
                                return ret;
                } else {
                        path->slots[0]--;
                }
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (nritems == 0)
                        return 1;
                if (path->slots[0] == nritems)
                        path->slots[0]--;

                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid < min_objectid)
                        break;
                if (found_key.type == type)
                        return 0;
                if (found_key.objectid == min_objectid &&
                    found_key.type < type)
                        break;
        }
        return 1;
}

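/*
 * A minimal usage sketch (hypothetical caller, not part of the btrfs API):
 * position a path just past an objectid and walk backwards to the last item
 * of a given type, the usual calling pattern for btrfs_previous_item().
 * The (u8)-1/(u64)-1 key fields mean "start from the largest possible key
 * for this objectid"; the item type a real caller passes depends on the tree.
 */
static int __maybe_unused example_find_last_item_of_type(struct btrfs_root *root,
                                                          u64 objectid, u8 type,
                                                          struct btrfs_key *found)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        found->objectid = objectid;
        found->type = (u8)-1;
        found->offset = (u64)-1;

        /* lands just after the last possible key for this objectid */
        ret = btrfs_search_slot(NULL, root, found, path, 0, 0);
        if (ret < 0)
                goto out;

        ret = btrfs_previous_item(root, path, objectid, type);
        if (ret == 0)
                btrfs_item_key_to_cpu(path->nodes[0], found,
                                      path->slots[0]);
out:
        btrfs_free_path(path);
        return ret;
}
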
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
                               struct btrfs_path *path, u64 min_objectid)
{
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;

        while (1) {
                if (path->slots[0] == 0) {
                        btrfs_set_path_blocking(path);
                        ret = btrfs_prev_leaf(root, path);
                        if (ret != 0)
                                return ret;
                } else {
                        path->slots[0]--;
                }
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (nritems == 0)
                        return 1;
                if (path->slots[0] == nritems)
                        path->slots[0]--;

                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid < min_objectid)
                        break;
                if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
                    found_key.type == BTRFS_METADATA_ITEM_KEY)
                        return 0;
                if (found_key.objectid == min_objectid &&
                    found_key.type < BTRFS_EXTENT_ITEM_KEY