]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - fs/btrfs/ctree.c
Btrfs: remove unused argument of fixup_low_keys()
[mirror_ubuntu-zesty-kernel.git] / fs / btrfs / ctree.c
1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
41 struct btrfs_path *path, int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44 struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
45 u32 blocksize, u64 parent_transid,
46 u64 time_seq);
47 struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
48 u64 bytenr, u32 blocksize,
49 u64 time_seq);
50
51 struct btrfs_path *btrfs_alloc_path(void)
52 {
53 struct btrfs_path *path;
54 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
55 return path;
56 }
57
58 /*
59 * set all locked nodes in the path to blocking locks. This should
60 * be done before scheduling
61 */
62 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
63 {
64 int i;
65 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
66 if (!p->nodes[i] || !p->locks[i])
67 continue;
68 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
69 if (p->locks[i] == BTRFS_READ_LOCK)
70 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
71 else if (p->locks[i] == BTRFS_WRITE_LOCK)
72 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
73 }
74 }
75
76 /*
77 * reset all the locked nodes in the patch to spinning locks.
78 *
79 * held is used to keep lockdep happy, when lockdep is enabled
80 * we set held to a blocking lock before we go around and
81 * retake all the spinlocks in the path. You can safely use NULL
82 * for held
83 */
84 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
85 struct extent_buffer *held, int held_rw)
86 {
87 int i;
88
89 #ifdef CONFIG_DEBUG_LOCK_ALLOC
90 /* lockdep really cares that we take all of these spinlocks
91 * in the right order. If any of the locks in the path are not
92 * currently blocking, it is going to complain. So, make really
93 * really sure by forcing the path to blocking before we clear
94 * the path blocking.
95 */
96 if (held) {
97 btrfs_set_lock_blocking_rw(held, held_rw);
98 if (held_rw == BTRFS_WRITE_LOCK)
99 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
100 else if (held_rw == BTRFS_READ_LOCK)
101 held_rw = BTRFS_READ_LOCK_BLOCKING;
102 }
103 btrfs_set_path_blocking(p);
104 #endif
105
106 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
107 if (p->nodes[i] && p->locks[i]) {
108 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
109 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
110 p->locks[i] = BTRFS_WRITE_LOCK;
111 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
112 p->locks[i] = BTRFS_READ_LOCK;
113 }
114 }
115
116 #ifdef CONFIG_DEBUG_LOCK_ALLOC
117 if (held)
118 btrfs_clear_lock_blocking_rw(held, held_rw);
119 #endif
120 }
121
122 /* this also releases the path */
123 void btrfs_free_path(struct btrfs_path *p)
124 {
125 if (!p)
126 return;
127 btrfs_release_path(p);
128 kmem_cache_free(btrfs_path_cachep, p);
129 }
130
131 /*
132 * path release drops references on the extent buffers in the path
133 * and it drops any locks held by this path
134 *
135 * It is safe to call this on paths that no locks or extent buffers held.
136 */
137 noinline void btrfs_release_path(struct btrfs_path *p)
138 {
139 int i;
140
141 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
142 p->slots[i] = 0;
143 if (!p->nodes[i])
144 continue;
145 if (p->locks[i]) {
146 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
147 p->locks[i] = 0;
148 }
149 free_extent_buffer(p->nodes[i]);
150 p->nodes[i] = NULL;
151 }
152 }
153
154 /*
155 * safely gets a reference on the root node of a tree. A lock
156 * is not taken, so a concurrent writer may put a different node
157 * at the root of the tree. See btrfs_lock_root_node for the
158 * looping required.
159 *
160 * The extent buffer returned by this has a reference taken, so
161 * it won't disappear. It may stop being the root of the tree
162 * at any time because there are no locks held.
163 */
164 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
165 {
166 struct extent_buffer *eb;
167
168 while (1) {
169 rcu_read_lock();
170 eb = rcu_dereference(root->node);
171
172 /*
173 * RCU really hurts here, we could free up the root node because
174 * it was cow'ed but we may not get the new root node yet so do
175 * the inc_not_zero dance and if it doesn't work then
176 * synchronize_rcu and try again.
177 */
178 if (atomic_inc_not_zero(&eb->refs)) {
179 rcu_read_unlock();
180 break;
181 }
182 rcu_read_unlock();
183 synchronize_rcu();
184 }
185 return eb;
186 }
187
188 /* loop around taking references on and locking the root node of the
189 * tree until you end up with a lock on the root. A locked buffer
190 * is returned, with a reference held.
191 */
192 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
193 {
194 struct extent_buffer *eb;
195
196 while (1) {
197 eb = btrfs_root_node(root);
198 btrfs_tree_lock(eb);
199 if (eb == root->node)
200 break;
201 btrfs_tree_unlock(eb);
202 free_extent_buffer(eb);
203 }
204 return eb;
205 }
206
207 /* loop around taking references on and locking the root node of the
208 * tree until you end up with a lock on the root. A locked buffer
209 * is returned, with a reference held.
210 */
211 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
212 {
213 struct extent_buffer *eb;
214
215 while (1) {
216 eb = btrfs_root_node(root);
217 btrfs_tree_read_lock(eb);
218 if (eb == root->node)
219 break;
220 btrfs_tree_read_unlock(eb);
221 free_extent_buffer(eb);
222 }
223 return eb;
224 }
225
226 /* cowonly root (everything not a reference counted cow subvolume), just get
227 * put onto a simple dirty list. transaction.c walks this to make sure they
228 * get properly updated on disk.
229 */
230 static void add_root_to_dirty_list(struct btrfs_root *root)
231 {
232 spin_lock(&root->fs_info->trans_lock);
233 if (root->track_dirty && list_empty(&root->dirty_list)) {
234 list_add(&root->dirty_list,
235 &root->fs_info->dirty_cowonly_roots);
236 }
237 spin_unlock(&root->fs_info->trans_lock);
238 }
239
240 /*
241 * used by snapshot creation to make a copy of a root for a tree with
242 * a given objectid. The buffer with the new root node is returned in
243 * cow_ret, and this func returns zero on success or a negative error code.
244 */
245 int btrfs_copy_root(struct btrfs_trans_handle *trans,
246 struct btrfs_root *root,
247 struct extent_buffer *buf,
248 struct extent_buffer **cow_ret, u64 new_root_objectid)
249 {
250 struct extent_buffer *cow;
251 int ret = 0;
252 int level;
253 struct btrfs_disk_key disk_key;
254
255 WARN_ON(root->ref_cows && trans->transid !=
256 root->fs_info->running_transaction->transid);
257 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
258
259 level = btrfs_header_level(buf);
260 if (level == 0)
261 btrfs_item_key(buf, &disk_key, 0);
262 else
263 btrfs_node_key(buf, &disk_key, 0);
264
265 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
266 new_root_objectid, &disk_key, level,
267 buf->start, 0);
268 if (IS_ERR(cow))
269 return PTR_ERR(cow);
270
271 copy_extent_buffer(cow, buf, 0, 0, cow->len);
272 btrfs_set_header_bytenr(cow, cow->start);
273 btrfs_set_header_generation(cow, trans->transid);
274 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
275 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
276 BTRFS_HEADER_FLAG_RELOC);
277 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
278 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
279 else
280 btrfs_set_header_owner(cow, new_root_objectid);
281
282 write_extent_buffer(cow, root->fs_info->fsid,
283 (unsigned long)btrfs_header_fsid(cow),
284 BTRFS_FSID_SIZE);
285
286 WARN_ON(btrfs_header_generation(buf) > trans->transid);
287 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
288 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
289 else
290 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
291
292 if (ret)
293 return ret;
294
295 btrfs_mark_buffer_dirty(cow);
296 *cow_ret = cow;
297 return 0;
298 }
299
300 enum mod_log_op {
301 MOD_LOG_KEY_REPLACE,
302 MOD_LOG_KEY_ADD,
303 MOD_LOG_KEY_REMOVE,
304 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
305 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
306 MOD_LOG_MOVE_KEYS,
307 MOD_LOG_ROOT_REPLACE,
308 };
309
310 struct tree_mod_move {
311 int dst_slot;
312 int nr_items;
313 };
314
315 struct tree_mod_root {
316 u64 logical;
317 u8 level;
318 };
319
320 struct tree_mod_elem {
321 struct rb_node node;
322 u64 index; /* shifted logical */
323 u64 seq;
324 enum mod_log_op op;
325
326 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
327 int slot;
328
329 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
330 u64 generation;
331
332 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
333 struct btrfs_disk_key key;
334 u64 blockptr;
335
336 /* this is used for op == MOD_LOG_MOVE_KEYS */
337 struct tree_mod_move move;
338
339 /* this is used for op == MOD_LOG_ROOT_REPLACE */
340 struct tree_mod_root old_root;
341 };
342
343 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
344 {
345 read_lock(&fs_info->tree_mod_log_lock);
346 }
347
348 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
349 {
350 read_unlock(&fs_info->tree_mod_log_lock);
351 }
352
353 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
354 {
355 write_lock(&fs_info->tree_mod_log_lock);
356 }
357
358 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
359 {
360 write_unlock(&fs_info->tree_mod_log_lock);
361 }
362
363 /*
364 * This adds a new blocker to the tree mod log's blocker list if the @elem
365 * passed does not already have a sequence number set. So when a caller expects
366 * to record tree modifications, it should ensure to set elem->seq to zero
367 * before calling btrfs_get_tree_mod_seq.
368 * Returns a fresh, unused tree log modification sequence number, even if no new
369 * blocker was added.
370 */
371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
372 struct seq_list *elem)
373 {
374 u64 seq;
375
376 tree_mod_log_write_lock(fs_info);
377 spin_lock(&fs_info->tree_mod_seq_lock);
378 if (!elem->seq) {
379 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
380 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
381 }
382 seq = btrfs_inc_tree_mod_seq(fs_info);
383 spin_unlock(&fs_info->tree_mod_seq_lock);
384 tree_mod_log_write_unlock(fs_info);
385
386 return seq;
387 }
388
389 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
390 struct seq_list *elem)
391 {
392 struct rb_root *tm_root;
393 struct rb_node *node;
394 struct rb_node *next;
395 struct seq_list *cur_elem;
396 struct tree_mod_elem *tm;
397 u64 min_seq = (u64)-1;
398 u64 seq_putting = elem->seq;
399
400 if (!seq_putting)
401 return;
402
403 spin_lock(&fs_info->tree_mod_seq_lock);
404 list_del(&elem->list);
405 elem->seq = 0;
406
407 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
408 if (cur_elem->seq < min_seq) {
409 if (seq_putting > cur_elem->seq) {
410 /*
411 * blocker with lower sequence number exists, we
412 * cannot remove anything from the log
413 */
414 spin_unlock(&fs_info->tree_mod_seq_lock);
415 return;
416 }
417 min_seq = cur_elem->seq;
418 }
419 }
420 spin_unlock(&fs_info->tree_mod_seq_lock);
421
422 /*
423 * anything that's lower than the lowest existing (read: blocked)
424 * sequence number can be removed from the tree.
425 */
426 tree_mod_log_write_lock(fs_info);
427 tm_root = &fs_info->tree_mod_log;
428 for (node = rb_first(tm_root); node; node = next) {
429 next = rb_next(node);
430 tm = container_of(node, struct tree_mod_elem, node);
431 if (tm->seq > min_seq)
432 continue;
433 rb_erase(node, tm_root);
434 kfree(tm);
435 }
436 tree_mod_log_write_unlock(fs_info);
437 }
438
439 /*
440 * key order of the log:
441 * index -> sequence
442 *
443 * the index is the shifted logical of the *new* root node for root replace
444 * operations, or the shifted logical of the affected block for all other
445 * operations.
446 */
447 static noinline int
448 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
449 {
450 struct rb_root *tm_root;
451 struct rb_node **new;
452 struct rb_node *parent = NULL;
453 struct tree_mod_elem *cur;
454
455 BUG_ON(!tm || !tm->seq);
456
457 tm_root = &fs_info->tree_mod_log;
458 new = &tm_root->rb_node;
459 while (*new) {
460 cur = container_of(*new, struct tree_mod_elem, node);
461 parent = *new;
462 if (cur->index < tm->index)
463 new = &((*new)->rb_left);
464 else if (cur->index > tm->index)
465 new = &((*new)->rb_right);
466 else if (cur->seq < tm->seq)
467 new = &((*new)->rb_left);
468 else if (cur->seq > tm->seq)
469 new = &((*new)->rb_right);
470 else {
471 kfree(tm);
472 return -EEXIST;
473 }
474 }
475
476 rb_link_node(&tm->node, parent, new);
477 rb_insert_color(&tm->node, tm_root);
478 return 0;
479 }
480
481 /*
482 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
483 * returns zero with the tree_mod_log_lock acquired. The caller must hold
484 * this until all tree mod log insertions are recorded in the rb tree and then
485 * call tree_mod_log_write_unlock() to release.
486 */
487 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
488 struct extent_buffer *eb) {
489 smp_mb();
490 if (list_empty(&(fs_info)->tree_mod_seq_list))
491 return 1;
492 if (eb && btrfs_header_level(eb) == 0)
493 return 1;
494
495 tree_mod_log_write_lock(fs_info);
496 if (list_empty(&fs_info->tree_mod_seq_list)) {
497 /*
498 * someone emptied the list while we were waiting for the lock.
499 * we must not add to the list when no blocker exists.
500 */
501 tree_mod_log_write_unlock(fs_info);
502 return 1;
503 }
504
505 return 0;
506 }
507
508 /*
509 * This allocates memory and gets a tree modification sequence number.
510 *
511 * Returns <0 on error.
512 * Returns >0 (the added sequence number) on success.
513 */
514 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
515 struct tree_mod_elem **tm_ret)
516 {
517 struct tree_mod_elem *tm;
518
519 /*
520 * once we switch from spin locks to something different, we should
521 * honor the flags parameter here.
522 */
523 tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
524 if (!tm)
525 return -ENOMEM;
526
527 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
528 return tm->seq;
529 }
530
531 static inline int
532 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
533 struct extent_buffer *eb, int slot,
534 enum mod_log_op op, gfp_t flags)
535 {
536 int ret;
537 struct tree_mod_elem *tm;
538
539 ret = tree_mod_alloc(fs_info, flags, &tm);
540 if (ret < 0)
541 return ret;
542
543 tm->index = eb->start >> PAGE_CACHE_SHIFT;
544 if (op != MOD_LOG_KEY_ADD) {
545 btrfs_node_key(eb, &tm->key, slot);
546 tm->blockptr = btrfs_node_blockptr(eb, slot);
547 }
548 tm->op = op;
549 tm->slot = slot;
550 tm->generation = btrfs_node_ptr_generation(eb, slot);
551
552 return __tree_mod_log_insert(fs_info, tm);
553 }
554
555 static noinline int
556 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
557 struct extent_buffer *eb, int slot,
558 enum mod_log_op op, gfp_t flags)
559 {
560 int ret;
561
562 if (tree_mod_dont_log(fs_info, eb))
563 return 0;
564
565 ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
566
567 tree_mod_log_write_unlock(fs_info);
568 return ret;
569 }
570
571 static noinline int
572 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
573 int slot, enum mod_log_op op)
574 {
575 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
576 }
577
578 static noinline int
579 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
580 struct extent_buffer *eb, int slot,
581 enum mod_log_op op)
582 {
583 return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
584 }
585
586 static noinline int
587 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
588 struct extent_buffer *eb, int dst_slot, int src_slot,
589 int nr_items, gfp_t flags)
590 {
591 struct tree_mod_elem *tm;
592 int ret;
593 int i;
594
595 if (tree_mod_dont_log(fs_info, eb))
596 return 0;
597
598 /*
599 * When we override something during the move, we log these removals.
600 * This can only happen when we move towards the beginning of the
601 * buffer, i.e. dst_slot < src_slot.
602 */
603 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
604 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
605 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
606 BUG_ON(ret < 0);
607 }
608
609 ret = tree_mod_alloc(fs_info, flags, &tm);
610 if (ret < 0)
611 goto out;
612
613 tm->index = eb->start >> PAGE_CACHE_SHIFT;
614 tm->slot = src_slot;
615 tm->move.dst_slot = dst_slot;
616 tm->move.nr_items = nr_items;
617 tm->op = MOD_LOG_MOVE_KEYS;
618
619 ret = __tree_mod_log_insert(fs_info, tm);
620 out:
621 tree_mod_log_write_unlock(fs_info);
622 return ret;
623 }
624
625 static inline void
626 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
627 {
628 int i;
629 u32 nritems;
630 int ret;
631
632 if (btrfs_header_level(eb) == 0)
633 return;
634
635 nritems = btrfs_header_nritems(eb);
636 for (i = nritems - 1; i >= 0; i--) {
637 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
638 MOD_LOG_KEY_REMOVE_WHILE_FREEING);
639 BUG_ON(ret < 0);
640 }
641 }
642
643 static noinline int
644 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
645 struct extent_buffer *old_root,
646 struct extent_buffer *new_root, gfp_t flags,
647 int log_removal)
648 {
649 struct tree_mod_elem *tm;
650 int ret;
651
652 if (tree_mod_dont_log(fs_info, NULL))
653 return 0;
654
655 if (log_removal)
656 __tree_mod_log_free_eb(fs_info, old_root);
657
658 ret = tree_mod_alloc(fs_info, flags, &tm);
659 if (ret < 0)
660 goto out;
661
662 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
663 tm->old_root.logical = old_root->start;
664 tm->old_root.level = btrfs_header_level(old_root);
665 tm->generation = btrfs_header_generation(old_root);
666 tm->op = MOD_LOG_ROOT_REPLACE;
667
668 ret = __tree_mod_log_insert(fs_info, tm);
669 out:
670 tree_mod_log_write_unlock(fs_info);
671 return ret;
672 }
673
674 static struct tree_mod_elem *
675 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
676 int smallest)
677 {
678 struct rb_root *tm_root;
679 struct rb_node *node;
680 struct tree_mod_elem *cur = NULL;
681 struct tree_mod_elem *found = NULL;
682 u64 index = start >> PAGE_CACHE_SHIFT;
683
684 tree_mod_log_read_lock(fs_info);
685 tm_root = &fs_info->tree_mod_log;
686 node = tm_root->rb_node;
687 while (node) {
688 cur = container_of(node, struct tree_mod_elem, node);
689 if (cur->index < index) {
690 node = node->rb_left;
691 } else if (cur->index > index) {
692 node = node->rb_right;
693 } else if (cur->seq < min_seq) {
694 node = node->rb_left;
695 } else if (!smallest) {
696 /* we want the node with the highest seq */
697 if (found)
698 BUG_ON(found->seq > cur->seq);
699 found = cur;
700 node = node->rb_left;
701 } else if (cur->seq > min_seq) {
702 /* we want the node with the smallest seq */
703 if (found)
704 BUG_ON(found->seq < cur->seq);
705 found = cur;
706 node = node->rb_right;
707 } else {
708 found = cur;
709 break;
710 }
711 }
712 tree_mod_log_read_unlock(fs_info);
713
714 return found;
715 }
716
717 /*
718 * this returns the element from the log with the smallest time sequence
719 * value that's in the log (the oldest log item). any element with a time
720 * sequence lower than min_seq will be ignored.
721 */
722 static struct tree_mod_elem *
723 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
724 u64 min_seq)
725 {
726 return __tree_mod_log_search(fs_info, start, min_seq, 1);
727 }
728
729 /*
730 * this returns the element from the log with the largest time sequence
731 * value that's in the log (the most recent log item). any element with
732 * a time sequence lower than min_seq will be ignored.
733 */
734 static struct tree_mod_elem *
735 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
736 {
737 return __tree_mod_log_search(fs_info, start, min_seq, 0);
738 }
739
740 static noinline void
741 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
742 struct extent_buffer *src, unsigned long dst_offset,
743 unsigned long src_offset, int nr_items)
744 {
745 int ret;
746 int i;
747
748 if (tree_mod_dont_log(fs_info, NULL))
749 return;
750
751 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
752 tree_mod_log_write_unlock(fs_info);
753 return;
754 }
755
756 for (i = 0; i < nr_items; i++) {
757 ret = tree_mod_log_insert_key_locked(fs_info, src,
758 i + src_offset,
759 MOD_LOG_KEY_REMOVE);
760 BUG_ON(ret < 0);
761 ret = tree_mod_log_insert_key_locked(fs_info, dst,
762 i + dst_offset,
763 MOD_LOG_KEY_ADD);
764 BUG_ON(ret < 0);
765 }
766
767 tree_mod_log_write_unlock(fs_info);
768 }
769
770 static inline void
771 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
772 int dst_offset, int src_offset, int nr_items)
773 {
774 int ret;
775 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
776 nr_items, GFP_NOFS);
777 BUG_ON(ret < 0);
778 }
779
780 static noinline void
781 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
782 struct extent_buffer *eb, int slot, int atomic)
783 {
784 int ret;
785
786 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
787 MOD_LOG_KEY_REPLACE,
788 atomic ? GFP_ATOMIC : GFP_NOFS);
789 BUG_ON(ret < 0);
790 }
791
792 static noinline void
793 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
794 {
795 if (tree_mod_dont_log(fs_info, eb))
796 return;
797
798 __tree_mod_log_free_eb(fs_info, eb);
799
800 tree_mod_log_write_unlock(fs_info);
801 }
802
803 static noinline void
804 tree_mod_log_set_root_pointer(struct btrfs_root *root,
805 struct extent_buffer *new_root_node,
806 int log_removal)
807 {
808 int ret;
809 ret = tree_mod_log_insert_root(root->fs_info, root->node,
810 new_root_node, GFP_NOFS, log_removal);
811 BUG_ON(ret < 0);
812 }
813
814 /*
815 * check if the tree block can be shared by multiple trees
816 */
817 int btrfs_block_can_be_shared(struct btrfs_root *root,
818 struct extent_buffer *buf)
819 {
820 /*
821 * Tree blocks not in refernece counted trees and tree roots
822 * are never shared. If a block was allocated after the last
823 * snapshot and the block was not allocated by tree relocation,
824 * we know the block is not shared.
825 */
826 if (root->ref_cows &&
827 buf != root->node && buf != root->commit_root &&
828 (btrfs_header_generation(buf) <=
829 btrfs_root_last_snapshot(&root->root_item) ||
830 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
831 return 1;
832 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
833 if (root->ref_cows &&
834 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
835 return 1;
836 #endif
837 return 0;
838 }
839
840 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
841 struct btrfs_root *root,
842 struct extent_buffer *buf,
843 struct extent_buffer *cow,
844 int *last_ref)
845 {
846 u64 refs;
847 u64 owner;
848 u64 flags;
849 u64 new_flags = 0;
850 int ret;
851
852 /*
853 * Backrefs update rules:
854 *
855 * Always use full backrefs for extent pointers in tree block
856 * allocated by tree relocation.
857 *
858 * If a shared tree block is no longer referenced by its owner
859 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
860 * use full backrefs for extent pointers in tree block.
861 *
862 * If a tree block is been relocating
863 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
864 * use full backrefs for extent pointers in tree block.
865 * The reason for this is some operations (such as drop tree)
866 * are only allowed for blocks use full backrefs.
867 */
868
869 if (btrfs_block_can_be_shared(root, buf)) {
870 ret = btrfs_lookup_extent_info(trans, root, buf->start,
871 btrfs_header_level(buf), 1,
872 &refs, &flags);
873 if (ret)
874 return ret;
875 if (refs == 0) {
876 ret = -EROFS;
877 btrfs_std_error(root->fs_info, ret);
878 return ret;
879 }
880 } else {
881 refs = 1;
882 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
883 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
884 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
885 else
886 flags = 0;
887 }
888
889 owner = btrfs_header_owner(buf);
890 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
891 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
892
893 if (refs > 1) {
894 if ((owner == root->root_key.objectid ||
895 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
896 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
897 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
898 BUG_ON(ret); /* -ENOMEM */
899
900 if (root->root_key.objectid ==
901 BTRFS_TREE_RELOC_OBJECTID) {
902 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
903 BUG_ON(ret); /* -ENOMEM */
904 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
905 BUG_ON(ret); /* -ENOMEM */
906 }
907 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
908 } else {
909
910 if (root->root_key.objectid ==
911 BTRFS_TREE_RELOC_OBJECTID)
912 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
913 else
914 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
915 BUG_ON(ret); /* -ENOMEM */
916 }
917 if (new_flags != 0) {
918 ret = btrfs_set_disk_extent_flags(trans, root,
919 buf->start,
920 buf->len,
921 new_flags, 0);
922 if (ret)
923 return ret;
924 }
925 } else {
926 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
927 if (root->root_key.objectid ==
928 BTRFS_TREE_RELOC_OBJECTID)
929 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
930 else
931 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
932 BUG_ON(ret); /* -ENOMEM */
933 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
934 BUG_ON(ret); /* -ENOMEM */
935 }
936 clean_tree_block(trans, root, buf);
937 *last_ref = 1;
938 }
939 return 0;
940 }
941
942 /*
943 * does the dirty work in cow of a single block. The parent block (if
944 * supplied) is updated to point to the new cow copy. The new buffer is marked
945 * dirty and returned locked. If you modify the block it needs to be marked
946 * dirty again.
947 *
948 * search_start -- an allocation hint for the new block
949 *
950 * empty_size -- a hint that you plan on doing more cow. This is the size in
951 * bytes the allocator should try to find free next to the block it returns.
952 * This is just a hint and may be ignored by the allocator.
953 */
954 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
955 struct btrfs_root *root,
956 struct extent_buffer *buf,
957 struct extent_buffer *parent, int parent_slot,
958 struct extent_buffer **cow_ret,
959 u64 search_start, u64 empty_size)
960 {
961 struct btrfs_disk_key disk_key;
962 struct extent_buffer *cow;
963 int level, ret;
964 int last_ref = 0;
965 int unlock_orig = 0;
966 u64 parent_start;
967
968 if (*cow_ret == buf)
969 unlock_orig = 1;
970
971 btrfs_assert_tree_locked(buf);
972
973 WARN_ON(root->ref_cows && trans->transid !=
974 root->fs_info->running_transaction->transid);
975 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
976
977 level = btrfs_header_level(buf);
978
979 if (level == 0)
980 btrfs_item_key(buf, &disk_key, 0);
981 else
982 btrfs_node_key(buf, &disk_key, 0);
983
984 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
985 if (parent)
986 parent_start = parent->start;
987 else
988 parent_start = 0;
989 } else
990 parent_start = 0;
991
992 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
993 root->root_key.objectid, &disk_key,
994 level, search_start, empty_size);
995 if (IS_ERR(cow))
996 return PTR_ERR(cow);
997
998 /* cow is set to blocking by btrfs_init_new_buffer */
999
1000 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1001 btrfs_set_header_bytenr(cow, cow->start);
1002 btrfs_set_header_generation(cow, trans->transid);
1003 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1004 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1005 BTRFS_HEADER_FLAG_RELOC);
1006 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1007 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1008 else
1009 btrfs_set_header_owner(cow, root->root_key.objectid);
1010
1011 write_extent_buffer(cow, root->fs_info->fsid,
1012 (unsigned long)btrfs_header_fsid(cow),
1013 BTRFS_FSID_SIZE);
1014
1015 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1016 if (ret) {
1017 btrfs_abort_transaction(trans, root, ret);
1018 return ret;
1019 }
1020
1021 if (root->ref_cows)
1022 btrfs_reloc_cow_block(trans, root, buf, cow);
1023
1024 if (buf == root->node) {
1025 WARN_ON(parent && parent != buf);
1026 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1027 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1028 parent_start = buf->start;
1029 else
1030 parent_start = 0;
1031
1032 extent_buffer_get(cow);
1033 tree_mod_log_set_root_pointer(root, cow, 1);
1034 rcu_assign_pointer(root->node, cow);
1035
1036 btrfs_free_tree_block(trans, root, buf, parent_start,
1037 last_ref);
1038 free_extent_buffer(buf);
1039 add_root_to_dirty_list(root);
1040 } else {
1041 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1042 parent_start = parent->start;
1043 else
1044 parent_start = 0;
1045
1046 WARN_ON(trans->transid != btrfs_header_generation(parent));
1047 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1048 MOD_LOG_KEY_REPLACE);
1049 btrfs_set_node_blockptr(parent, parent_slot,
1050 cow->start);
1051 btrfs_set_node_ptr_generation(parent, parent_slot,
1052 trans->transid);
1053 btrfs_mark_buffer_dirty(parent);
1054 tree_mod_log_free_eb(root->fs_info, buf);
1055 btrfs_free_tree_block(trans, root, buf, parent_start,
1056 last_ref);
1057 }
1058 if (unlock_orig)
1059 btrfs_tree_unlock(buf);
1060 free_extent_buffer_stale(buf);
1061 btrfs_mark_buffer_dirty(cow);
1062 *cow_ret = cow;
1063 return 0;
1064 }
1065
1066 /*
1067 * returns the logical address of the oldest predecessor of the given root.
1068 * entries older than time_seq are ignored.
1069 */
1070 static struct tree_mod_elem *
1071 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1072 struct extent_buffer *eb_root, u64 time_seq)
1073 {
1074 struct tree_mod_elem *tm;
1075 struct tree_mod_elem *found = NULL;
1076 u64 root_logical = eb_root->start;
1077 int looped = 0;
1078
1079 if (!time_seq)
1080 return 0;
1081
1082 /*
1083 * the very last operation that's logged for a root is the replacement
1084 * operation (if it is replaced at all). this has the index of the *new*
1085 * root, making it the very first operation that's logged for this root.
1086 */
1087 while (1) {
1088 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1089 time_seq);
1090 if (!looped && !tm)
1091 return 0;
1092 /*
1093 * if there are no tree operation for the oldest root, we simply
1094 * return it. this should only happen if that (old) root is at
1095 * level 0.
1096 */
1097 if (!tm)
1098 break;
1099
1100 /*
1101 * if there's an operation that's not a root replacement, we
1102 * found the oldest version of our root. normally, we'll find a
1103 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1104 */
1105 if (tm->op != MOD_LOG_ROOT_REPLACE)
1106 break;
1107
1108 found = tm;
1109 root_logical = tm->old_root.logical;
1110 looped = 1;
1111 }
1112
1113 /* if there's no old root to return, return what we found instead */
1114 if (!found)
1115 found = tm;
1116
1117 return found;
1118 }
1119
1120 /*
1121 * tm is a pointer to the first operation to rewind within eb. then, all
1122 * previous operations will be rewinded (until we reach something older than
1123 * time_seq).
1124 */
1125 static void
1126 __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1127 struct tree_mod_elem *first_tm)
1128 {
1129 u32 n;
1130 struct rb_node *next;
1131 struct tree_mod_elem *tm = first_tm;
1132 unsigned long o_dst;
1133 unsigned long o_src;
1134 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1135
1136 n = btrfs_header_nritems(eb);
1137 while (tm && tm->seq >= time_seq) {
1138 /*
1139 * all the operations are recorded with the operator used for
1140 * the modification. as we're going backwards, we do the
1141 * opposite of each operation here.
1142 */
1143 switch (tm->op) {
1144 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1145 BUG_ON(tm->slot < n);
1146 /* Fallthrough */
1147 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1148 case MOD_LOG_KEY_REMOVE:
1149 btrfs_set_node_key(eb, &tm->key, tm->slot);
1150 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1151 btrfs_set_node_ptr_generation(eb, tm->slot,
1152 tm->generation);
1153 n++;
1154 break;
1155 case MOD_LOG_KEY_REPLACE:
1156 BUG_ON(tm->slot >= n);
1157 btrfs_set_node_key(eb, &tm->key, tm->slot);
1158 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1159 btrfs_set_node_ptr_generation(eb, tm->slot,
1160 tm->generation);
1161 break;
1162 case MOD_LOG_KEY_ADD:
1163 /* if a move operation is needed it's in the log */
1164 n--;
1165 break;
1166 case MOD_LOG_MOVE_KEYS:
1167 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1168 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1169 memmove_extent_buffer(eb, o_dst, o_src,
1170 tm->move.nr_items * p_size);
1171 break;
1172 case MOD_LOG_ROOT_REPLACE:
1173 /*
1174 * this operation is special. for roots, this must be
1175 * handled explicitly before rewinding.
1176 * for non-roots, this operation may exist if the node
1177 * was a root: root A -> child B; then A gets empty and
1178 * B is promoted to the new root. in the mod log, we'll
1179 * have a root-replace operation for B, a tree block
1180 * that is no root. we simply ignore that operation.
1181 */
1182 break;
1183 }
1184 next = rb_next(&tm->node);
1185 if (!next)
1186 break;
1187 tm = container_of(next, struct tree_mod_elem, node);
1188 if (tm->index != first_tm->index)
1189 break;
1190 }
1191 btrfs_set_header_nritems(eb, n);
1192 }
1193
1194 /*
1195 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
1196 * is returned. If rewind operations happen, a fresh buffer is returned. The
1197 * returned buffer is always read-locked. If the returned buffer is not the
1198 * input buffer, the lock on the input buffer is released and the input buffer
1199 * is freed (its refcount is decremented).
1200 */
1201 static struct extent_buffer *
1202 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1203 u64 time_seq)
1204 {
1205 struct extent_buffer *eb_rewin;
1206 struct tree_mod_elem *tm;
1207
1208 if (!time_seq)
1209 return eb;
1210
1211 if (btrfs_header_level(eb) == 0)
1212 return eb;
1213
1214 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1215 if (!tm)
1216 return eb;
1217
1218 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1219 BUG_ON(tm->slot != 0);
1220 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1221 fs_info->tree_root->nodesize);
1222 BUG_ON(!eb_rewin);
1223 btrfs_set_header_bytenr(eb_rewin, eb->start);
1224 btrfs_set_header_backref_rev(eb_rewin,
1225 btrfs_header_backref_rev(eb));
1226 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1227 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1228 } else {
1229 eb_rewin = btrfs_clone_extent_buffer(eb);
1230 BUG_ON(!eb_rewin);
1231 }
1232
1233 extent_buffer_get(eb_rewin);
1234 btrfs_tree_read_unlock(eb);
1235 free_extent_buffer(eb);
1236
1237 extent_buffer_get(eb_rewin);
1238 btrfs_tree_read_lock(eb_rewin);
1239 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1240 WARN_ON(btrfs_header_nritems(eb_rewin) >
1241 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1242
1243 return eb_rewin;
1244 }
1245
1246 /*
1247 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1248 * value. If there are no changes, the current root->root_node is returned. If
1249 * anything changed in between, there's a fresh buffer allocated on which the
1250 * rewind operations are done. In any case, the returned buffer is read locked.
1251 * Returns NULL on error (with no locks held).
1252 */
1253 static inline struct extent_buffer *
1254 get_old_root(struct btrfs_root *root, u64 time_seq)
1255 {
1256 struct tree_mod_elem *tm;
1257 struct extent_buffer *eb = NULL;
1258 struct extent_buffer *eb_root;
1259 struct extent_buffer *old;
1260 struct tree_mod_root *old_root = NULL;
1261 u64 old_generation = 0;
1262 u64 logical;
1263 u32 blocksize;
1264
1265 eb_root = btrfs_read_lock_root_node(root);
1266 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1267 if (!tm)
1268 return eb_root;
1269
1270 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1271 old_root = &tm->old_root;
1272 old_generation = tm->generation;
1273 logical = old_root->logical;
1274 } else {
1275 logical = eb_root->start;
1276 }
1277
1278 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1279 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1280 btrfs_tree_read_unlock(eb_root);
1281 free_extent_buffer(eb_root);
1282 blocksize = btrfs_level_size(root, old_root->level);
1283 old = read_tree_block(root, logical, blocksize, 0);
1284 if (!old) {
1285 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1286 logical);
1287 WARN_ON(1);
1288 } else {
1289 eb = btrfs_clone_extent_buffer(old);
1290 free_extent_buffer(old);
1291 }
1292 } else if (old_root) {
1293 btrfs_tree_read_unlock(eb_root);
1294 free_extent_buffer(eb_root);
1295 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1296 } else {
1297 eb = btrfs_clone_extent_buffer(eb_root);
1298 btrfs_tree_read_unlock(eb_root);
1299 free_extent_buffer(eb_root);
1300 }
1301
1302 if (!eb)
1303 return NULL;
1304 extent_buffer_get(eb);
1305 btrfs_tree_read_lock(eb);
1306 if (old_root) {
1307 btrfs_set_header_bytenr(eb, eb->start);
1308 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1309 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1310 btrfs_set_header_level(eb, old_root->level);
1311 btrfs_set_header_generation(eb, old_generation);
1312 }
1313 if (tm)
1314 __tree_mod_log_rewind(eb, time_seq, tm);
1315 else
1316 WARN_ON(btrfs_header_level(eb) != 0);
1317 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1318
1319 return eb;
1320 }
1321
1322 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1323 {
1324 struct tree_mod_elem *tm;
1325 int level;
1326 struct extent_buffer *eb_root = btrfs_root_node(root);
1327
1328 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1329 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1330 level = tm->old_root.level;
1331 } else {
1332 level = btrfs_header_level(eb_root);
1333 }
1334 free_extent_buffer(eb_root);
1335
1336 return level;
1337 }
1338
1339 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1340 struct btrfs_root *root,
1341 struct extent_buffer *buf)
1342 {
1343 /* ensure we can see the force_cow */
1344 smp_rmb();
1345
1346 /*
1347 * We do not need to cow a block if
1348 * 1) this block is not created or changed in this transaction;
1349 * 2) this block does not belong to TREE_RELOC tree;
1350 * 3) the root is not forced COW.
1351 *
1352 * What is forced COW:
1353 * when we create snapshot during commiting the transaction,
1354 * after we've finished coping src root, we must COW the shared
1355 * block to ensure the metadata consistency.
1356 */
1357 if (btrfs_header_generation(buf) == trans->transid &&
1358 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1359 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1360 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1361 !root->force_cow)
1362 return 0;
1363 return 1;
1364 }
1365
1366 /*
1367 * cows a single block, see __btrfs_cow_block for the real work.
1368 * This version of it has extra checks so that a block isn't cow'd more than
1369 * once per transaction, as long as it hasn't been written yet
1370 */
1371 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1372 struct btrfs_root *root, struct extent_buffer *buf,
1373 struct extent_buffer *parent, int parent_slot,
1374 struct extent_buffer **cow_ret)
1375 {
1376 u64 search_start;
1377 int ret;
1378
1379 if (trans->transaction != root->fs_info->running_transaction)
1380 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1381 (unsigned long long)trans->transid,
1382 (unsigned long long)
1383 root->fs_info->running_transaction->transid);
1384
1385 if (trans->transid != root->fs_info->generation)
1386 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1387 (unsigned long long)trans->transid,
1388 (unsigned long long)root->fs_info->generation);
1389
1390 if (!should_cow_block(trans, root, buf)) {
1391 *cow_ret = buf;
1392 return 0;
1393 }
1394
1395 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1396
1397 if (parent)
1398 btrfs_set_lock_blocking(parent);
1399 btrfs_set_lock_blocking(buf);
1400
1401 ret = __btrfs_cow_block(trans, root, buf, parent,
1402 parent_slot, cow_ret, search_start, 0);
1403
1404 trace_btrfs_cow_block(root, buf, *cow_ret);
1405
1406 return ret;
1407 }
1408
1409 /*
1410 * helper function for defrag to decide if two blocks pointed to by a
1411 * node are actually close by
1412 */
1413 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1414 {
1415 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1416 return 1;
1417 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1418 return 1;
1419 return 0;
1420 }
1421
1422 /*
1423 * compare two keys in a memcmp fashion
1424 */
1425 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1426 {
1427 struct btrfs_key k1;
1428
1429 btrfs_disk_key_to_cpu(&k1, disk);
1430
1431 return btrfs_comp_cpu_keys(&k1, k2);
1432 }
1433
1434 /*
1435 * same as comp_keys only with two btrfs_key's
1436 */
1437 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1438 {
1439 if (k1->objectid > k2->objectid)
1440 return 1;
1441 if (k1->objectid < k2->objectid)
1442 return -1;
1443 if (k1->type > k2->type)
1444 return 1;
1445 if (k1->type < k2->type)
1446 return -1;
1447 if (k1->offset > k2->offset)
1448 return 1;
1449 if (k1->offset < k2->offset)
1450 return -1;
1451 return 0;
1452 }
1453
1454 /*
1455 * this is used by the defrag code to go through all the
1456 * leaves pointed to by a node and reallocate them so that
1457 * disk order is close to key order
1458 */
1459 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1460 struct btrfs_root *root, struct extent_buffer *parent,
1461 int start_slot, u64 *last_ret,
1462 struct btrfs_key *progress)
1463 {
1464 struct extent_buffer *cur;
1465 u64 blocknr;
1466 u64 gen;
1467 u64 search_start = *last_ret;
1468 u64 last_block = 0;
1469 u64 other;
1470 u32 parent_nritems;
1471 int end_slot;
1472 int i;
1473 int err = 0;
1474 int parent_level;
1475 int uptodate;
1476 u32 blocksize;
1477 int progress_passed = 0;
1478 struct btrfs_disk_key disk_key;
1479
1480 parent_level = btrfs_header_level(parent);
1481
1482 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1483 WARN_ON(trans->transid != root->fs_info->generation);
1484
1485 parent_nritems = btrfs_header_nritems(parent);
1486 blocksize = btrfs_level_size(root, parent_level - 1);
1487 end_slot = parent_nritems;
1488
1489 if (parent_nritems == 1)
1490 return 0;
1491
1492 btrfs_set_lock_blocking(parent);
1493
1494 for (i = start_slot; i < end_slot; i++) {
1495 int close = 1;
1496
1497 btrfs_node_key(parent, &disk_key, i);
1498 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1499 continue;
1500
1501 progress_passed = 1;
1502 blocknr = btrfs_node_blockptr(parent, i);
1503 gen = btrfs_node_ptr_generation(parent, i);
1504 if (last_block == 0)
1505 last_block = blocknr;
1506
1507 if (i > 0) {
1508 other = btrfs_node_blockptr(parent, i - 1);
1509 close = close_blocks(blocknr, other, blocksize);
1510 }
1511 if (!close && i < end_slot - 2) {
1512 other = btrfs_node_blockptr(parent, i + 1);
1513 close = close_blocks(blocknr, other, blocksize);
1514 }
1515 if (close) {
1516 last_block = blocknr;
1517 continue;
1518 }
1519
1520 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1521 if (cur)
1522 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1523 else
1524 uptodate = 0;
1525 if (!cur || !uptodate) {
1526 if (!cur) {
1527 cur = read_tree_block(root, blocknr,
1528 blocksize, gen);
1529 if (!cur)
1530 return -EIO;
1531 } else if (!uptodate) {
1532 err = btrfs_read_buffer(cur, gen);
1533 if (err) {
1534 free_extent_buffer(cur);
1535 return err;
1536 }
1537 }
1538 }
1539 if (search_start == 0)
1540 search_start = last_block;
1541
1542 btrfs_tree_lock(cur);
1543 btrfs_set_lock_blocking(cur);
1544 err = __btrfs_cow_block(trans, root, cur, parent, i,
1545 &cur, search_start,
1546 min(16 * blocksize,
1547 (end_slot - i) * blocksize));
1548 if (err) {
1549 btrfs_tree_unlock(cur);
1550 free_extent_buffer(cur);
1551 break;
1552 }
1553 search_start = cur->start;
1554 last_block = cur->start;
1555 *last_ret = search_start;
1556 btrfs_tree_unlock(cur);
1557 free_extent_buffer(cur);
1558 }
1559 return err;
1560 }
1561
1562 /*
1563 * The leaf data grows from end-to-front in the node.
1564 * this returns the address of the start of the last item,
1565 * which is the stop of the leaf data stack
1566 */
1567 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1568 struct extent_buffer *leaf)
1569 {
1570 u32 nr = btrfs_header_nritems(leaf);
1571 if (nr == 0)
1572 return BTRFS_LEAF_DATA_SIZE(root);
1573 return btrfs_item_offset_nr(leaf, nr - 1);
1574 }
1575
1576
1577 /*
1578 * search for key in the extent_buffer. The items start at offset p,
1579 * and they are item_size apart. There are 'max' items in p.
1580 *
1581 * the slot in the array is returned via slot, and it points to
1582 * the place where you would insert key if it is not found in
1583 * the array.
1584 *
1585 * slot may point to max if the key is bigger than all of the keys
1586 */
1587 static noinline int generic_bin_search(struct extent_buffer *eb,
1588 unsigned long p,
1589 int item_size, struct btrfs_key *key,
1590 int max, int *slot)
1591 {
1592 int low = 0;
1593 int high = max;
1594 int mid;
1595 int ret;
1596 struct btrfs_disk_key *tmp = NULL;
1597 struct btrfs_disk_key unaligned;
1598 unsigned long offset;
1599 char *kaddr = NULL;
1600 unsigned long map_start = 0;
1601 unsigned long map_len = 0;
1602 int err;
1603
1604 while (low < high) {
1605 mid = (low + high) / 2;
1606 offset = p + mid * item_size;
1607
1608 if (!kaddr || offset < map_start ||
1609 (offset + sizeof(struct btrfs_disk_key)) >
1610 map_start + map_len) {
1611
1612 err = map_private_extent_buffer(eb, offset,
1613 sizeof(struct btrfs_disk_key),
1614 &kaddr, &map_start, &map_len);
1615
1616 if (!err) {
1617 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1618 map_start);
1619 } else {
1620 read_extent_buffer(eb, &unaligned,
1621 offset, sizeof(unaligned));
1622 tmp = &unaligned;
1623 }
1624
1625 } else {
1626 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1627 map_start);
1628 }
1629 ret = comp_keys(tmp, key);
1630
1631 if (ret < 0)
1632 low = mid + 1;
1633 else if (ret > 0)
1634 high = mid;
1635 else {
1636 *slot = mid;
1637 return 0;
1638 }
1639 }
1640 *slot = low;
1641 return 1;
1642 }
1643
1644 /*
1645 * simple bin_search frontend that does the right thing for
1646 * leaves vs nodes
1647 */
1648 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1649 int level, int *slot)
1650 {
1651 if (level == 0)
1652 return generic_bin_search(eb,
1653 offsetof(struct btrfs_leaf, items),
1654 sizeof(struct btrfs_item),
1655 key, btrfs_header_nritems(eb),
1656 slot);
1657 else
1658 return generic_bin_search(eb,
1659 offsetof(struct btrfs_node, ptrs),
1660 sizeof(struct btrfs_key_ptr),
1661 key, btrfs_header_nritems(eb),
1662 slot);
1663 }
1664
1665 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1666 int level, int *slot)
1667 {
1668 return bin_search(eb, key, level, slot);
1669 }
1670
1671 static void root_add_used(struct btrfs_root *root, u32 size)
1672 {
1673 spin_lock(&root->accounting_lock);
1674 btrfs_set_root_used(&root->root_item,
1675 btrfs_root_used(&root->root_item) + size);
1676 spin_unlock(&root->accounting_lock);
1677 }
1678
1679 static void root_sub_used(struct btrfs_root *root, u32 size)
1680 {
1681 spin_lock(&root->accounting_lock);
1682 btrfs_set_root_used(&root->root_item,
1683 btrfs_root_used(&root->root_item) - size);
1684 spin_unlock(&root->accounting_lock);
1685 }
1686
1687 /* given a node and slot number, this reads the blocks it points to. The
1688 * extent buffer is returned with a reference taken (but unlocked).
1689 * NULL is returned on error.
1690 */
1691 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1692 struct extent_buffer *parent, int slot)
1693 {
1694 int level = btrfs_header_level(parent);
1695 if (slot < 0)
1696 return NULL;
1697 if (slot >= btrfs_header_nritems(parent))
1698 return NULL;
1699
1700 BUG_ON(level == 0);
1701
1702 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
1703 btrfs_level_size(root, level - 1),
1704 btrfs_node_ptr_generation(parent, slot));
1705 }
1706
1707 /*
1708 * node level balancing, used to make sure nodes are in proper order for
1709 * item deletion. We balance from the top down, so we have to make sure
1710 * that a deletion won't leave an node completely empty later on.
1711 */
1712 static noinline int balance_level(struct btrfs_trans_handle *trans,
1713 struct btrfs_root *root,
1714 struct btrfs_path *path, int level)
1715 {
1716 struct extent_buffer *right = NULL;
1717 struct extent_buffer *mid;
1718 struct extent_buffer *left = NULL;
1719 struct extent_buffer *parent = NULL;
1720 int ret = 0;
1721 int wret;
1722 int pslot;
1723 int orig_slot = path->slots[level];
1724 u64 orig_ptr;
1725
1726 if (level == 0)
1727 return 0;
1728
1729 mid = path->nodes[level];
1730
1731 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1732 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1733 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1734
1735 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1736
1737 if (level < BTRFS_MAX_LEVEL - 1) {
1738 parent = path->nodes[level + 1];
1739 pslot = path->slots[level + 1];
1740 }
1741
1742 /*
1743 * deal with the case where there is only one pointer in the root
1744 * by promoting the node below to a root
1745 */
1746 if (!parent) {
1747 struct extent_buffer *child;
1748
1749 if (btrfs_header_nritems(mid) != 1)
1750 return 0;
1751
1752 /* promote the child to a root */
1753 child = read_node_slot(root, mid, 0);
1754 if (!child) {
1755 ret = -EROFS;
1756 btrfs_std_error(root->fs_info, ret);
1757 goto enospc;
1758 }
1759
1760 btrfs_tree_lock(child);
1761 btrfs_set_lock_blocking(child);
1762 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1763 if (ret) {
1764 btrfs_tree_unlock(child);
1765 free_extent_buffer(child);
1766 goto enospc;
1767 }
1768
1769 tree_mod_log_set_root_pointer(root, child, 1);
1770 rcu_assign_pointer(root->node, child);
1771
1772 add_root_to_dirty_list(root);
1773 btrfs_tree_unlock(child);
1774
1775 path->locks[level] = 0;
1776 path->nodes[level] = NULL;
1777 clean_tree_block(trans, root, mid);
1778 btrfs_tree_unlock(mid);
1779 /* once for the path */
1780 free_extent_buffer(mid);
1781
1782 root_sub_used(root, mid->len);
1783 btrfs_free_tree_block(trans, root, mid, 0, 1);
1784 /* once for the root ptr */
1785 free_extent_buffer_stale(mid);
1786 return 0;
1787 }
1788 if (btrfs_header_nritems(mid) >
1789 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1790 return 0;
1791
1792 left = read_node_slot(root, parent, pslot - 1);
1793 if (left) {
1794 btrfs_tree_lock(left);
1795 btrfs_set_lock_blocking(left);
1796 wret = btrfs_cow_block(trans, root, left,
1797 parent, pslot - 1, &left);
1798 if (wret) {
1799 ret = wret;
1800 goto enospc;
1801 }
1802 }
1803 right = read_node_slot(root, parent, pslot + 1);
1804 if (right) {
1805 btrfs_tree_lock(right);
1806 btrfs_set_lock_blocking(right);
1807 wret = btrfs_cow_block(trans, root, right,
1808 parent, pslot + 1, &right);
1809 if (wret) {
1810 ret = wret;
1811 goto enospc;
1812 }
1813 }
1814
1815 /* first, try to make some room in the middle buffer */
1816 if (left) {
1817 orig_slot += btrfs_header_nritems(left);
1818 wret = push_node_left(trans, root, left, mid, 1);
1819 if (wret < 0)
1820 ret = wret;
1821 }
1822
1823 /*
1824 * then try to empty the right most buffer into the middle
1825 */
1826 if (right) {
1827 wret = push_node_left(trans, root, mid, right, 1);
1828 if (wret < 0 && wret != -ENOSPC)
1829 ret = wret;
1830 if (btrfs_header_nritems(right) == 0) {
1831 clean_tree_block(trans, root, right);
1832 btrfs_tree_unlock(right);
1833 del_ptr(trans, root, path, level + 1, pslot + 1);
1834 root_sub_used(root, right->len);
1835 btrfs_free_tree_block(trans, root, right, 0, 1);
1836 free_extent_buffer_stale(right);
1837 right = NULL;
1838 } else {
1839 struct btrfs_disk_key right_key;
1840 btrfs_node_key(right, &right_key, 0);
1841 tree_mod_log_set_node_key(root->fs_info, parent,
1842 pslot + 1, 0);
1843 btrfs_set_node_key(parent, &right_key, pslot + 1);
1844 btrfs_mark_buffer_dirty(parent);
1845 }
1846 }
1847 if (btrfs_header_nritems(mid) == 1) {
1848 /*
1849 * we're not allowed to leave a node with one item in the
1850 * tree during a delete. A deletion from lower in the tree
1851 * could try to delete the only pointer in this node.
1852 * So, pull some keys from the left.
1853 * There has to be a left pointer at this point because
1854 * otherwise we would have pulled some pointers from the
1855 * right
1856 */
1857 if (!left) {
1858 ret = -EROFS;
1859 btrfs_std_error(root->fs_info, ret);
1860 goto enospc;
1861 }
1862 wret = balance_node_right(trans, root, mid, left);
1863 if (wret < 0) {
1864 ret = wret;
1865 goto enospc;
1866 }
1867 if (wret == 1) {
1868 wret = push_node_left(trans, root, left, mid, 1);
1869 if (wret < 0)
1870 ret = wret;
1871 }
1872 BUG_ON(wret == 1);
1873 }
1874 if (btrfs_header_nritems(mid) == 0) {
1875 clean_tree_block(trans, root, mid);
1876 btrfs_tree_unlock(mid);
1877 del_ptr(trans, root, path, level + 1, pslot);
1878 root_sub_used(root, mid->len);
1879 btrfs_free_tree_block(trans, root, mid, 0, 1);
1880 free_extent_buffer_stale(mid);
1881 mid = NULL;
1882 } else {
1883 /* update the parent key to reflect our changes */
1884 struct btrfs_disk_key mid_key;
1885 btrfs_node_key(mid, &mid_key, 0);
1886 tree_mod_log_set_node_key(root->fs_info, parent,
1887 pslot, 0);
1888 btrfs_set_node_key(parent, &mid_key, pslot);
1889 btrfs_mark_buffer_dirty(parent);
1890 }
1891
1892 /* update the path */
1893 if (left) {
1894 if (btrfs_header_nritems(left) > orig_slot) {
1895 extent_buffer_get(left);
1896 /* left was locked after cow */
1897 path->nodes[level] = left;
1898 path->slots[level + 1] -= 1;
1899 path->slots[level] = orig_slot;
1900 if (mid) {
1901 btrfs_tree_unlock(mid);
1902 free_extent_buffer(mid);
1903 }
1904 } else {
1905 orig_slot -= btrfs_header_nritems(left);
1906 path->slots[level] = orig_slot;
1907 }
1908 }
1909 /* double check we haven't messed things up */
1910 if (orig_ptr !=
1911 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1912 BUG();
1913 enospc:
1914 if (right) {
1915 btrfs_tree_unlock(right);
1916 free_extent_buffer(right);
1917 }
1918 if (left) {
1919 if (path->nodes[level] != left)
1920 btrfs_tree_unlock(left);
1921 free_extent_buffer(left);
1922 }
1923 return ret;
1924 }
1925
1926 /* Node balancing for insertion. Here we only split or push nodes around
1927 * when they are completely full. This is also done top down, so we
1928 * have to be pessimistic.
1929 */
1930 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1931 struct btrfs_root *root,
1932 struct btrfs_path *path, int level)
1933 {
1934 struct extent_buffer *right = NULL;
1935 struct extent_buffer *mid;
1936 struct extent_buffer *left = NULL;
1937 struct extent_buffer *parent = NULL;
1938 int ret = 0;
1939 int wret;
1940 int pslot;
1941 int orig_slot = path->slots[level];
1942
1943 if (level == 0)
1944 return 1;
1945
1946 mid = path->nodes[level];
1947 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1948
1949 if (level < BTRFS_MAX_LEVEL - 1) {
1950 parent = path->nodes[level + 1];
1951 pslot = path->slots[level + 1];
1952 }
1953
1954 if (!parent)
1955 return 1;
1956
1957 left = read_node_slot(root, parent, pslot - 1);
1958
1959 /* first, try to make some room in the middle buffer */
1960 if (left) {
1961 u32 left_nr;
1962
1963 btrfs_tree_lock(left);
1964 btrfs_set_lock_blocking(left);
1965
1966 left_nr = btrfs_header_nritems(left);
1967 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1968 wret = 1;
1969 } else {
1970 ret = btrfs_cow_block(trans, root, left, parent,
1971 pslot - 1, &left);
1972 if (ret)
1973 wret = 1;
1974 else {
1975 wret = push_node_left(trans, root,
1976 left, mid, 0);
1977 }
1978 }
1979 if (wret < 0)
1980 ret = wret;
1981 if (wret == 0) {
1982 struct btrfs_disk_key disk_key;
1983 orig_slot += left_nr;
1984 btrfs_node_key(mid, &disk_key, 0);
1985 tree_mod_log_set_node_key(root->fs_info, parent,
1986 pslot, 0);
1987 btrfs_set_node_key(parent, &disk_key, pslot);
1988 btrfs_mark_buffer_dirty(parent);
1989 if (btrfs_header_nritems(left) > orig_slot) {
1990 path->nodes[level] = left;
1991 path->slots[level + 1] -= 1;
1992 path->slots[level] = orig_slot;
1993 btrfs_tree_unlock(mid);
1994 free_extent_buffer(mid);
1995 } else {
1996 orig_slot -=
1997 btrfs_header_nritems(left);
1998 path->slots[level] = orig_slot;
1999 btrfs_tree_unlock(left);
2000 free_extent_buffer(left);
2001 }
2002 return 0;
2003 }
2004 btrfs_tree_unlock(left);
2005 free_extent_buffer(left);
2006 }
2007 right = read_node_slot(root, parent, pslot + 1);
2008
2009 /*
2010 * then try to empty the right most buffer into the middle
2011 */
2012 if (right) {
2013 u32 right_nr;
2014
2015 btrfs_tree_lock(right);
2016 btrfs_set_lock_blocking(right);
2017
2018 right_nr = btrfs_header_nritems(right);
2019 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2020 wret = 1;
2021 } else {
2022 ret = btrfs_cow_block(trans, root, right,
2023 parent, pslot + 1,
2024 &right);
2025 if (ret)
2026 wret = 1;
2027 else {
2028 wret = balance_node_right(trans, root,
2029 right, mid);
2030 }
2031 }
2032 if (wret < 0)
2033 ret = wret;
2034 if (wret == 0) {
2035 struct btrfs_disk_key disk_key;
2036
2037 btrfs_node_key(right, &disk_key, 0);
2038 tree_mod_log_set_node_key(root->fs_info, parent,
2039 pslot + 1, 0);
2040 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2041 btrfs_mark_buffer_dirty(parent);
2042
2043 if (btrfs_header_nritems(mid) <= orig_slot) {
2044 path->nodes[level] = right;
2045 path->slots[level + 1] += 1;
2046 path->slots[level] = orig_slot -
2047 btrfs_header_nritems(mid);
2048 btrfs_tree_unlock(mid);
2049 free_extent_buffer(mid);
2050 } else {
2051 btrfs_tree_unlock(right);
2052 free_extent_buffer(right);
2053 }
2054 return 0;
2055 }
2056 btrfs_tree_unlock(right);
2057 free_extent_buffer(right);
2058 }
2059 return 1;
2060 }
2061
2062 /*
2063 * readahead one full node of leaves, finding things that are close
2064 * to the block in 'slot', and triggering ra on them.
2065 */
2066 static void reada_for_search(struct btrfs_root *root,
2067 struct btrfs_path *path,
2068 int level, int slot, u64 objectid)
2069 {
2070 struct extent_buffer *node;
2071 struct btrfs_disk_key disk_key;
2072 u32 nritems;
2073 u64 search;
2074 u64 target;
2075 u64 nread = 0;
2076 u64 gen;
2077 int direction = path->reada;
2078 struct extent_buffer *eb;
2079 u32 nr;
2080 u32 blocksize;
2081 u32 nscan = 0;
2082
2083 if (level != 1)
2084 return;
2085
2086 if (!path->nodes[level])
2087 return;
2088
2089 node = path->nodes[level];
2090
2091 search = btrfs_node_blockptr(node, slot);
2092 blocksize = btrfs_level_size(root, level - 1);
2093 eb = btrfs_find_tree_block(root, search, blocksize);
2094 if (eb) {
2095 free_extent_buffer(eb);
2096 return;
2097 }
2098
2099 target = search;
2100
2101 nritems = btrfs_header_nritems(node);
2102 nr = slot;
2103
2104 while (1) {
2105 if (direction < 0) {
2106 if (nr == 0)
2107 break;
2108 nr--;
2109 } else if (direction > 0) {
2110 nr++;
2111 if (nr >= nritems)
2112 break;
2113 }
2114 if (path->reada < 0 && objectid) {
2115 btrfs_node_key(node, &disk_key, nr);
2116 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2117 break;
2118 }
2119 search = btrfs_node_blockptr(node, nr);
2120 if ((search <= target && target - search <= 65536) ||
2121 (search > target && search - target <= 65536)) {
2122 gen = btrfs_node_ptr_generation(node, nr);
2123 readahead_tree_block(root, search, blocksize, gen);
2124 nread += blocksize;
2125 }
2126 nscan++;
2127 if ((nread > 65536 || nscan > 32))
2128 break;
2129 }
2130 }
2131
2132 /*
2133 * returns -EAGAIN if it had to drop the path, or zero if everything was in
2134 * cache
2135 */
2136 static noinline int reada_for_balance(struct btrfs_root *root,
2137 struct btrfs_path *path, int level)
2138 {
2139 int slot;
2140 int nritems;
2141 struct extent_buffer *parent;
2142 struct extent_buffer *eb;
2143 u64 gen;
2144 u64 block1 = 0;
2145 u64 block2 = 0;
2146 int ret = 0;
2147 int blocksize;
2148
2149 parent = path->nodes[level + 1];
2150 if (!parent)
2151 return 0;
2152
2153 nritems = btrfs_header_nritems(parent);
2154 slot = path->slots[level + 1];
2155 blocksize = btrfs_level_size(root, level);
2156
2157 if (slot > 0) {
2158 block1 = btrfs_node_blockptr(parent, slot - 1);
2159 gen = btrfs_node_ptr_generation(parent, slot - 1);
2160 eb = btrfs_find_tree_block(root, block1, blocksize);
2161 /*
2162 * if we get -eagain from btrfs_buffer_uptodate, we
2163 * don't want to return eagain here. That will loop
2164 * forever
2165 */
2166 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2167 block1 = 0;
2168 free_extent_buffer(eb);
2169 }
2170 if (slot + 1 < nritems) {
2171 block2 = btrfs_node_blockptr(parent, slot + 1);
2172 gen = btrfs_node_ptr_generation(parent, slot + 1);
2173 eb = btrfs_find_tree_block(root, block2, blocksize);
2174 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2175 block2 = 0;
2176 free_extent_buffer(eb);
2177 }
2178 if (block1 || block2) {
2179 ret = -EAGAIN;
2180
2181 /* release the whole path */
2182 btrfs_release_path(path);
2183
2184 /* read the blocks */
2185 if (block1)
2186 readahead_tree_block(root, block1, blocksize, 0);
2187 if (block2)
2188 readahead_tree_block(root, block2, blocksize, 0);
2189
2190 if (block1) {
2191 eb = read_tree_block(root, block1, blocksize, 0);
2192 free_extent_buffer(eb);
2193 }
2194 if (block2) {
2195 eb = read_tree_block(root, block2, blocksize, 0);
2196 free_extent_buffer(eb);
2197 }
2198 }
2199 return ret;
2200 }
2201
2202
2203 /*
2204 * when we walk down the tree, it is usually safe to unlock the higher layers
2205 * in the tree. The exceptions are when our path goes through slot 0, because
2206 * operations on the tree might require changing key pointers higher up in the
2207 * tree.
2208 *
2209 * callers might also have set path->keep_locks, which tells this code to keep
2210 * the lock if the path points to the last slot in the block. This is part of
2211 * walking through the tree, and selecting the next slot in the higher block.
2212 *
2213 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2214 * if lowest_unlock is 1, level 0 won't be unlocked
2215 */
2216 static noinline void unlock_up(struct btrfs_path *path, int level,
2217 int lowest_unlock, int min_write_lock_level,
2218 int *write_lock_level)
2219 {
2220 int i;
2221 int skip_level = level;
2222 int no_skips = 0;
2223 struct extent_buffer *t;
2224
2225 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2226 if (!path->nodes[i])
2227 break;
2228 if (!path->locks[i])
2229 break;
2230 if (!no_skips && path->slots[i] == 0) {
2231 skip_level = i + 1;
2232 continue;
2233 }
2234 if (!no_skips && path->keep_locks) {
2235 u32 nritems;
2236 t = path->nodes[i];
2237 nritems = btrfs_header_nritems(t);
2238 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2239 skip_level = i + 1;
2240 continue;
2241 }
2242 }
2243 if (skip_level < i && i >= lowest_unlock)
2244 no_skips = 1;
2245
2246 t = path->nodes[i];
2247 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2248 btrfs_tree_unlock_rw(t, path->locks[i]);
2249 path->locks[i] = 0;
2250 if (write_lock_level &&
2251 i > min_write_lock_level &&
2252 i <= *write_lock_level) {
2253 *write_lock_level = i - 1;
2254 }
2255 }
2256 }
2257 }
2258
2259 /*
2260 * This releases any locks held in the path starting at level and
2261 * going all the way up to the root.
2262 *
2263 * btrfs_search_slot will keep the lock held on higher nodes in a few
2264 * corner cases, such as COW of the block at slot zero in the node. This
2265 * ignores those rules, and it should only be called when there are no
2266 * more updates to be done higher up in the tree.
2267 */
2268 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2269 {
2270 int i;
2271
2272 if (path->keep_locks)
2273 return;
2274
2275 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2276 if (!path->nodes[i])
2277 continue;
2278 if (!path->locks[i])
2279 continue;
2280 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2281 path->locks[i] = 0;
2282 }
2283 }
2284
2285 /*
2286 * helper function for btrfs_search_slot. The goal is to find a block
2287 * in cache without setting the path to blocking. If we find the block
2288 * we return zero and the path is unchanged.
2289 *
2290 * If we can't find the block, we set the path blocking and do some
2291 * reada. -EAGAIN is returned and the search must be repeated.
2292 */
2293 static int
2294 read_block_for_search(struct btrfs_trans_handle *trans,
2295 struct btrfs_root *root, struct btrfs_path *p,
2296 struct extent_buffer **eb_ret, int level, int slot,
2297 struct btrfs_key *key, u64 time_seq)
2298 {
2299 u64 blocknr;
2300 u64 gen;
2301 u32 blocksize;
2302 struct extent_buffer *b = *eb_ret;
2303 struct extent_buffer *tmp;
2304 int ret;
2305
2306 blocknr = btrfs_node_blockptr(b, slot);
2307 gen = btrfs_node_ptr_generation(b, slot);
2308 blocksize = btrfs_level_size(root, level - 1);
2309
2310 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2311 if (tmp) {
2312 /* first we do an atomic uptodate check */
2313 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2314 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2315 /*
2316 * we found an up to date block without
2317 * sleeping, return
2318 * right away
2319 */
2320 *eb_ret = tmp;
2321 return 0;
2322 }
2323 /* the pages were up to date, but we failed
2324 * the generation number check. Do a full
2325 * read for the generation number that is correct.
2326 * We must do this without dropping locks so
2327 * we can trust our generation number
2328 */
2329 free_extent_buffer(tmp);
2330 btrfs_set_path_blocking(p);
2331
2332 /* now we're allowed to do a blocking uptodate check */
2333 tmp = read_tree_block(root, blocknr, blocksize, gen);
2334 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
2335 *eb_ret = tmp;
2336 return 0;
2337 }
2338 free_extent_buffer(tmp);
2339 btrfs_release_path(p);
2340 return -EIO;
2341 }
2342 }
2343
2344 /*
2345 * reduce lock contention at high levels
2346 * of the btree by dropping locks before
2347 * we read. Don't release the lock on the current
2348 * level because we need to walk this node to figure
2349 * out which blocks to read.
2350 */
2351 btrfs_unlock_up_safe(p, level + 1);
2352 btrfs_set_path_blocking(p);
2353
2354 free_extent_buffer(tmp);
2355 if (p->reada)
2356 reada_for_search(root, p, level, slot, key->objectid);
2357
2358 btrfs_release_path(p);
2359
2360 ret = -EAGAIN;
2361 tmp = read_tree_block(root, blocknr, blocksize, 0);
2362 if (tmp) {
2363 /*
2364 * If the read above didn't mark this buffer up to date,
2365 * it will never end up being up to date. Set ret to EIO now
2366 * and give up so that our caller doesn't loop forever
2367 * on our EAGAINs.
2368 */
2369 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2370 ret = -EIO;
2371 free_extent_buffer(tmp);
2372 }
2373 return ret;
2374 }
2375
2376 /*
2377 * helper function for btrfs_search_slot. This does all of the checks
2378 * for node-level blocks and does any balancing required based on
2379 * the ins_len.
2380 *
2381 * If no extra work was required, zero is returned. If we had to
2382 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2383 * start over
2384 */
2385 static int
2386 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2387 struct btrfs_root *root, struct btrfs_path *p,
2388 struct extent_buffer *b, int level, int ins_len,
2389 int *write_lock_level)
2390 {
2391 int ret;
2392 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2393 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2394 int sret;
2395
2396 if (*write_lock_level < level + 1) {
2397 *write_lock_level = level + 1;
2398 btrfs_release_path(p);
2399 goto again;
2400 }
2401
2402 sret = reada_for_balance(root, p, level);
2403 if (sret)
2404 goto again;
2405
2406 btrfs_set_path_blocking(p);
2407 sret = split_node(trans, root, p, level);
2408 btrfs_clear_path_blocking(p, NULL, 0);
2409
2410 BUG_ON(sret > 0);
2411 if (sret) {
2412 ret = sret;
2413 goto done;
2414 }
2415 b = p->nodes[level];
2416 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2417 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2418 int sret;
2419
2420 if (*write_lock_level < level + 1) {
2421 *write_lock_level = level + 1;
2422 btrfs_release_path(p);
2423 goto again;
2424 }
2425
2426 sret = reada_for_balance(root, p, level);
2427 if (sret)
2428 goto again;
2429
2430 btrfs_set_path_blocking(p);
2431 sret = balance_level(trans, root, p, level);
2432 btrfs_clear_path_blocking(p, NULL, 0);
2433
2434 if (sret) {
2435 ret = sret;
2436 goto done;
2437 }
2438 b = p->nodes[level];
2439 if (!b) {
2440 btrfs_release_path(p);
2441 goto again;
2442 }
2443 BUG_ON(btrfs_header_nritems(b) == 1);
2444 }
2445 return 0;
2446
2447 again:
2448 ret = -EAGAIN;
2449 done:
2450 return ret;
2451 }
2452
2453 /*
2454 * look for key in the tree. path is filled in with nodes along the way
2455 * if key is found, we return zero and you can find the item in the leaf
2456 * level of the path (level 0)
2457 *
2458 * If the key isn't found, the path points to the slot where it should
2459 * be inserted, and 1 is returned. If there are other errors during the
2460 * search a negative error number is returned.
2461 *
2462 * if ins_len > 0, nodes and leaves will be split as we walk down the
2463 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2464 * possible)
2465 */
2466 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2467 *root, struct btrfs_key *key, struct btrfs_path *p, int
2468 ins_len, int cow)
2469 {
2470 struct extent_buffer *b;
2471 int slot;
2472 int ret;
2473 int err;
2474 int level;
2475 int lowest_unlock = 1;
2476 int root_lock;
2477 /* everything at write_lock_level or lower must be write locked */
2478 int write_lock_level = 0;
2479 u8 lowest_level = 0;
2480 int min_write_lock_level;
2481
2482 lowest_level = p->lowest_level;
2483 WARN_ON(lowest_level && ins_len > 0);
2484 WARN_ON(p->nodes[0] != NULL);
2485
2486 if (ins_len < 0) {
2487 lowest_unlock = 2;
2488
2489 /* when we are removing items, we might have to go up to level
2490 * two as we update tree pointers Make sure we keep write
2491 * for those levels as well
2492 */
2493 write_lock_level = 2;
2494 } else if (ins_len > 0) {
2495 /*
2496 * for inserting items, make sure we have a write lock on
2497 * level 1 so we can update keys
2498 */
2499 write_lock_level = 1;
2500 }
2501
2502 if (!cow)
2503 write_lock_level = -1;
2504
2505 if (cow && (p->keep_locks || p->lowest_level))
2506 write_lock_level = BTRFS_MAX_LEVEL;
2507
2508 min_write_lock_level = write_lock_level;
2509
2510 again:
2511 /*
2512 * we try very hard to do read locks on the root
2513 */
2514 root_lock = BTRFS_READ_LOCK;
2515 level = 0;
2516 if (p->search_commit_root) {
2517 /*
2518 * the commit roots are read only
2519 * so we always do read locks
2520 */
2521 b = root->commit_root;
2522 extent_buffer_get(b);
2523 level = btrfs_header_level(b);
2524 if (!p->skip_locking)
2525 btrfs_tree_read_lock(b);
2526 } else {
2527 if (p->skip_locking) {
2528 b = btrfs_root_node(root);
2529 level = btrfs_header_level(b);
2530 } else {
2531 /* we don't know the level of the root node
2532 * until we actually have it read locked
2533 */
2534 b = btrfs_read_lock_root_node(root);
2535 level = btrfs_header_level(b);
2536 if (level <= write_lock_level) {
2537 /* whoops, must trade for write lock */
2538 btrfs_tree_read_unlock(b);
2539 free_extent_buffer(b);
2540 b = btrfs_lock_root_node(root);
2541 root_lock = BTRFS_WRITE_LOCK;
2542
2543 /* the level might have changed, check again */
2544 level = btrfs_header_level(b);
2545 }
2546 }
2547 }
2548 p->nodes[level] = b;
2549 if (!p->skip_locking)
2550 p->locks[level] = root_lock;
2551
2552 while (b) {
2553 level = btrfs_header_level(b);
2554
2555 /*
2556 * setup the path here so we can release it under lock
2557 * contention with the cow code
2558 */
2559 if (cow) {
2560 /*
2561 * if we don't really need to cow this block
2562 * then we don't want to set the path blocking,
2563 * so we test it here
2564 */
2565 if (!should_cow_block(trans, root, b))
2566 goto cow_done;
2567
2568 btrfs_set_path_blocking(p);
2569
2570 /*
2571 * must have write locks on this node and the
2572 * parent
2573 */
2574 if (level > write_lock_level ||
2575 (level + 1 > write_lock_level &&
2576 level + 1 < BTRFS_MAX_LEVEL &&
2577 p->nodes[level + 1])) {
2578 write_lock_level = level + 1;
2579 btrfs_release_path(p);
2580 goto again;
2581 }
2582
2583 err = btrfs_cow_block(trans, root, b,
2584 p->nodes[level + 1],
2585 p->slots[level + 1], &b);
2586 if (err) {
2587 ret = err;
2588 goto done;
2589 }
2590 }
2591 cow_done:
2592 BUG_ON(!cow && ins_len);
2593
2594 p->nodes[level] = b;
2595 btrfs_clear_path_blocking(p, NULL, 0);
2596
2597 /*
2598 * we have a lock on b and as long as we aren't changing
2599 * the tree, there is no way to for the items in b to change.
2600 * It is safe to drop the lock on our parent before we
2601 * go through the expensive btree search on b.
2602 *
2603 * If cow is true, then we might be changing slot zero,
2604 * which may require changing the parent. So, we can't
2605 * drop the lock until after we know which slot we're
2606 * operating on.
2607 */
2608 if (!cow)
2609 btrfs_unlock_up_safe(p, level + 1);
2610
2611 ret = bin_search(b, key, level, &slot);
2612
2613 if (level != 0) {
2614 int dec = 0;
2615 if (ret && slot > 0) {
2616 dec = 1;
2617 slot -= 1;
2618 }
2619 p->slots[level] = slot;
2620 err = setup_nodes_for_search(trans, root, p, b, level,
2621 ins_len, &write_lock_level);
2622 if (err == -EAGAIN)
2623 goto again;
2624 if (err) {
2625 ret = err;
2626 goto done;
2627 }
2628 b = p->nodes[level];
2629 slot = p->slots[level];
2630
2631 /*
2632 * slot 0 is special, if we change the key
2633 * we have to update the parent pointer
2634 * which means we must have a write lock
2635 * on the parent
2636 */
2637 if (slot == 0 && cow &&
2638 write_lock_level < level + 1) {
2639 write_lock_level = level + 1;
2640 btrfs_release_path(p);
2641 goto again;
2642 }
2643
2644 unlock_up(p, level, lowest_unlock,
2645 min_write_lock_level, &write_lock_level);
2646
2647 if (level == lowest_level) {
2648 if (dec)
2649 p->slots[level]++;
2650 goto done;
2651 }
2652
2653 err = read_block_for_search(trans, root, p,
2654 &b, level, slot, key, 0);
2655 if (err == -EAGAIN)
2656 goto again;
2657 if (err) {
2658 ret = err;
2659 goto done;
2660 }
2661
2662 if (!p->skip_locking) {
2663 level = btrfs_header_level(b);
2664 if (level <= write_lock_level) {
2665 err = btrfs_try_tree_write_lock(b);
2666 if (!err) {
2667 btrfs_set_path_blocking(p);
2668 btrfs_tree_lock(b);
2669 btrfs_clear_path_blocking(p, b,
2670 BTRFS_WRITE_LOCK);
2671 }
2672 p->locks[level] = BTRFS_WRITE_LOCK;
2673 } else {
2674 err = btrfs_try_tree_read_lock(b);
2675 if (!err) {
2676 btrfs_set_path_blocking(p);
2677 btrfs_tree_read_lock(b);
2678 btrfs_clear_path_blocking(p, b,
2679 BTRFS_READ_LOCK);
2680 }
2681 p->locks[level] = BTRFS_READ_LOCK;
2682 }
2683 p->nodes[level] = b;
2684 }
2685 } else {
2686 p->slots[level] = slot;
2687 if (ins_len > 0 &&
2688 btrfs_leaf_free_space(root, b) < ins_len) {
2689 if (write_lock_level < 1) {
2690 write_lock_level = 1;
2691 btrfs_release_path(p);
2692 goto again;
2693 }
2694
2695 btrfs_set_path_blocking(p);
2696 err = split_leaf(trans, root, key,
2697 p, ins_len, ret == 0);
2698 btrfs_clear_path_blocking(p, NULL, 0);
2699
2700 BUG_ON(err > 0);
2701 if (err) {
2702 ret = err;
2703 goto done;
2704 }
2705 }
2706 if (!p->search_for_split)
2707 unlock_up(p, level, lowest_unlock,
2708 min_write_lock_level, &write_lock_level);
2709 goto done;
2710 }
2711 }
2712 ret = 1;
2713 done:
2714 /*
2715 * we don't really know what they plan on doing with the path
2716 * from here on, so for now just mark it as blocking
2717 */
2718 if (!p->leave_spinning)
2719 btrfs_set_path_blocking(p);
2720 if (ret < 0)
2721 btrfs_release_path(p);
2722 return ret;
2723 }
2724
2725 /*
2726 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2727 * current state of the tree together with the operations recorded in the tree
2728 * modification log to search for the key in a previous version of this tree, as
2729 * denoted by the time_seq parameter.
2730 *
2731 * Naturally, there is no support for insert, delete or cow operations.
2732 *
2733 * The resulting path and return value will be set up as if we called
2734 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2735 */
2736 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2737 struct btrfs_path *p, u64 time_seq)
2738 {
2739 struct extent_buffer *b;
2740 int slot;
2741 int ret;
2742 int err;
2743 int level;
2744 int lowest_unlock = 1;
2745 u8 lowest_level = 0;
2746
2747 lowest_level = p->lowest_level;
2748 WARN_ON(p->nodes[0] != NULL);
2749
2750 if (p->search_commit_root) {
2751 BUG_ON(time_seq);
2752 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2753 }
2754
2755 again:
2756 b = get_old_root(root, time_seq);
2757 level = btrfs_header_level(b);
2758 p->locks[level] = BTRFS_READ_LOCK;
2759
2760 while (b) {
2761 level = btrfs_header_level(b);
2762 p->nodes[level] = b;
2763 btrfs_clear_path_blocking(p, NULL, 0);
2764
2765 /*
2766 * we have a lock on b and as long as we aren't changing
2767 * the tree, there is no way to for the items in b to change.
2768 * It is safe to drop the lock on our parent before we
2769 * go through the expensive btree search on b.
2770 */
2771 btrfs_unlock_up_safe(p, level + 1);
2772
2773 ret = bin_search(b, key, level, &slot);
2774
2775 if (level != 0) {
2776 int dec = 0;
2777 if (ret && slot > 0) {
2778 dec = 1;
2779 slot -= 1;
2780 }
2781 p->slots[level] = slot;
2782 unlock_up(p, level, lowest_unlock, 0, NULL);
2783
2784 if (level == lowest_level) {
2785 if (dec)
2786 p->slots[level]++;
2787 goto done;
2788 }
2789
2790 err = read_block_for_search(NULL, root, p, &b, level,
2791 slot, key, time_seq);
2792 if (err == -EAGAIN)
2793 goto again;
2794 if (err) {
2795 ret = err;
2796 goto done;
2797 }
2798
2799 level = btrfs_header_level(b);
2800 err = btrfs_try_tree_read_lock(b);
2801 if (!err) {
2802 btrfs_set_path_blocking(p);
2803 btrfs_tree_read_lock(b);
2804 btrfs_clear_path_blocking(p, b,
2805 BTRFS_READ_LOCK);
2806 }
2807 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2808 p->locks[level] = BTRFS_READ_LOCK;
2809 p->nodes[level] = b;
2810 } else {
2811 p->slots[level] = slot;
2812 unlock_up(p, level, lowest_unlock, 0, NULL);
2813 goto done;
2814 }
2815 }
2816 ret = 1;
2817 done:
2818 if (!p->leave_spinning)
2819 btrfs_set_path_blocking(p);
2820 if (ret < 0)
2821 btrfs_release_path(p);
2822
2823 return ret;
2824 }
2825
2826 /*
2827 * helper to use instead of search slot if no exact match is needed but
2828 * instead the next or previous item should be returned.
2829 * When find_higher is true, the next higher item is returned, the next lower
2830 * otherwise.
2831 * When return_any and find_higher are both true, and no higher item is found,
2832 * return the next lower instead.
2833 * When return_any is true and find_higher is false, and no lower item is found,
2834 * return the next higher instead.
2835 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2836 * < 0 on error
2837 */
2838 int btrfs_search_slot_for_read(struct btrfs_root *root,
2839 struct btrfs_key *key, struct btrfs_path *p,
2840 int find_higher, int return_any)
2841 {
2842 int ret;
2843 struct extent_buffer *leaf;
2844
2845 again:
2846 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2847 if (ret <= 0)
2848 return ret;
2849 /*
2850 * a return value of 1 means the path is at the position where the
2851 * item should be inserted. Normally this is the next bigger item,
2852 * but in case the previous item is the last in a leaf, path points
2853 * to the first free slot in the previous leaf, i.e. at an invalid
2854 * item.
2855 */
2856 leaf = p->nodes[0];
2857
2858 if (find_higher) {
2859 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2860 ret = btrfs_next_leaf(root, p);
2861 if (ret <= 0)
2862 return ret;
2863 if (!return_any)
2864 return 1;
2865 /*
2866 * no higher item found, return the next
2867 * lower instead
2868 */
2869 return_any = 0;
2870 find_higher = 0;
2871 btrfs_release_path(p);
2872 goto again;
2873 }
2874 } else {
2875 if (p->slots[0] == 0) {
2876 ret = btrfs_prev_leaf(root, p);
2877 if (ret < 0)
2878 return ret;
2879 if (!ret) {
2880 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2881 return 0;
2882 }
2883 if (!return_any)
2884 return 1;
2885 /*
2886 * no lower item found, return the next
2887 * higher instead
2888 */
2889 return_any = 0;
2890 find_higher = 1;
2891 btrfs_release_path(p);
2892 goto again;
2893 } else {
2894 --p->slots[0];
2895 }
2896 }
2897 return 0;
2898 }
2899
2900 /*
2901 * adjust the pointers going up the tree, starting at level
2902 * making sure the right key of each node is points to 'key'.
2903 * This is used after shifting pointers to the left, so it stops
2904 * fixing up pointers when a given leaf/node is not in slot 0 of the
2905 * higher levels
2906 *
2907 */
2908 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
2909 struct btrfs_disk_key *key, int level)
2910 {
2911 int i;
2912 struct extent_buffer *t;
2913
2914 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2915 int tslot = path->slots[i];
2916 if (!path->nodes[i])
2917 break;
2918 t = path->nodes[i];
2919 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2920 btrfs_set_node_key(t, key, tslot);
2921 btrfs_mark_buffer_dirty(path->nodes[i]);
2922 if (tslot != 0)
2923 break;
2924 }
2925 }
2926
2927 /*
2928 * update item key.
2929 *
2930 * This function isn't completely safe. It's the caller's responsibility
2931 * that the new key won't break the order
2932 */
2933 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2934 struct btrfs_root *root, struct btrfs_path *path,
2935 struct btrfs_key *new_key)
2936 {
2937 struct btrfs_disk_key disk_key;
2938 struct extent_buffer *eb;
2939 int slot;
2940
2941 eb = path->nodes[0];
2942 slot = path->slots[0];
2943 if (slot > 0) {
2944 btrfs_item_key(eb, &disk_key, slot - 1);
2945 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2946 }
2947 if (slot < btrfs_header_nritems(eb) - 1) {
2948 btrfs_item_key(eb, &disk_key, slot + 1);
2949 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2950 }
2951
2952 btrfs_cpu_key_to_disk(&disk_key, new_key);
2953 btrfs_set_item_key(eb, &disk_key, slot);
2954 btrfs_mark_buffer_dirty(eb);
2955 if (slot == 0)
2956 fixup_low_keys(root, path, &disk_key, 1);
2957 }
2958
2959 /*
2960 * try to push data from one node into the next node left in the
2961 * tree.
2962 *
2963 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2964 * error, and > 0 if there was no room in the left hand block.
2965 */
2966 static int push_node_left(struct btrfs_trans_handle *trans,
2967 struct btrfs_root *root, struct extent_buffer *dst,
2968 struct extent_buffer *src, int empty)
2969 {
2970 int push_items = 0;
2971 int src_nritems;
2972 int dst_nritems;
2973 int ret = 0;
2974
2975 src_nritems = btrfs_header_nritems(src);
2976 dst_nritems = btrfs_header_nritems(dst);
2977 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2978 WARN_ON(btrfs_header_generation(src) != trans->transid);
2979 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2980
2981 if (!empty && src_nritems <= 8)
2982 return 1;
2983
2984 if (push_items <= 0)
2985 return 1;
2986
2987 if (empty) {
2988 push_items = min(src_nritems, push_items);
2989 if (push_items < src_nritems) {
2990 /* leave at least 8 pointers in the node if
2991 * we aren't going to empty it
2992 */
2993 if (src_nritems - push_items < 8) {
2994 if (push_items <= 8)
2995 return 1;
2996 push_items -= 8;
2997 }
2998 }
2999 } else
3000 push_items = min(src_nritems - 8, push_items);
3001
3002 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3003 push_items);
3004 copy_extent_buffer(dst, src,
3005 btrfs_node_key_ptr_offset(dst_nritems),
3006 btrfs_node_key_ptr_offset(0),
3007 push_items * sizeof(struct btrfs_key_ptr));
3008
3009 if (push_items < src_nritems) {
3010 /*
3011 * don't call tree_mod_log_eb_move here, key removal was already
3012 * fully logged by tree_mod_log_eb_copy above.
3013 */
3014 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3015 btrfs_node_key_ptr_offset(push_items),
3016 (src_nritems - push_items) *
3017 sizeof(struct btrfs_key_ptr));
3018 }
3019 btrfs_set_header_nritems(src, src_nritems - push_items);
3020 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3021 btrfs_mark_buffer_dirty(src);
3022 btrfs_mark_buffer_dirty(dst);
3023
3024 return ret;
3025 }
3026
3027 /*
3028 * try to push data from one node into the next node right in the
3029 * tree.
3030 *
3031 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3032 * error, and > 0 if there was no room in the right hand block.
3033 *
3034 * this will only push up to 1/2 the contents of the left node over
3035 */
3036 static int balance_node_right(struct btrfs_trans_handle *trans,
3037 struct btrfs_root *root,
3038 struct extent_buffer *dst,
3039 struct extent_buffer *src)
3040 {
3041 int push_items = 0;
3042 int max_push;
3043 int src_nritems;
3044 int dst_nritems;
3045 int ret = 0;
3046
3047 WARN_ON(btrfs_header_generation(src) != trans->transid);
3048 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3049
3050 src_nritems = btrfs_header_nritems(src);
3051 dst_nritems = btrfs_header_nritems(dst);
3052 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3053 if (push_items <= 0)
3054 return 1;
3055
3056 if (src_nritems < 4)
3057 return 1;
3058
3059 max_push = src_nritems / 2 + 1;
3060 /* don't try to empty the node */
3061 if (max_push >= src_nritems)
3062 return 1;
3063
3064 if (max_push < push_items)
3065 push_items = max_push;
3066
3067 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3068 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3069 btrfs_node_key_ptr_offset(0),
3070 (dst_nritems) *
3071 sizeof(struct btrfs_key_ptr));
3072
3073 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3074 src_nritems - push_items, push_items);
3075 copy_extent_buffer(dst, src,
3076 btrfs_node_key_ptr_offset(0),
3077 btrfs_node_key_ptr_offset(src_nritems - push_items),
3078 push_items * sizeof(struct btrfs_key_ptr));
3079
3080 btrfs_set_header_nritems(src, src_nritems - push_items);
3081 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3082
3083 btrfs_mark_buffer_dirty(src);
3084 btrfs_mark_buffer_dirty(dst);
3085
3086 return ret;
3087 }
3088
3089 /*
3090 * helper function to insert a new root level in the tree.
3091 * A new node is allocated, and a single item is inserted to
3092 * point to the existing root
3093 *
3094 * returns zero on success or < 0 on failure.
3095 */
3096 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3097 struct btrfs_root *root,
3098 struct btrfs_path *path, int level, int log_removal)
3099 {
3100 u64 lower_gen;
3101 struct extent_buffer *lower;
3102 struct extent_buffer *c;
3103 struct extent_buffer *old;
3104 struct btrfs_disk_key lower_key;
3105
3106 BUG_ON(path->nodes[level]);
3107 BUG_ON(path->nodes[level-1] != root->node);
3108
3109 lower = path->nodes[level-1];
3110 if (level == 1)
3111 btrfs_item_key(lower, &lower_key, 0);
3112 else
3113 btrfs_node_key(lower, &lower_key, 0);
3114
3115 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3116 root->root_key.objectid, &lower_key,
3117 level, root->node->start, 0);
3118 if (IS_ERR(c))
3119 return PTR_ERR(c);
3120
3121 root_add_used(root, root->nodesize);
3122
3123 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3124 btrfs_set_header_nritems(c, 1);
3125 btrfs_set_header_level(c, level);
3126 btrfs_set_header_bytenr(c, c->start);
3127 btrfs_set_header_generation(c, trans->transid);
3128 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3129 btrfs_set_header_owner(c, root->root_key.objectid);
3130
3131 write_extent_buffer(c, root->fs_info->fsid,
3132 (unsigned long)btrfs_header_fsid(c),
3133 BTRFS_FSID_SIZE);
3134
3135 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3136 (unsigned long)btrfs_header_chunk_tree_uuid(c),
3137 BTRFS_UUID_SIZE);
3138
3139 btrfs_set_node_key(c, &lower_key, 0);
3140 btrfs_set_node_blockptr(c, 0, lower->start);
3141 lower_gen = btrfs_header_generation(lower);
3142 WARN_ON(lower_gen != trans->transid);
3143
3144 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3145
3146 btrfs_mark_buffer_dirty(c);
3147
3148 old = root->node;
3149 tree_mod_log_set_root_pointer(root, c, log_removal);
3150 rcu_assign_pointer(root->node, c);
3151
3152 /* the super has an extra ref to root->node */
3153 free_extent_buffer(old);
3154
3155 add_root_to_dirty_list(root);
3156 extent_buffer_get(c);
3157 path->nodes[level] = c;
3158 path->locks[level] = BTRFS_WRITE_LOCK;
3159 path->slots[level] = 0;
3160 return 0;
3161 }
3162
3163 /*
3164 * worker function to insert a single pointer in a node.
3165 * the node should have enough room for the pointer already
3166 *
3167 * slot and level indicate where you want the key to go, and
3168 * blocknr is the block the key points to.
3169 */
3170 static void insert_ptr(struct btrfs_trans_handle *trans,
3171 struct btrfs_root *root, struct btrfs_path *path,
3172 struct btrfs_disk_key *key, u64 bytenr,
3173 int slot, int level)
3174 {
3175 struct extent_buffer *lower;
3176 int nritems;
3177 int ret;
3178
3179 BUG_ON(!path->nodes[level]);
3180 btrfs_assert_tree_locked(path->nodes[level]);
3181 lower = path->nodes[level];
3182 nritems = btrfs_header_nritems(lower);
3183 BUG_ON(slot > nritems);
3184 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3185 if (slot != nritems) {
3186 if (level)
3187 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3188 slot, nritems - slot);
3189 memmove_extent_buffer(lower,
3190 btrfs_node_key_ptr_offset(slot + 1),
3191 btrfs_node_key_ptr_offset(slot),
3192 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3193 }
3194 if (level) {
3195 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3196 MOD_LOG_KEY_ADD);
3197 BUG_ON(ret < 0);
3198 }
3199 btrfs_set_node_key(lower, key, slot);
3200 btrfs_set_node_blockptr(lower, slot, bytenr);
3201 WARN_ON(trans->transid == 0);
3202 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3203 btrfs_set_header_nritems(lower, nritems + 1);
3204 btrfs_mark_buffer_dirty(lower);
3205 }
3206
3207 /*
3208 * split the node at the specified level in path in two.
3209 * The path is corrected to point to the appropriate node after the split
3210 *
3211 * Before splitting this tries to make some room in the node by pushing
3212 * left and right, if either one works, it returns right away.
3213 *
3214 * returns 0 on success and < 0 on failure
3215 */
3216 static noinline int split_node(struct btrfs_trans_handle *trans,
3217 struct btrfs_root *root,
3218 struct btrfs_path *path, int level)
3219 {
3220 struct extent_buffer *c;
3221 struct extent_buffer *split;
3222 struct btrfs_disk_key disk_key;
3223 int mid;
3224 int ret;
3225 u32 c_nritems;
3226
3227 c = path->nodes[level];
3228 WARN_ON(btrfs_header_generation(c) != trans->transid);
3229 if (c == root->node) {
3230 /*
3231 * trying to split the root, lets make a new one
3232 *
3233 * tree mod log: We pass 0 as log_removal parameter to
3234 * insert_new_root, because that root buffer will be kept as a
3235 * normal node. We are going to log removal of half of the
3236 * elements below with tree_mod_log_eb_copy. We're holding a
3237 * tree lock on the buffer, which is why we cannot race with
3238 * other tree_mod_log users.
3239 */
3240 ret = insert_new_root(trans, root, path, level + 1, 0);
3241 if (ret)
3242 return ret;
3243 } else {
3244 ret = push_nodes_for_insert(trans, root, path, level);
3245 c = path->nodes[level];
3246 if (!ret && btrfs_header_nritems(c) <
3247 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3248 return 0;
3249 if (ret < 0)
3250 return ret;
3251 }
3252
3253 c_nritems = btrfs_header_nritems(c);
3254 mid = (c_nritems + 1) / 2;
3255 btrfs_node_key(c, &disk_key, mid);
3256
3257 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3258 root->root_key.objectid,
3259 &disk_key, level, c->start, 0);
3260 if (IS_ERR(split))
3261 return PTR_ERR(split);
3262
3263 root_add_used(root, root->nodesize);
3264
3265 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3266 btrfs_set_header_level(split, btrfs_header_level(c));
3267 btrfs_set_header_bytenr(split, split->start);
3268 btrfs_set_header_generation(split, trans->transid);
3269 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3270 btrfs_set_header_owner(split, root->root_key.objectid);
3271 write_extent_buffer(split, root->fs_info->fsid,
3272 (unsigned long)btrfs_header_fsid(split),
3273 BTRFS_FSID_SIZE);
3274 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3275 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3276 BTRFS_UUID_SIZE);
3277
3278 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3279 copy_extent_buffer(split, c,
3280 btrfs_node_key_ptr_offset(0),
3281 btrfs_node_key_ptr_offset(mid),
3282 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3283 btrfs_set_header_nritems(split, c_nritems - mid);
3284 btrfs_set_header_nritems(c, mid);
3285 ret = 0;
3286
3287 btrfs_mark_buffer_dirty(c);
3288 btrfs_mark_buffer_dirty(split);
3289
3290 insert_ptr(trans, root, path, &disk_key, split->start,
3291 path->slots[level + 1] + 1, level + 1);
3292
3293 if (path->slots[level] >= mid) {
3294 path->slots[level] -= mid;
3295 btrfs_tree_unlock(c);
3296 free_extent_buffer(c);
3297 path->nodes[level] = split;
3298 path->slots[level + 1] += 1;
3299 } else {
3300 btrfs_tree_unlock(split);
3301 free_extent_buffer(split);
3302 }
3303 return ret;
3304 }
3305
3306 /*
3307 * how many bytes are required to store the items in a leaf. start
3308 * and nr indicate which items in the leaf to check. This totals up the
3309 * space used both by the item structs and the item data
3310 */
3311 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3312 {
3313 struct btrfs_item *start_item;
3314 struct btrfs_item *end_item;
3315 struct btrfs_map_token token;
3316 int data_len;
3317 int nritems = btrfs_header_nritems(l);
3318 int end = min(nritems, start + nr) - 1;
3319
3320 if (!nr)
3321 return 0;
3322 btrfs_init_map_token(&token);
3323 start_item = btrfs_item_nr(l, start);
3324 end_item = btrfs_item_nr(l, end);
3325 data_len = btrfs_token_item_offset(l, start_item, &token) +
3326 btrfs_token_item_size(l, start_item, &token);
3327 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3328 data_len += sizeof(struct btrfs_item) * nr;
3329 WARN_ON(data_len < 0);
3330 return data_len;
3331 }
3332
3333 /*
3334 * The space between the end of the leaf items and
3335 * the start of the leaf data. IOW, how much room
3336 * the leaf has left for both items and data
3337 */
3338 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3339 struct extent_buffer *leaf)
3340 {
3341 int nritems = btrfs_header_nritems(leaf);
3342 int ret;
3343 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3344 if (ret < 0) {
3345 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3346 "used %d nritems %d\n",
3347 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3348 leaf_space_used(leaf, 0, nritems), nritems);
3349 }
3350 return ret;
3351 }
3352
3353 /*
3354 * min slot controls the lowest index we're willing to push to the
3355 * right. We'll push up to and including min_slot, but no lower
3356 */
3357 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3358 struct btrfs_root *root,
3359 struct btrfs_path *path,
3360 int data_size, int empty,
3361 struct extent_buffer *right,
3362 int free_space, u32 left_nritems,
3363 u32 min_slot)
3364 {
3365 struct extent_buffer *left = path->nodes[0];
3366 struct extent_buffer *upper = path->nodes[1];
3367 struct btrfs_map_token token;
3368 struct btrfs_disk_key disk_key;
3369 int slot;
3370 u32 i;
3371 int push_space = 0;
3372 int push_items = 0;
3373 struct btrfs_item *item;
3374 u32 nr;
3375 u32 right_nritems;
3376 u32 data_end;
3377 u32 this_item_size;
3378
3379 btrfs_init_map_token(&token);
3380
3381 if (empty)
3382 nr = 0;
3383 else
3384 nr = max_t(u32, 1, min_slot);
3385
3386 if (path->slots[0] >= left_nritems)
3387 push_space += data_size;
3388
3389 slot = path->slots[1];
3390 i = left_nritems - 1;
3391 while (i >= nr) {
3392 item = btrfs_item_nr(left, i);
3393
3394 if (!empty && push_items > 0) {
3395 if (path->slots[0] > i)
3396 break;
3397 if (path->slots[0] == i) {
3398 int space = btrfs_leaf_free_space(root, left);
3399 if (space + push_space * 2 > free_space)
3400 break;
3401 }
3402 }
3403
3404 if (path->slots[0] == i)
3405 push_space += data_size;
3406
3407 this_item_size = btrfs_item_size(left, item);
3408 if (this_item_size + sizeof(*item) + push_space > free_space)
3409 break;
3410
3411 push_items++;
3412 push_space += this_item_size + sizeof(*item);
3413 if (i == 0)
3414 break;
3415 i--;
3416 }
3417
3418 if (push_items == 0)
3419 goto out_unlock;
3420
3421 WARN_ON(!empty && push_items == left_nritems);
3422
3423 /* push left to right */
3424 right_nritems = btrfs_header_nritems(right);
3425
3426 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3427 push_space -= leaf_data_end(root, left);
3428
3429 /* make room in the right data area */
3430 data_end = leaf_data_end(root, right);
3431 memmove_extent_buffer(right,
3432 btrfs_leaf_data(right) + data_end - push_space,
3433 btrfs_leaf_data(right) + data_end,
3434 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3435
3436 /* copy from the left data area */
3437 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3438 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3439 btrfs_leaf_data(left) + leaf_data_end(root, left),
3440 push_space);
3441
3442 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3443 btrfs_item_nr_offset(0),
3444 right_nritems * sizeof(struct btrfs_item));
3445
3446 /* copy the items from left to right */
3447 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3448 btrfs_item_nr_offset(left_nritems - push_items),
3449 push_items * sizeof(struct btrfs_item));
3450
3451 /* update the item pointers */
3452 right_nritems += push_items;
3453 btrfs_set_header_nritems(right, right_nritems);
3454 push_space = BTRFS_LEAF_DATA_SIZE(root);
3455 for (i = 0; i < right_nritems; i++) {
3456 item = btrfs_item_nr(right, i);
3457 push_space -= btrfs_token_item_size(right, item, &token);
3458 btrfs_set_token_item_offset(right, item, push_space, &token);
3459 }
3460
3461 left_nritems -= push_items;
3462 btrfs_set_header_nritems(left, left_nritems);
3463
3464 if (left_nritems)
3465 btrfs_mark_buffer_dirty(left);
3466 else
3467 clean_tree_block(trans, root, left);
3468
3469 btrfs_mark_buffer_dirty(right);
3470
3471 btrfs_item_key(right, &disk_key, 0);
3472 btrfs_set_node_key(upper, &disk_key, slot + 1);
3473 btrfs_mark_buffer_dirty(upper);
3474
3475 /* then fixup the leaf pointer in the path */
3476 if (path->slots[0] >= left_nritems) {
3477 path->slots[0] -= left_nritems;
3478 if (btrfs_header_nritems(path->nodes[0]) == 0)
3479 clean_tree_block(trans, root, path->nodes[0]);
3480 btrfs_tree_unlock(path->nodes[0]);
3481 free_extent_buffer(path->nodes[0]);
3482 path->nodes[0] = right;
3483 path->slots[1] += 1;
3484 } else {
3485 btrfs_tree_unlock(right);
3486 free_extent_buffer(right);
3487 }
3488 return 0;
3489
3490 out_unlock:
3491 btrfs_tree_unlock(right);
3492 free_extent_buffer(right);
3493 return 1;
3494 }
3495
3496 /*
3497 * push some data in the path leaf to the right, trying to free up at
3498 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3499 *
3500 * returns 1 if the push failed because the other node didn't have enough
3501 * room, 0 if everything worked out and < 0 if there were major errors.
3502 *
3503 * this will push starting from min_slot to the end of the leaf. It won't
3504 * push any slot lower than min_slot
3505 */
3506 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3507 *root, struct btrfs_path *path,
3508 int min_data_size, int data_size,
3509 int empty, u32 min_slot)
3510 {
3511 struct extent_buffer *left = path->nodes[0];
3512 struct extent_buffer *right;
3513 struct extent_buffer *upper;
3514 int slot;
3515 int free_space;
3516 u32 left_nritems;
3517 int ret;
3518
3519 if (!path->nodes[1])
3520 return 1;
3521
3522 slot = path->slots[1];
3523 upper = path->nodes[1];
3524 if (slot >= btrfs_header_nritems(upper) - 1)
3525 return 1;
3526
3527 btrfs_assert_tree_locked(path->nodes[1]);
3528
3529 right = read_node_slot(root, upper, slot + 1);
3530 if (right == NULL)
3531 return 1;
3532
3533 btrfs_tree_lock(right);
3534 btrfs_set_lock_blocking(right);
3535
3536 free_space = btrfs_leaf_free_space(root, right);
3537 if (free_space < data_size)
3538 goto out_unlock;
3539
3540 /* cow and double check */
3541 ret = btrfs_cow_block(trans, root, right, upper,
3542 slot + 1, &right);
3543 if (ret)
3544 goto out_unlock;
3545
3546 free_space = btrfs_leaf_free_space(root, right);
3547 if (free_space < data_size)
3548 goto out_unlock;
3549
3550 left_nritems = btrfs_header_nritems(left);
3551 if (left_nritems == 0)
3552 goto out_unlock;
3553
3554 return __push_leaf_right(trans, root, path, min_data_size, empty,
3555 right, free_space, left_nritems, min_slot);
3556 out_unlock:
3557 btrfs_tree_unlock(right);
3558 free_extent_buffer(right);
3559 return 1;
3560 }
3561
3562 /*
3563 * push some data in the path leaf to the left, trying to free up at
3564 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3565 *
3566 * max_slot can put a limit on how far into the leaf we'll push items. The
3567 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3568 * items
3569 */
3570 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3571 struct btrfs_root *root,
3572 struct btrfs_path *path, int data_size,
3573 int empty, struct extent_buffer *left,
3574 int free_space, u32 right_nritems,
3575 u32 max_slot)
3576 {
3577 struct btrfs_disk_key disk_key;
3578 struct extent_buffer *right = path->nodes[0];
3579 int i;
3580 int push_space = 0;
3581 int push_items = 0;
3582 struct btrfs_item *item;
3583 u32 old_left_nritems;
3584 u32 nr;
3585 int ret = 0;
3586 u32 this_item_size;
3587 u32 old_left_item_size;
3588 struct btrfs_map_token token;
3589
3590 btrfs_init_map_token(&token);
3591
3592 if (empty)
3593 nr = min(right_nritems, max_slot);
3594 else
3595 nr = min(right_nritems - 1, max_slot);
3596
3597 for (i = 0; i < nr; i++) {
3598 item = btrfs_item_nr(right, i);
3599
3600 if (!empty && push_items > 0) {
3601 if (path->slots[0] < i)
3602 break;
3603 if (path->slots[0] == i) {
3604 int space = btrfs_leaf_free_space(root, right);
3605 if (space + push_space * 2 > free_space)
3606 break;
3607 }
3608 }
3609
3610 if (path->slots[0] == i)
3611 push_space += data_size;
3612
3613 this_item_size = btrfs_item_size(right, item);
3614 if (this_item_size + sizeof(*item) + push_space > free_space)
3615 break;
3616
3617 push_items++;
3618 push_space += this_item_size + sizeof(*item);
3619 }
3620
3621 if (push_items == 0) {
3622 ret = 1;
3623 goto out;
3624 }
3625 if (!empty && push_items == btrfs_header_nritems(right))
3626 WARN_ON(1);
3627
3628 /* push data from right to left */
3629 copy_extent_buffer(left, right,
3630 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3631 btrfs_item_nr_offset(0),
3632 push_items * sizeof(struct btrfs_item));
3633
3634 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3635 btrfs_item_offset_nr(right, push_items - 1);
3636
3637 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3638 leaf_data_end(root, left) - push_space,
3639 btrfs_leaf_data(right) +
3640 btrfs_item_offset_nr(right, push_items - 1),
3641 push_space);
3642 old_left_nritems = btrfs_header_nritems(left);
3643 BUG_ON(old_left_nritems <= 0);
3644
3645 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3646 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3647 u32 ioff;
3648
3649 item = btrfs_item_nr(left, i);
3650
3651 ioff = btrfs_token_item_offset(left, item, &token);
3652 btrfs_set_token_item_offset(left, item,
3653 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3654 &token);
3655 }
3656 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3657
3658 /* fixup right node */
3659 if (push_items > right_nritems)
3660 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3661 right_nritems);
3662
3663 if (push_items < right_nritems) {
3664 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3665 leaf_data_end(root, right);
3666 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3667 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3668 btrfs_leaf_data(right) +
3669 leaf_data_end(root, right), push_space);
3670
3671 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3672 btrfs_item_nr_offset(push_items),
3673 (btrfs_header_nritems(right) - push_items) *
3674 sizeof(struct btrfs_item));
3675 }
3676 right_nritems -= push_items;
3677 btrfs_set_header_nritems(right, right_nritems);
3678 push_space = BTRFS_LEAF_DATA_SIZE(root);
3679 for (i = 0; i < right_nritems; i++) {
3680 item = btrfs_item_nr(right, i);
3681
3682 push_space = push_space - btrfs_token_item_size(right,
3683 item, &token);
3684 btrfs_set_token_item_offset(right, item, push_space, &token);
3685 }
3686
3687 btrfs_mark_buffer_dirty(left);
3688 if (right_nritems)
3689 btrfs_mark_buffer_dirty(right);
3690 else
3691 clean_tree_block(trans, root, right);
3692
3693 btrfs_item_key(right, &disk_key, 0);
3694 fixup_low_keys(root, path, &disk_key, 1);
3695
3696 /* then fixup the leaf pointer in the path */
3697 if (path->slots[0] < push_items) {
3698 path->slots[0] += old_left_nritems;
3699 btrfs_tree_unlock(path->nodes[0]);
3700 free_extent_buffer(path->nodes[0]);
3701 path->nodes[0] = left;
3702 path->slots[1] -= 1;
3703 } else {
3704 btrfs_tree_unlock(left);
3705 free_extent_buffer(left);
3706 path->slots[0] -= push_items;
3707 }
3708 BUG_ON(path->slots[0] < 0);
3709 return ret;
3710 out:
3711 btrfs_tree_unlock(left);
3712 free_extent_buffer(left);
3713 return ret;
3714 }
3715
3716 /*
3717 * push some data in the path leaf to the left, trying to free up at
3718 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3719 *
3720 * max_slot can put a limit on how far into the leaf we'll push items. The
3721 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3722 * items
3723 */
3724 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3725 *root, struct btrfs_path *path, int min_data_size,
3726 int data_size, int empty, u32 max_slot)
3727 {
3728 struct extent_buffer *right = path->nodes[0];
3729 struct extent_buffer *left;
3730 int slot;
3731 int free_space;
3732 u32 right_nritems;
3733 int ret = 0;
3734
3735 slot = path->slots[1];
3736 if (slot == 0)
3737 return 1;
3738 if (!path->nodes[1])
3739 return 1;
3740
3741 right_nritems = btrfs_header_nritems(right);
3742 if (right_nritems == 0)
3743 return 1;
3744
3745 btrfs_assert_tree_locked(path->nodes[1]);
3746
3747 left = read_node_slot(root, path->nodes[1], slot - 1);
3748 if (left == NULL)
3749 return 1;
3750
3751 btrfs_tree_lock(left);
3752 btrfs_set_lock_blocking(left);
3753
3754 free_space = btrfs_leaf_free_space(root, left);
3755 if (free_space < data_size) {
3756 ret = 1;
3757 goto out;
3758 }
3759
3760 /* cow and double check */
3761 ret = btrfs_cow_block(trans, root, left,
3762 path->nodes[1], slot - 1, &left);
3763 if (ret) {
3764 /* we hit -ENOSPC, but it isn't fatal here */
3765 if (ret == -ENOSPC)
3766 ret = 1;
3767 goto out;
3768 }
3769
3770 free_space = btrfs_leaf_free_space(root, left);
3771 if (free_space < data_size) {
3772 ret = 1;
3773 goto out;
3774 }
3775
3776 return __push_leaf_left(trans, root, path, min_data_size,
3777 empty, left, free_space, right_nritems,
3778 max_slot);
3779 out:
3780 btrfs_tree_unlock(left);
3781 free_extent_buffer(left);
3782 return ret;
3783 }
3784
3785 /*
3786 * split the path's leaf in two, making sure there is at least data_size
3787 * available for the resulting leaf level of the path.
3788 */
3789 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3790 struct btrfs_root *root,
3791 struct btrfs_path *path,
3792 struct extent_buffer *l,
3793 struct extent_buffer *right,
3794 int slot, int mid, int nritems)
3795 {
3796 int data_copy_size;
3797 int rt_data_off;
3798 int i;
3799 struct btrfs_disk_key disk_key;
3800 struct btrfs_map_token token;
3801
3802 btrfs_init_map_token(&token);
3803
3804 nritems = nritems - mid;
3805 btrfs_set_header_nritems(right, nritems);
3806 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3807
3808 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3809 btrfs_item_nr_offset(mid),
3810 nritems * sizeof(struct btrfs_item));
3811
3812 copy_extent_buffer(right, l,
3813 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3814 data_copy_size, btrfs_leaf_data(l) +
3815 leaf_data_end(root, l), data_copy_size);
3816
3817 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3818 btrfs_item_end_nr(l, mid);
3819
3820 for (i = 0; i < nritems; i++) {
3821 struct btrfs_item *item = btrfs_item_nr(right, i);
3822 u32 ioff;
3823
3824 ioff = btrfs_token_item_offset(right, item, &token);
3825 btrfs_set_token_item_offset(right, item,
3826 ioff + rt_data_off, &token);
3827 }
3828
3829 btrfs_set_header_nritems(l, mid);
3830 btrfs_item_key(right, &disk_key, 0);
3831 insert_ptr(trans, root, path, &disk_key, right->start,
3832 path->slots[1] + 1, 1);
3833
3834 btrfs_mark_buffer_dirty(right);
3835 btrfs_mark_buffer_dirty(l);
3836 BUG_ON(path->slots[0] != slot);
3837
3838 if (mid <= slot) {
3839 btrfs_tree_unlock(path->nodes[0]);
3840 free_extent_buffer(path->nodes[0]);
3841 path->nodes[0] = right;
3842 path->slots[0] -= mid;
3843 path->slots[1] += 1;
3844 } else {
3845 btrfs_tree_unlock(right);
3846 free_extent_buffer(right);
3847 }
3848
3849 BUG_ON(path->slots[0] < 0);
3850 }
3851
3852 /*
3853 * double splits happen when we need to insert a big item in the middle
3854 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3855 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3856 * A B C
3857 *
3858 * We avoid this by trying to push the items on either side of our target
3859 * into the adjacent leaves. If all goes well we can avoid the double split
3860 * completely.
3861 */
3862 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3863 struct btrfs_root *root,
3864 struct btrfs_path *path,
3865 int data_size)
3866 {
3867 int ret;
3868 int progress = 0;
3869 int slot;
3870 u32 nritems;
3871
3872 slot = path->slots[0];
3873
3874 /*
3875 * try to push all the items after our slot into the
3876 * right leaf
3877 */
3878 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3879 if (ret < 0)
3880 return ret;
3881
3882 if (ret == 0)
3883 progress++;
3884
3885 nritems = btrfs_header_nritems(path->nodes[0]);
3886 /*
3887 * our goal is to get our slot at the start or end of a leaf. If
3888 * we've done so we're done
3889 */
3890 if (path->slots[0] == 0 || path->slots[0] == nritems)
3891 return 0;
3892
3893 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3894 return 0;
3895
3896 /* try to push all the items before our slot into the next leaf */
3897 slot = path->slots[0];
3898 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3899 if (ret < 0)
3900 return ret;
3901
3902 if (ret == 0)
3903 progress++;
3904
3905 if (progress)
3906 return 0;
3907 return 1;
3908 }
3909
3910 /*
3911 * split the path's leaf in two, making sure there is at least data_size
3912 * available for the resulting leaf level of the path.
3913 *
3914 * returns 0 if all went well and < 0 on failure.
3915 */
3916 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3917 struct btrfs_root *root,
3918 struct btrfs_key *ins_key,
3919 struct btrfs_path *path, int data_size,
3920 int extend)
3921 {
3922 struct btrfs_disk_key disk_key;
3923 struct extent_buffer *l;
3924 u32 nritems;
3925 int mid;
3926 int slot;
3927 struct extent_buffer *right;
3928 int ret = 0;
3929 int wret;
3930 int split;
3931 int num_doubles = 0;
3932 int tried_avoid_double = 0;
3933
3934 l = path->nodes[0];
3935 slot = path->slots[0];
3936 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3937 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3938 return -EOVERFLOW;
3939
3940 /* first try to make some room by pushing left and right */
3941 if (data_size) {
3942 wret = push_leaf_right(trans, root, path, data_size,
3943 data_size, 0, 0);
3944 if (wret < 0)
3945 return wret;
3946 if (wret) {
3947 wret = push_leaf_left(trans, root, path, data_size,
3948 data_size, 0, (u32)-1);
3949 if (wret < 0)
3950 return wret;
3951 }
3952 l = path->nodes[0];
3953
3954 /* did the pushes work? */
3955 if (btrfs_leaf_free_space(root, l) >= data_size)
3956 return 0;
3957 }
3958
3959 if (!path->nodes[1]) {
3960 ret = insert_new_root(trans, root, path, 1, 1);
3961 if (ret)
3962 return ret;
3963 }
3964 again:
3965 split = 1;
3966 l = path->nodes[0];
3967 slot = path->slots[0];
3968 nritems = btrfs_header_nritems(l);
3969 mid = (nritems + 1) / 2;
3970
3971 if (mid <= slot) {
3972 if (nritems == 1 ||
3973 leaf_space_used(l, mid, nritems - mid) + data_size >
3974 BTRFS_LEAF_DATA_SIZE(root)) {
3975 if (slot >= nritems) {
3976 split = 0;
3977 } else {
3978 mid = slot;
3979 if (mid != nritems &&
3980 leaf_space_used(l, mid, nritems - mid) +
3981 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3982 if (data_size && !tried_avoid_double)
3983 goto push_for_double;
3984 split = 2;
3985 }
3986 }
3987 }
3988 } else {
3989 if (leaf_space_used(l, 0, mid) + data_size >
3990 BTRFS_LEAF_DATA_SIZE(root)) {
3991 if (!extend && data_size && slot == 0) {
3992 split = 0;
3993 } else if ((extend || !data_size) && slot == 0) {
3994 mid = 1;
3995 } else {
3996 mid = slot;
3997 if (mid != nritems &&
3998 leaf_space_used(l, mid, nritems - mid) +
3999 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4000 if (data_size && !tried_avoid_double)
4001 goto push_for_double;
4002 split = 2 ;
4003 }
4004 }
4005 }
4006 }
4007
4008 if (split == 0)
4009 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4010 else
4011 btrfs_item_key(l, &disk_key, mid);
4012
4013 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4014 root->root_key.objectid,
4015 &disk_key, 0, l->start, 0);
4016 if (IS_ERR(right))
4017 return PTR_ERR(right);
4018
4019 root_add_used(root, root->leafsize);
4020
4021 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4022 btrfs_set_header_bytenr(right, right->start);
4023 btrfs_set_header_generation(right, trans->transid);
4024 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4025 btrfs_set_header_owner(right, root->root_key.objectid);
4026 btrfs_set_header_level(right, 0);
4027 write_extent_buffer(right, root->fs_info->fsid,
4028 (unsigned long)btrfs_header_fsid(right),
4029 BTRFS_FSID_SIZE);
4030
4031 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4032 (unsigned long)btrfs_header_chunk_tree_uuid(right),
4033 BTRFS_UUID_SIZE);
4034
4035 if (split == 0) {
4036 if (mid <= slot) {
4037 btrfs_set_header_nritems(right, 0);
4038 insert_ptr(trans, root, path, &disk_key, right->start,
4039 path->slots[1] + 1, 1);
4040 btrfs_tree_unlock(path->nodes[0]);
4041 free_extent_buffer(path->nodes[0]);
4042 path->nodes[0] = right;
4043 path->slots[0] = 0;
4044 path->slots[1] += 1;
4045 } else {
4046 btrfs_set_header_nritems(right, 0);
4047 insert_ptr(trans, root, path, &disk_key, right->start,
4048 path->slots[1], 1);
4049 btrfs_tree_unlock(path->nodes[0]);
4050 free_extent_buffer(path->nodes[0]);
4051 path->nodes[0] = right;
4052 path->slots[0] = 0;
4053 if (path->slots[1] == 0)
4054 fixup_low_keys(root, path, &disk_key, 1);
4055 }
4056 btrfs_mark_buffer_dirty(right);
4057 return ret;
4058 }
4059
4060 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4061
4062 if (split == 2) {
4063 BUG_ON(num_doubles != 0);
4064 num_doubles++;
4065 goto again;
4066 }
4067
4068 return 0;
4069
4070 push_for_double:
4071 push_for_double_split(trans, root, path, data_size);
4072 tried_avoid_double = 1;
4073 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4074 return 0;
4075 goto again;
4076 }
4077
4078 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4079 struct btrfs_root *root,
4080 struct btrfs_path *path, int ins_len)
4081 {
4082 struct btrfs_key key;
4083 struct extent_buffer *leaf;
4084 struct btrfs_file_extent_item *fi;
4085 u64 extent_len = 0;
4086 u32 item_size;
4087 int ret;
4088
4089 leaf = path->nodes[0];
4090 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4091
4092 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4093 key.type != BTRFS_EXTENT_CSUM_KEY);
4094
4095 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4096 return 0;
4097
4098 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4099 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4100 fi = btrfs_item_ptr(leaf, path->slots[0],
4101 struct btrfs_file_extent_item);
4102 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4103 }
4104 btrfs_release_path(path);
4105
4106 path->keep_locks = 1;
4107 path->search_for_split = 1;
4108 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4109 path->search_for_split = 0;
4110 if (ret < 0)
4111 goto err;
4112
4113 ret = -EAGAIN;
4114 leaf = path->nodes[0];
4115 /* if our item isn't there or got smaller, return now */
4116 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4117 goto err;
4118
4119 /* the leaf has changed, it now has room. return now */
4120 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4121 goto err;
4122
4123 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4124 fi = btrfs_item_ptr(leaf, path->slots[0],
4125 struct btrfs_file_extent_item);
4126 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4127 goto err;
4128 }
4129
4130 btrfs_set_path_blocking(path);
4131 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4132 if (ret)
4133 goto err;
4134
4135 path->keep_locks = 0;
4136 btrfs_unlock_up_safe(path, 1);
4137 return 0;
4138 err:
4139 path->keep_locks = 0;
4140 return ret;
4141 }
4142
4143 static noinline int split_item(struct btrfs_trans_handle *trans,
4144 struct btrfs_root *root,
4145 struct btrfs_path *path,
4146 struct btrfs_key *new_key,
4147 unsigned long split_offset)
4148 {
4149 struct extent_buffer *leaf;
4150 struct btrfs_item *item;
4151 struct btrfs_item *new_item;
4152 int slot;
4153 char *buf;
4154 u32 nritems;
4155 u32 item_size;
4156 u32 orig_offset;
4157 struct btrfs_disk_key disk_key;
4158
4159 leaf = path->nodes[0];
4160 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4161
4162 btrfs_set_path_blocking(path);
4163
4164 item = btrfs_item_nr(leaf, path->slots[0]);
4165 orig_offset = btrfs_item_offset(leaf, item);
4166 item_size = btrfs_item_size(leaf, item);
4167
4168 buf = kmalloc(item_size, GFP_NOFS);
4169 if (!buf)
4170 return -ENOMEM;
4171
4172 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4173 path->slots[0]), item_size);
4174
4175 slot = path->slots[0] + 1;
4176 nritems = btrfs_header_nritems(leaf);
4177 if (slot != nritems) {
4178 /* shift the items */
4179 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4180 btrfs_item_nr_offset(slot),
4181 (nritems - slot) * sizeof(struct btrfs_item));
4182 }
4183
4184 btrfs_cpu_key_to_disk(&disk_key, new_key);
4185 btrfs_set_item_key(leaf, &disk_key, slot);
4186
4187 new_item = btrfs_item_nr(leaf, slot);
4188
4189 btrfs_set_item_offset(leaf, new_item, orig_offset);
4190 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4191
4192 btrfs_set_item_offset(leaf, item,
4193 orig_offset + item_size - split_offset);
4194 btrfs_set_item_size(leaf, item, split_offset);
4195
4196 btrfs_set_header_nritems(leaf, nritems + 1);
4197
4198 /* write the data for the start of the original item */
4199 write_extent_buffer(leaf, buf,
4200 btrfs_item_ptr_offset(leaf, path->slots[0]),
4201 split_offset);
4202
4203 /* write the data for the new item */
4204 write_extent_buffer(leaf, buf + split_offset,
4205 btrfs_item_ptr_offset(leaf, slot),
4206 item_size - split_offset);
4207 btrfs_mark_buffer_dirty(leaf);
4208
4209 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4210 kfree(buf);
4211 return 0;
4212 }
4213
4214 /*
4215 * This function splits a single item into two items,
4216 * giving 'new_key' to the new item and splitting the
4217 * old one at split_offset (from the start of the item).
4218 *
4219 * The path may be released by this operation. After
4220 * the split, the path is pointing to the old item. The
4221 * new item is going to be in the same node as the old one.
4222 *
4223 * Note, the item being split must be smaller enough to live alone on
4224 * a tree block with room for one extra struct btrfs_item
4225 *
4226 * This allows us to split the item in place, keeping a lock on the
4227 * leaf the entire time.
4228 */
4229 int btrfs_split_item(struct btrfs_trans_handle *trans,
4230 struct btrfs_root *root,
4231 struct btrfs_path *path,
4232 struct btrfs_key *new_key,
4233 unsigned long split_offset)
4234 {
4235 int ret;
4236 ret = setup_leaf_for_split(trans, root, path,
4237 sizeof(struct btrfs_item));
4238 if (ret)
4239 return ret;
4240
4241 ret = split_item(trans, root, path, new_key, split_offset);
4242 return ret;
4243 }
4244
4245 /*
4246 * This function duplicate a item, giving 'new_key' to the new item.
4247 * It guarantees both items live in the same tree leaf and the new item
4248 * is contiguous with the original item.
4249 *
4250 * This allows us to split file extent in place, keeping a lock on the
4251 * leaf the entire time.
4252 */
4253 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4254 struct btrfs_root *root,
4255 struct btrfs_path *path,
4256 struct btrfs_key *new_key)
4257 {
4258 struct extent_buffer *leaf;
4259 int ret;
4260 u32 item_size;
4261
4262 leaf = path->nodes[0];
4263 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4264 ret = setup_leaf_for_split(trans, root, path,
4265 item_size + sizeof(struct btrfs_item));
4266 if (ret)
4267 return ret;
4268
4269 path->slots[0]++;
4270 setup_items_for_insert(trans, root, path, new_key, &item_size,
4271 item_size, item_size +
4272 sizeof(struct btrfs_item), 1);
4273 leaf = path->nodes[0];
4274 memcpy_extent_buffer(leaf,
4275 btrfs_item_ptr_offset(leaf, path->slots[0]),
4276 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4277 item_size);
4278 return 0;
4279 }
4280
4281 /*
4282 * make the item pointed to by the path smaller. new_size indicates
4283 * how small to make it, and from_end tells us if we just chop bytes
4284 * off the end of the item or if we shift the item to chop bytes off
4285 * the front.
4286 */
4287 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4288 struct btrfs_root *root,
4289 struct btrfs_path *path,
4290 u32 new_size, int from_end)
4291 {
4292 int slot;
4293 struct extent_buffer *leaf;
4294 struct btrfs_item *item;
4295 u32 nritems;
4296 unsigned int data_end;
4297 unsigned int old_data_start;
4298 unsigned int old_size;
4299 unsigned int size_diff;
4300 int i;
4301 struct btrfs_map_token token;
4302
4303 btrfs_init_map_token(&token);
4304
4305 leaf = path->nodes[0];
4306 slot = path->slots[0];
4307
4308 old_size = btrfs_item_size_nr(leaf, slot);
4309 if (old_size == new_size)
4310 return;
4311
4312 nritems = btrfs_header_nritems(leaf);
4313 data_end = leaf_data_end(root, leaf);
4314
4315 old_data_start = btrfs_item_offset_nr(leaf, slot);
4316
4317 size_diff = old_size - new_size;
4318
4319 BUG_ON(slot < 0);
4320 BUG_ON(slot >= nritems);
4321
4322 /*
4323 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4324 */
4325 /* first correct the data pointers */
4326 for (i = slot; i < nritems; i++) {
4327 u32 ioff;
4328 item = btrfs_item_nr(leaf, i);
4329
4330 ioff = btrfs_token_item_offset(leaf, item, &token);
4331 btrfs_set_token_item_offset(leaf, item,
4332 ioff + size_diff, &token);
4333 }
4334
4335 /* shift the data */
4336 if (from_end) {
4337 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4338 data_end + size_diff, btrfs_leaf_data(leaf) +
4339 data_end, old_data_start + new_size - data_end);
4340 } else {
4341 struct btrfs_disk_key disk_key;
4342 u64 offset;
4343
4344 btrfs_item_key(leaf, &disk_key, slot);
4345
4346 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4347 unsigned long ptr;
4348 struct btrfs_file_extent_item *fi;
4349
4350 fi = btrfs_item_ptr(leaf, slot,
4351 struct btrfs_file_extent_item);
4352 fi = (struct btrfs_file_extent_item *)(
4353 (unsigned long)fi - size_diff);
4354
4355 if (btrfs_file_extent_type(leaf, fi) ==
4356 BTRFS_FILE_EXTENT_INLINE) {
4357 ptr = btrfs_item_ptr_offset(leaf, slot);
4358 memmove_extent_buffer(leaf, ptr,
4359 (unsigned long)fi,
4360 offsetof(struct btrfs_file_extent_item,
4361 disk_bytenr));
4362 }
4363 }
4364
4365 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4366 data_end + size_diff, btrfs_leaf_data(leaf) +
4367 data_end, old_data_start - data_end);
4368
4369 offset = btrfs_disk_key_offset(&disk_key);
4370 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4371 btrfs_set_item_key(leaf, &disk_key, slot);
4372 if (slot == 0)
4373 fixup_low_keys(root, path, &disk_key, 1);
4374 }
4375
4376 item = btrfs_item_nr(leaf, slot);
4377 btrfs_set_item_size(leaf, item, new_size);
4378 btrfs_mark_buffer_dirty(leaf);
4379
4380 if (btrfs_leaf_free_space(root, leaf) < 0) {
4381 btrfs_print_leaf(root, leaf);
4382 BUG();
4383 }
4384 }
4385
4386 /*
4387 * make the item pointed to by the path bigger, data_size is the new size.
4388 */
4389 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4390 struct btrfs_root *root, struct btrfs_path *path,
4391 u32 data_size)
4392 {
4393 int slot;
4394 struct extent_buffer *leaf;
4395 struct btrfs_item *item;
4396 u32 nritems;
4397 unsigned int data_end;
4398 unsigned int old_data;
4399 unsigned int old_size;
4400 int i;
4401 struct btrfs_map_token token;
4402
4403 btrfs_init_map_token(&token);
4404
4405 leaf = path->nodes[0];
4406
4407 nritems = btrfs_header_nritems(leaf);
4408 data_end = leaf_data_end(root, leaf);
4409
4410 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4411 btrfs_print_leaf(root, leaf);
4412 BUG();
4413 }
4414 slot = path->slots[0];
4415 old_data = btrfs_item_end_nr(leaf, slot);
4416
4417 BUG_ON(slot < 0);
4418 if (slot >= nritems) {
4419 btrfs_print_leaf(root, leaf);
4420 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4421 slot, nritems);
4422 BUG_ON(1);
4423 }
4424
4425 /*
4426 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4427 */
4428 /* first correct the data pointers */
4429 for (i = slot; i < nritems; i++) {
4430 u32 ioff;
4431 item = btrfs_item_nr(leaf, i);
4432
4433 ioff = btrfs_token_item_offset(leaf, item, &token);
4434 btrfs_set_token_item_offset(leaf, item,
4435 ioff - data_size, &token);
4436 }
4437
4438 /* shift the data */
4439 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4440 data_end - data_size, btrfs_leaf_data(leaf) +
4441 data_end, old_data - data_end);
4442
4443 data_end = old_data;
4444 old_size = btrfs_item_size_nr(leaf, slot);
4445 item = btrfs_item_nr(leaf, slot);
4446 btrfs_set_item_size(leaf, item, old_size + data_size);
4447 btrfs_mark_buffer_dirty(leaf);
4448
4449 if (btrfs_leaf_free_space(root, leaf) < 0) {
4450 btrfs_print_leaf(root, leaf);
4451 BUG();
4452 }
4453 }
4454
4455 /*
4456 * this is a helper for btrfs_insert_empty_items, the main goal here is
4457 * to save stack depth by doing the bulk of the work in a function
4458 * that doesn't call btrfs_search_slot
4459 */
4460 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4461 struct btrfs_root *root, struct btrfs_path *path,
4462 struct btrfs_key *cpu_key, u32 *data_size,
4463 u32 total_data, u32 total_size, int nr)
4464 {
4465 struct btrfs_item *item;
4466 int i;
4467 u32 nritems;
4468 unsigned int data_end;
4469 struct btrfs_disk_key disk_key;
4470 struct extent_buffer *leaf;
4471 int slot;
4472 struct btrfs_map_token token;
4473
4474 btrfs_init_map_token(&token);
4475
4476 leaf = path->nodes[0];
4477 slot = path->slots[0];
4478
4479 nritems = btrfs_header_nritems(leaf);
4480 data_end = leaf_data_end(root, leaf);
4481
4482 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4483 btrfs_print_leaf(root, leaf);
4484 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4485 total_size, btrfs_leaf_free_space(root, leaf));
4486 BUG();
4487 }
4488
4489 if (slot != nritems) {
4490 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4491
4492 if (old_data < data_end) {
4493 btrfs_print_leaf(root, leaf);
4494 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4495 slot, old_data, data_end);
4496 BUG_ON(1);
4497 }
4498 /*
4499 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4500 */
4501 /* first correct the data pointers */
4502 for (i = slot; i < nritems; i++) {
4503 u32 ioff;
4504
4505 item = btrfs_item_nr(leaf, i);
4506 ioff = btrfs_token_item_offset(leaf, item, &token);
4507 btrfs_set_token_item_offset(leaf, item,
4508 ioff - total_data, &token);
4509 }
4510 /* shift the items */
4511 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4512 btrfs_item_nr_offset(slot),
4513 (nritems - slot) * sizeof(struct btrfs_item));
4514
4515 /* shift the data */
4516 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4517 data_end - total_data, btrfs_leaf_data(leaf) +
4518 data_end, old_data - data_end);
4519 data_end = old_data;
4520 }
4521
4522 /* setup the item for the new data */
4523 for (i = 0; i < nr; i++) {
4524 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4525 btrfs_set_item_key(leaf, &disk_key, slot + i);
4526 item = btrfs_item_nr(leaf, slot + i);
4527 btrfs_set_token_item_offset(leaf, item,
4528 data_end - data_size[i], &token);
4529 data_end -= data_size[i];
4530 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4531 }
4532
4533 btrfs_set_header_nritems(leaf, nritems + nr);
4534
4535 if (slot == 0) {
4536 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4537 fixup_low_keys(root, path, &disk_key, 1);
4538 }
4539 btrfs_unlock_up_safe(path, 1);
4540 btrfs_mark_buffer_dirty(leaf);
4541
4542 if (btrfs_leaf_free_space(root, leaf) < 0) {
4543 btrfs_print_leaf(root, leaf);
4544 BUG();
4545 }
4546 }
4547
4548 /*
4549 * Given a key and some data, insert items into the tree.
4550 * This does all the path init required, making room in the tree if needed.
4551 */
4552 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4553 struct btrfs_root *root,
4554 struct btrfs_path *path,
4555 struct btrfs_key *cpu_key, u32 *data_size,
4556 int nr)
4557 {
4558 int ret = 0;
4559 int slot;
4560 int i;
4561 u32 total_size = 0;
4562 u32 total_data = 0;
4563
4564 for (i = 0; i < nr; i++)
4565 total_data += data_size[i];
4566
4567 total_size = total_data + (nr * sizeof(struct btrfs_item));
4568 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4569 if (ret == 0)
4570 return -EEXIST;
4571 if (ret < 0)
4572 return ret;
4573
4574 slot = path->slots[0];
4575 BUG_ON(slot < 0);
4576
4577 setup_items_for_insert(trans, root, path, cpu_key, data_size,
4578 total_data, total_size, nr);
4579 return 0;
4580 }
4581
4582 /*
4583 * Given a key and some data, insert an item into the tree.
4584 * This does all the path init required, making room in the tree if needed.
4585 */
4586 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4587 *root, struct btrfs_key *cpu_key, void *data, u32
4588 data_size)
4589 {
4590 int ret = 0;
4591 struct btrfs_path *path;
4592 struct extent_buffer *leaf;
4593 unsigned long ptr;
4594
4595 path = btrfs_alloc_path();
4596 if (!path)
4597 return -ENOMEM;
4598 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4599 if (!ret) {
4600 leaf = path->nodes[0];
4601 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4602 write_extent_buffer(leaf, data, ptr, data_size);
4603 btrfs_mark_buffer_dirty(leaf);
4604 }
4605 btrfs_free_path(path);
4606 return ret;
4607 }
4608
4609 /*
4610 * delete the pointer from a given node.
4611 *
4612 * the tree should have been previously balanced so the deletion does not
4613 * empty a node.
4614 */
4615 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4616 struct btrfs_path *path, int level, int slot)
4617 {
4618 struct extent_buffer *parent = path->nodes[level];
4619 u32 nritems;
4620 int ret;
4621
4622 nritems = btrfs_header_nritems(parent);
4623 if (slot != nritems - 1) {
4624 if (level)
4625 tree_mod_log_eb_move(root->fs_info, parent, slot,
4626 slot + 1, nritems - slot - 1);
4627 memmove_extent_buffer(parent,
4628 btrfs_node_key_ptr_offset(slot),
4629 btrfs_node_key_ptr_offset(slot + 1),
4630 sizeof(struct btrfs_key_ptr) *
4631 (nritems - slot - 1));
4632 } else if (level) {
4633 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4634 MOD_LOG_KEY_REMOVE);
4635 BUG_ON(ret < 0);
4636 }
4637
4638 nritems--;
4639 btrfs_set_header_nritems(parent, nritems);
4640 if (nritems == 0 && parent == root->node) {
4641 BUG_ON(btrfs_header_level(root->node) != 1);
4642 /* just turn the root into a leaf and break */
4643 btrfs_set_header_level(root->node, 0);
4644 } else if (slot == 0) {
4645 struct btrfs_disk_key disk_key;
4646
4647 btrfs_node_key(parent, &disk_key, 0);
4648 fixup_low_keys(root, path, &disk_key, level + 1);
4649 }
4650 btrfs_mark_buffer_dirty(parent);
4651 }
4652
4653 /*
4654 * a helper function to delete the leaf pointed to by path->slots[1] and
4655 * path->nodes[1].
4656 *
4657 * This deletes the pointer in path->nodes[1] and frees the leaf
4658 * block extent. zero is returned if it all worked out, < 0 otherwise.
4659 *
4660 * The path must have already been setup for deleting the leaf, including
4661 * all the proper balancing. path->nodes[1] must be locked.
4662 */
4663 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4664 struct btrfs_root *root,
4665 struct btrfs_path *path,
4666 struct extent_buffer *leaf)
4667 {
4668 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4669 del_ptr(trans, root, path, 1, path->slots[1]);
4670
4671 /*
4672 * btrfs_free_extent is expensive, we want to make sure we
4673 * aren't holding any locks when we call it
4674 */
4675 btrfs_unlock_up_safe(path, 0);
4676
4677 root_sub_used(root, leaf->len);
4678
4679 extent_buffer_get(leaf);
4680 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4681 free_extent_buffer_stale(leaf);
4682 }
4683 /*
4684 * delete the item at the leaf level in path. If that empties
4685 * the leaf, remove it from the tree
4686 */
4687 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4688 struct btrfs_path *path, int slot, int nr)
4689 {
4690 struct extent_buffer *leaf;
4691 struct btrfs_item *item;
4692 int last_off;
4693 int dsize = 0;
4694 int ret = 0;
4695 int wret;
4696 int i;
4697 u32 nritems;
4698 struct btrfs_map_token token;
4699
4700 btrfs_init_map_token(&token);
4701
4702 leaf = path->nodes[0];
4703 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4704
4705 for (i = 0; i < nr; i++)
4706 dsize += btrfs_item_size_nr(leaf, slot + i);
4707
4708 nritems = btrfs_header_nritems(leaf);
4709
4710 if (slot + nr != nritems) {
4711 int data_end = leaf_data_end(root, leaf);
4712
4713 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4714 data_end + dsize,
4715 btrfs_leaf_data(leaf) + data_end,
4716 last_off - data_end);
4717
4718 for (i = slot + nr; i < nritems; i++) {
4719 u32 ioff;
4720
4721 item = btrfs_item_nr(leaf, i);
4722 ioff = btrfs_token_item_offset(leaf, item, &token);
4723 btrfs_set_token_item_offset(leaf, item,
4724 ioff + dsize, &token);
4725 }
4726
4727 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4728 btrfs_item_nr_offset(slot + nr),
4729 sizeof(struct btrfs_item) *
4730 (nritems - slot - nr));
4731 }
4732 btrfs_set_header_nritems(leaf, nritems - nr);
4733 nritems -= nr;
4734
4735 /* delete the leaf if we've emptied it */
4736 if (nritems == 0) {
4737 if (leaf == root->node) {
4738 btrfs_set_header_level(leaf, 0);
4739 } else {
4740 btrfs_set_path_blocking(path);
4741 clean_tree_block(trans, root, leaf);
4742 btrfs_del_leaf(trans, root, path, leaf);
4743 }
4744 } else {
4745 int used = leaf_space_used(leaf, 0, nritems);
4746 if (slot == 0) {
4747 struct btrfs_disk_key disk_key;
4748
4749 btrfs_item_key(leaf, &disk_key, 0);
4750 fixup_low_keys(root, path, &disk_key, 1);
4751 }
4752
4753 /* delete the leaf if it is mostly empty */
4754 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4755 /* push_leaf_left fixes the path.
4756 * make sure the path still points to our leaf
4757 * for possible call to del_ptr below
4758 */
4759 slot = path->slots[1];
4760 extent_buffer_get(leaf);
4761
4762 btrfs_set_path_blocking(path);
4763 wret = push_leaf_left(trans, root, path, 1, 1,
4764 1, (u32)-1);
4765 if (wret < 0 && wret != -ENOSPC)
4766 ret = wret;
4767
4768 if (path->nodes[0] == leaf &&
4769 btrfs_header_nritems(leaf)) {
4770 wret = push_leaf_right(trans, root, path, 1,
4771 1, 1, 0);
4772 if (wret < 0 && wret != -ENOSPC)
4773 ret = wret;
4774 }
4775
4776 if (btrfs_header_nritems(leaf) == 0) {
4777 path->slots[1] = slot;
4778 btrfs_del_leaf(trans, root, path, leaf);
4779 free_extent_buffer(leaf);
4780 ret = 0;
4781 } else {
4782 /* if we're still in the path, make sure
4783 * we're dirty. Otherwise, one of the
4784 * push_leaf functions must have already
4785 * dirtied this buffer
4786 */
4787 if (path->nodes[0] == leaf)
4788 btrfs_mark_buffer_dirty(leaf);
4789 free_extent_buffer(leaf);
4790 }
4791 } else {
4792 btrfs_mark_buffer_dirty(leaf);
4793 }
4794 }
4795 return ret;
4796 }
4797
4798 /*
4799 * search the tree again to find a leaf with lesser keys
4800 * returns 0 if it found something or 1 if there are no lesser leaves.
4801 * returns < 0 on io errors.
4802 *
4803 * This may release the path, and so you may lose any locks held at the
4804 * time you call it.
4805 */
4806 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4807 {
4808 struct btrfs_key key;
4809 struct btrfs_disk_key found_key;
4810 int ret;
4811
4812 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4813
4814 if (key.offset > 0)
4815 key.offset--;
4816 else if (key.type > 0)
4817 key.type--;
4818 else if (key.objectid > 0)
4819 key.objectid--;
4820 else
4821 return 1;
4822
4823 btrfs_release_path(path);
4824 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4825 if (ret < 0)
4826 return ret;
4827 btrfs_item_key(path->nodes[0], &found_key, 0);
4828 ret = comp_keys(&found_key, &key);
4829 if (ret < 0)
4830 return 0;
4831 return 1;
4832 }
4833
4834 /*
4835 * A helper function to walk down the tree starting at min_key, and looking
4836 * for nodes or leaves that are have a minimum transaction id.
4837 * This is used by the btree defrag code, and tree logging
4838 *
4839 * This does not cow, but it does stuff the starting key it finds back
4840 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4841 * key and get a writable path.
4842 *
4843 * This does lock as it descends, and path->keep_locks should be set
4844 * to 1 by the caller.
4845 *
4846 * This honors path->lowest_level to prevent descent past a given level
4847 * of the tree.
4848 *
4849 * min_trans indicates the oldest transaction that you are interested
4850 * in walking through. Any nodes or leaves older than min_trans are
4851 * skipped over (without reading them).
4852 *
4853 * returns zero if something useful was found, < 0 on error and 1 if there
4854 * was nothing in the tree that matched the search criteria.
4855 */
4856 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4857 struct btrfs_key *max_key,
4858 struct btrfs_path *path,
4859 u64 min_trans)
4860 {
4861 struct extent_buffer *cur;
4862 struct btrfs_key found_key;
4863 int slot;
4864 int sret;
4865 u32 nritems;
4866 int level;
4867 int ret = 1;
4868
4869 WARN_ON(!path->keep_locks);
4870 again:
4871 cur = btrfs_read_lock_root_node(root);
4872 level = btrfs_header_level(cur);
4873 WARN_ON(path->nodes[level]);
4874 path->nodes[level] = cur;
4875 path->locks[level] = BTRFS_READ_LOCK;
4876
4877 if (btrfs_header_generation(cur) < min_trans) {
4878 ret = 1;
4879 goto out;
4880 }
4881 while (1) {
4882 nritems = btrfs_header_nritems(cur);
4883 level = btrfs_header_level(cur);
4884 sret = bin_search(cur, min_key, level, &slot);
4885
4886 /* at the lowest level, we're done, setup the path and exit */
4887 if (level == path->lowest_level) {
4888 if (slot >= nritems)
4889 goto find_next_key;
4890 ret = 0;
4891 path->slots[level] = slot;
4892 btrfs_item_key_to_cpu(cur, &found_key, slot);
4893 goto out;
4894 }
4895 if (sret && slot > 0)
4896 slot--;
4897 /*
4898 * check this node pointer against the min_trans parameters.
4899 * If it is too old, old, skip to the next one.
4900 */
4901 while (slot < nritems) {
4902 u64 blockptr;
4903 u64 gen;
4904
4905 blockptr = btrfs_node_blockptr(cur, slot);
4906 gen = btrfs_node_ptr_generation(cur, slot);
4907 if (gen < min_trans) {
4908 slot++;
4909 continue;
4910 }
4911 break;
4912 }
4913 find_next_key:
4914 /*
4915 * we didn't find a candidate key in this node, walk forward
4916 * and find another one
4917 */
4918 if (slot >= nritems) {
4919 path->slots[level] = slot;
4920 btrfs_set_path_blocking(path);
4921 sret = btrfs_find_next_key(root, path, min_key, level,
4922 min_trans);
4923 if (sret == 0) {
4924 btrfs_release_path(path);
4925 goto again;
4926 } else {
4927 goto out;
4928 }
4929 }
4930 /* save our key for returning back */
4931 btrfs_node_key_to_cpu(cur, &found_key, slot);
4932 path->slots[level] = slot;
4933 if (level == path->lowest_level) {
4934 ret = 0;
4935 unlock_up(path, level, 1, 0, NULL);
4936 goto out;
4937 }
4938 btrfs_set_path_blocking(path);
4939 cur = read_node_slot(root, cur, slot);
4940 BUG_ON(!cur); /* -ENOMEM */
4941
4942 btrfs_tree_read_lock(cur);
4943
4944 path->locks[level - 1] = BTRFS_READ_LOCK;
4945 path->nodes[level - 1] = cur;
4946 unlock_up(path, level, 1, 0, NULL);
4947 btrfs_clear_path_blocking(path, NULL, 0);
4948 }
4949 out:
4950 if (ret == 0)
4951 memcpy(min_key, &found_key, sizeof(found_key));
4952 btrfs_set_path_blocking(path);
4953 return ret;
4954 }
4955
4956 static void tree_move_down(struct btrfs_root *root,
4957 struct btrfs_path *path,
4958 int *level, int root_level)
4959 {
4960 BUG_ON(*level == 0);
4961 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4962 path->slots[*level]);
4963 path->slots[*level - 1] = 0;
4964 (*level)--;
4965 }
4966
4967 static int tree_move_next_or_upnext(struct btrfs_root *root,
4968 struct btrfs_path *path,
4969 int *level, int root_level)
4970 {
4971 int ret = 0;
4972 int nritems;
4973 nritems = btrfs_header_nritems(path->nodes[*level]);
4974
4975 path->slots[*level]++;
4976
4977 while (path->slots[*level] >= nritems) {
4978 if (*level == root_level)
4979 return -1;
4980
4981 /* move upnext */
4982 path->slots[*level] = 0;
4983 free_extent_buffer(path->nodes[*level]);
4984 path->nodes[*level] = NULL;
4985 (*level)++;
4986 path->slots[*level]++;
4987
4988 nritems = btrfs_header_nritems(path->nodes[*level]);
4989 ret = 1;
4990 }
4991 return ret;
4992 }
4993
4994 /*
4995 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
4996 * or down.
4997 */
4998 static int tree_advance(struct btrfs_root *root,
4999 struct btrfs_path *path,
5000 int *level, int root_level,
5001 int allow_down,
5002 struct btrfs_key *key)
5003 {
5004 int ret;
5005
5006 if (*level == 0 || !allow_down) {
5007 ret = tree_move_next_or_upnext(root, path, level, root_level);
5008 } else {
5009 tree_move_down(root, path, level, root_level);
5010 ret = 0;
5011 }
5012 if (ret >= 0) {
5013 if (*level == 0)
5014 btrfs_item_key_to_cpu(path->nodes[*level], key,
5015 path->slots[*level]);
5016 else
5017 btrfs_node_key_to_cpu(path->nodes[*level], key,
5018 path->slots[*level]);
5019 }
5020 return ret;
5021 }
5022
5023 static int tree_compare_item(struct btrfs_root *left_root,
5024 struct btrfs_path *left_path,
5025 struct btrfs_path *right_path,
5026 char *tmp_buf)
5027 {
5028 int cmp;
5029 int len1, len2;
5030 unsigned long off1, off2;
5031
5032 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5033 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5034 if (len1 != len2)
5035 return 1;
5036
5037 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5038 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5039 right_path->slots[0]);
5040
5041 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5042
5043 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5044 if (cmp)
5045 return 1;
5046 return 0;
5047 }
5048
5049 #define ADVANCE 1
5050 #define ADVANCE_ONLY_NEXT -1
5051
5052 /*
5053 * This function compares two trees and calls the provided callback for
5054 * every changed/new/deleted item it finds.
5055 * If shared tree blocks are encountered, whole subtrees are skipped, making
5056 * the compare pretty fast on snapshotted subvolumes.
5057 *
5058 * This currently works on commit roots only. As commit roots are read only,
5059 * we don't do any locking. The commit roots are protected with transactions.
5060 * Transactions are ended and rejoined when a commit is tried in between.
5061 *
5062 * This function checks for modifications done to the trees while comparing.
5063 * If it detects a change, it aborts immediately.
5064 */
5065 int btrfs_compare_trees(struct btrfs_root *left_root,
5066 struct btrfs_root *right_root,
5067 btrfs_changed_cb_t changed_cb, void *ctx)
5068 {
5069 int ret;
5070 int cmp;
5071 struct btrfs_trans_handle *trans = NULL;
5072 struct btrfs_path *left_path = NULL;
5073 struct btrfs_path *right_path = NULL;
5074 struct btrfs_key left_key;
5075 struct btrfs_key right_key;
5076 char *tmp_buf = NULL;
5077 int left_root_level;
5078 int right_root_level;
5079 int left_level;
5080 int right_level;
5081 int left_end_reached;
5082 int right_end_reached;
5083 int advance_left;
5084 int advance_right;
5085 u64 left_blockptr;
5086 u64 right_blockptr;
5087 u64 left_start_ctransid;
5088 u64 right_start_ctransid;
5089 u64 ctransid;
5090
5091 left_path = btrfs_alloc_path();
5092 if (!left_path) {
5093 ret = -ENOMEM;
5094 goto out;
5095 }
5096 right_path = btrfs_alloc_path();
5097 if (!right_path) {
5098 ret = -ENOMEM;
5099 goto out;
5100 }
5101
5102 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5103 if (!tmp_buf) {
5104 ret = -ENOMEM;
5105 goto out;
5106 }
5107
5108 left_path->search_commit_root = 1;
5109 left_path->skip_locking = 1;
5110 right_path->search_commit_root = 1;
5111 right_path->skip_locking = 1;
5112
5113 spin_lock(&left_root->root_item_lock);
5114 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5115 spin_unlock(&left_root->root_item_lock);
5116
5117 spin_lock(&right_root->root_item_lock);
5118 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5119 spin_unlock(&right_root->root_item_lock);
5120
5121 trans = btrfs_join_transaction(left_root);
5122 if (IS_ERR(trans)) {
5123 ret = PTR_ERR(trans);
5124 trans = NULL;
5125 goto out;
5126 }
5127
5128 /*
5129 * Strategy: Go to the first items of both trees. Then do
5130 *
5131 * If both trees are at level 0
5132 * Compare keys of current items
5133 * If left < right treat left item as new, advance left tree
5134 * and repeat
5135 * If left > right treat right item as deleted, advance right tree
5136 * and repeat
5137 * If left == right do deep compare of items, treat as changed if
5138 * needed, advance both trees and repeat
5139 * If both trees are at the same level but not at level 0
5140 * Compare keys of current nodes/leafs
5141 * If left < right advance left tree and repeat
5142 * If left > right advance right tree and repeat
5143 * If left == right compare blockptrs of the next nodes/leafs
5144 * If they match advance both trees but stay at the same level
5145 * and repeat
5146 * If they don't match advance both trees while allowing to go
5147 * deeper and repeat
5148 * If tree levels are different
5149 * Advance the tree that needs it and repeat
5150 *
5151 * Advancing a tree means:
5152 * If we are at level 0, try to go to the next slot. If that's not
5153 * possible, go one level up and repeat. Stop when we found a level
5154 * where we could go to the next slot. We may at this point be on a
5155 * node or a leaf.
5156 *
5157 * If we are not at level 0 and not on shared tree blocks, go one
5158 * level deeper.
5159 *
5160 * If we are not at level 0 and on shared tree blocks, go one slot to
5161 * the right if possible or go up and right.
5162 */
5163
5164 left_level = btrfs_header_level(left_root->commit_root);
5165 left_root_level = left_level;
5166 left_path->nodes[left_level] = left_root->commit_root;
5167 extent_buffer_get(left_path->nodes[left_level]);
5168
5169 right_level = btrfs_header_level(right_root->commit_root);
5170 right_root_level = right_level;
5171 right_path->nodes[right_level] = right_root->commit_root;
5172 extent_buffer_get(right_path->nodes[right_level]);
5173
5174 if (left_level == 0)
5175 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5176 &left_key, left_path->slots[left_level]);
5177 else
5178 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5179 &left_key, left_path->slots[left_level]);
5180 if (right_level == 0)
5181 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5182 &right_key, right_path->slots[right_level]);
5183 else
5184 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5185 &right_key, right_path->slots[right_level]);
5186
5187 left_end_reached = right_end_reached = 0;
5188 advance_left = advance_right = 0;
5189
5190 while (1) {
5191 /*
5192 * We need to make sure the transaction does not get committed
5193 * while we do anything on commit roots. This means, we need to
5194 * join and leave transactions for every item that we process.
5195 */
5196 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5197 btrfs_release_path(left_path);
5198 btrfs_release_path(right_path);
5199
5200 ret = btrfs_end_transaction(trans, left_root);
5201 trans = NULL;
5202 if (ret < 0)
5203 goto out;
5204 }
5205 /* now rejoin the transaction */
5206 if (!trans) {
5207 trans = btrfs_join_transaction(left_root);
5208 if (IS_ERR(trans)) {
5209 ret = PTR_ERR(trans);
5210 trans = NULL;
5211 goto out;
5212 }
5213
5214 spin_lock(&left_root->root_item_lock);
5215 ctransid = btrfs_root_ctransid(&left_root->root_item);
5216 spin_unlock(&left_root->root_item_lock);
5217 if (ctransid != left_start_ctransid)
5218 left_start_ctransid = 0;
5219
5220 spin_lock(&right_root->root_item_lock);
5221 ctransid = btrfs_root_ctransid(&right_root->root_item);
5222 spin_unlock(&right_root->root_item_lock);
5223 if (ctransid != right_start_ctransid)
5224 right_start_ctransid = 0;
5225
5226 if (!left_start_ctransid || !right_start_ctransid) {
5227 WARN(1, KERN_WARNING
5228 "btrfs: btrfs_compare_tree detected "
5229 "a change in one of the trees while "
5230 "iterating. This is probably a "
5231 "bug.\n");
5232 ret = -EIO;
5233 goto out;
5234 }
5235
5236 /*
5237 * the commit root may have changed, so start again
5238 * where we stopped
5239 */
5240 left_path->lowest_level = left_level;
5241 right_path->lowest_level = right_level;
5242 ret = btrfs_search_slot(NULL, left_root,
5243 &left_key, left_path, 0, 0);
5244 if (ret < 0)
5245 goto out;
5246 ret = btrfs_search_slot(NULL, right_root,
5247 &right_key, right_path, 0, 0);
5248 if (ret < 0)
5249 goto out;
5250 }
5251
5252 if (advance_left && !left_end_reached) {
5253 ret = tree_advance(left_root, left_path, &left_level,
5254 left_root_level,
5255 advance_left != ADVANCE_ONLY_NEXT,
5256 &left_key);
5257 if (ret < 0)
5258 left_end_reached = ADVANCE;
5259 advance_left = 0;
5260 }
5261 if (advance_right && !right_end_reached) {
5262 ret = tree_advance(right_root, right_path, &right_level,
5263 right_root_level,
5264 advance_right != ADVANCE_ONLY_NEXT,
5265 &right_key);
5266 if (ret < 0)
5267 right_end_reached = ADVANCE;
5268 advance_right = 0;
5269 }
5270
5271 if (left_end_reached && right_end_reached) {
5272 ret = 0;
5273 goto out;
5274 } else if (left_end_reached) {
5275 if (right_level == 0) {
5276 ret = changed_cb(left_root, right_root,
5277 left_path, right_path,
5278 &right_key,
5279 BTRFS_COMPARE_TREE_DELETED,
5280 ctx);
5281 if (ret < 0)
5282 goto out;
5283 }
5284 advance_right = ADVANCE;
5285 continue;
5286 } else if (right_end_reached) {
5287 if (left_level == 0) {
5288 ret = changed_cb(left_root, right_root,
5289 left_path, right_path,
5290 &left_key,
5291 BTRFS_COMPARE_TREE_NEW,
5292 ctx);
5293 if (ret < 0)
5294 goto out;
5295 }
5296 advance_left = ADVANCE;
5297 continue;
5298 }
5299
5300 if (left_level == 0 && right_level == 0) {
5301 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5302 if (cmp < 0) {
5303 ret = changed_cb(left_root, right_root,
5304 left_path, right_path,
5305 &left_key,
5306 BTRFS_COMPARE_TREE_NEW,
5307 ctx);
5308 if (ret < 0)
5309 goto out;
5310 advance_left = ADVANCE;
5311 } else if (cmp > 0) {
5312 ret = changed_cb(left_root, right_root,
5313 left_path, right_path,
5314 &right_key,
5315 BTRFS_COMPARE_TREE_DELETED,
5316 ctx);
5317 if (ret < 0)
5318 goto out;
5319 advance_right = ADVANCE;
5320 } else {
5321 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5322 ret = tree_compare_item(left_root, left_path,
5323 right_path, tmp_buf);
5324 if (ret) {
5325 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5326 ret = changed_cb(left_root, right_root,
5327 left_path, right_path,
5328 &left_key,
5329 BTRFS_COMPARE_TREE_CHANGED,
5330 ctx);
5331 if (ret < 0)
5332 goto out;
5333 }
5334 advance_left = ADVANCE;
5335 advance_right = ADVANCE;
5336 }
5337 } else if (left_level == right_level) {
5338 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5339 if (cmp < 0) {
5340 advance_left = ADVANCE;
5341 } else if (cmp > 0) {
5342 advance_right = ADVANCE;
5343 } else {
5344 left_blockptr = btrfs_node_blockptr(
5345 left_path->nodes[left_level],
5346 left_path->slots[left_level]);
5347 right_blockptr = btrfs_node_blockptr(
5348 right_path->nodes[right_level],
5349 right_path->slots[right_level]);
5350 if (left_blockptr == right_blockptr) {
5351 /*
5352 * As we're on a shared block, don't
5353 * allow to go deeper.
5354 */
5355 advance_left = ADVANCE_ONLY_NEXT;
5356 advance_right = ADVANCE_ONLY_NEXT;
5357 } else {
5358 advance_left = ADVANCE;
5359 advance_right = ADVANCE;
5360 }
5361 }
5362 } else if (left_level < right_level) {
5363 advance_right = ADVANCE;
5364 } else {
5365 advance_left = ADVANCE;
5366 }
5367 }
5368
5369 out:
5370 btrfs_free_path(left_path);
5371 btrfs_free_path(right_path);
5372 kfree(tmp_buf);
5373
5374 if (trans) {
5375 if (!ret)
5376 ret = btrfs_end_transaction(trans, left_root);
5377 else
5378 btrfs_end_transaction(trans, left_root);
5379 }
5380
5381 return ret;
5382 }
5383
5384 /*
5385 * this is similar to btrfs_next_leaf, but does not try to preserve
5386 * and fixup the path. It looks for and returns the next key in the
5387 * tree based on the current path and the min_trans parameters.
5388 *
5389 * 0 is returned if another key is found, < 0 if there are any errors
5390 * and 1 is returned if there are no higher keys in the tree
5391 *
5392 * path->keep_locks should be set to 1 on the search made before
5393 * calling this function.
5394 */
5395 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5396 struct btrfs_key *key, int level, u64 min_trans)
5397 {
5398 int slot;
5399 struct extent_buffer *c;
5400
5401 WARN_ON(!path->keep_locks);
5402 while (level < BTRFS_MAX_LEVEL) {
5403 if (!path->nodes[level])
5404 return 1;
5405
5406 slot = path->slots[level] + 1;
5407 c = path->nodes[level];
5408 next:
5409 if (slot >= btrfs_header_nritems(c)) {
5410 int ret;
5411 int orig_lowest;
5412 struct btrfs_key cur_key;
5413 if (level + 1 >= BTRFS_MAX_LEVEL ||
5414 !path->nodes[level + 1])
5415 return 1;
5416
5417 if (path->locks[level + 1]) {
5418 level++;
5419 continue;
5420 }
5421
5422 slot = btrfs_header_nritems(c) - 1;
5423 if (level == 0)
5424 btrfs_item_key_to_cpu(c, &cur_key, slot);
5425 else
5426 btrfs_node_key_to_cpu(c, &cur_key, slot);
5427
5428 orig_lowest = path->lowest_level;
5429 btrfs_release_path(path);
5430 path->lowest_level = level;
5431 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5432 0, 0);
5433 path->lowest_level = orig_lowest;
5434 if (ret < 0)
5435 return ret;
5436
5437 c = path->nodes[level];
5438 slot = path->slots[level];
5439 if (ret == 0)
5440 slot++;
5441 goto next;
5442 }
5443
5444 if (level == 0)
5445 btrfs_item_key_to_cpu(c, key, slot);
5446 else {
5447 u64 gen = btrfs_node_ptr_generation(c, slot);
5448
5449 if (gen < min_trans) {
5450 slot++;
5451 goto next;
5452 }
5453 btrfs_node_key_to_cpu(c, key, slot);
5454 }
5455 return 0;
5456 }
5457 return 1;
5458 }
5459
5460 /*
5461 * search the tree again to find a leaf with greater keys
5462 * returns 0 if it found something or 1 if there are no greater leaves.
5463 * returns < 0 on io errors.
5464 */
5465 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5466 {
5467 return btrfs_next_old_leaf(root, path, 0);
5468 }
5469
5470 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5471 u64 time_seq)
5472 {
5473 int slot;
5474 int level;
5475 struct extent_buffer *c;
5476 struct extent_buffer *next;
5477 struct btrfs_key key;
5478 u32 nritems;
5479 int ret;
5480 int old_spinning = path->leave_spinning;
5481 int next_rw_lock = 0;
5482
5483 nritems = btrfs_header_nritems(path->nodes[0]);
5484 if (nritems == 0)
5485 return 1;
5486
5487 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5488 again:
5489 level = 1;
5490 next = NULL;
5491 next_rw_lock = 0;
5492 btrfs_release_path(path);
5493
5494 path->keep_locks = 1;
5495 path->leave_spinning = 1;
5496
5497 if (time_seq)
5498 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5499 else
5500 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5501 path->keep_locks = 0;
5502
5503 if (ret < 0)
5504 return ret;
5505
5506 nritems = btrfs_header_nritems(path->nodes[0]);
5507 /*
5508 * by releasing the path above we dropped all our locks. A balance
5509 * could have added more items next to the key that used to be
5510 * at the very end of the block. So, check again here and
5511 * advance the path if there are now more items available.
5512 */
5513 if (nritems > 0 && path->slots[0] < nritems - 1) {
5514 if (ret == 0)
5515 path->slots[0]++;
5516 ret = 0;
5517 goto done;
5518 }
5519
5520 while (level < BTRFS_MAX_LEVEL) {
5521 if (!path->nodes[level]) {
5522 ret = 1;
5523 goto done;
5524 }
5525
5526 slot = path->slots[level] + 1;
5527 c = path->nodes[level];
5528 if (slot >= btrfs_header_nritems(c)) {
5529 level++;
5530 if (level == BTRFS_MAX_LEVEL) {
5531 ret = 1;
5532 goto done;
5533 }
5534 continue;
5535 }
5536
5537 if (next) {
5538 btrfs_tree_unlock_rw(next, next_rw_lock);
5539 free_extent_buffer(next);
5540 }
5541
5542 next = c;
5543 next_rw_lock = path->locks[level];
5544 ret = read_block_for_search(NULL, root, path, &next, level,
5545 slot, &key, 0);
5546 if (ret == -EAGAIN)
5547 goto again;
5548
5549 if (ret < 0) {
5550 btrfs_release_path(path);
5551 goto done;
5552 }
5553
5554 if (!path->skip_locking) {
5555 ret = btrfs_try_tree_read_lock(next);
5556 if (!ret && time_seq) {
5557 /*
5558 * If we don't get the lock, we may be racing
5559 * with push_leaf_left, holding that lock while
5560 * itself waiting for the leaf we've currently
5561 * locked. To solve this situation, we give up
5562 * on our lock and cycle.
5563 */
5564 free_extent_buffer(next);
5565 btrfs_release_path(path);
5566 cond_resched();
5567 goto again;
5568 }
5569 if (!ret) {
5570 btrfs_set_path_blocking(path);
5571 btrfs_tree_read_lock(next);
5572 btrfs_clear_path_blocking(path, next,
5573 BTRFS_READ_LOCK);
5574 }
5575 next_rw_lock = BTRFS_READ_LOCK;
5576 }
5577 break;
5578 }
5579 path->slots[level] = slot;
5580 while (1) {
5581 level--;
5582 c = path->nodes[level];
5583 if (path->locks[level])
5584 btrfs_tree_unlock_rw(c, path->locks[level]);
5585
5586 free_extent_buffer(c);
5587 path->nodes[level] = next;
5588 path->slots[level] = 0;
5589 if (!path->skip_locking)
5590 path->locks[level] = next_rw_lock;
5591 if (!level)
5592 break;
5593
5594 ret = read_block_for_search(NULL, root, path, &next, level,
5595 0, &key, 0);
5596 if (ret == -EAGAIN)
5597 goto again;
5598
5599 if (ret < 0) {
5600 btrfs_release_path(path);
5601 goto done;
5602 }
5603
5604 if (!path->skip_locking) {
5605 ret = btrfs_try_tree_read_lock(next);
5606 if (!ret) {
5607 btrfs_set_path_blocking(path);
5608 btrfs_tree_read_lock(next);
5609 btrfs_clear_path_blocking(path, next,
5610 BTRFS_READ_LOCK);
5611 }
5612 next_rw_lock = BTRFS_READ_LOCK;
5613 }
5614 }
5615 ret = 0;
5616 done:
5617 unlock_up(path, 0, 1, 0, NULL);
5618 path->leave_spinning = old_spinning;
5619 if (!old_spinning)
5620 btrfs_set_path_blocking(path);
5621
5622 return ret;
5623 }
5624
5625 /*
5626 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5627 * searching until it gets past min_objectid or finds an item of 'type'
5628 *
5629 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5630 */
5631 int btrfs_previous_item(struct btrfs_root *root,
5632 struct btrfs_path *path, u64 min_objectid,
5633 int type)
5634 {
5635 struct btrfs_key found_key;
5636 struct extent_buffer *leaf;
5637 u32 nritems;
5638 int ret;
5639
5640 while (1) {
5641 if (path->slots[0] == 0) {
5642 btrfs_set_path_blocking(path);
5643 ret = btrfs_prev_leaf(root, path);
5644 if (ret != 0)
5645 return ret;
5646 } else {
5647 path->slots[0]--;
5648 }
5649 leaf = path->nodes[0];
5650 nritems = btrfs_header_nritems(leaf);
5651 if (nritems == 0)
5652 return 1;
5653 if (path->slots[0] == nritems)
5654 path->slots[0]--;
5655
5656 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5657 if (found_key.objectid < min_objectid)
5658 break;
5659 if (found_key.type == type)
5660 return 0;
5661 if (found_key.objectid == min_objectid &&
5662 found_key.type < type)
5663 break;
5664 }
5665 return 1;
5666 }