/*
 * Copyright (C) 2007,2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
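
/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file; "root" and "key" are assumed to be set up by the caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */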

/*
 * set all locked nodes in the path to blocking locks. This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path. You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree. A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree. See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear. It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root. A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root. A locked
 * buffer is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just
 * get put onto a simple dirty list. transaction.c walks this list to make
 * sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&root->fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &root->fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
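
/*
 * Illustrative pairing (a sketch, not code from this file): a reader
 * that wants a stable view of tree history initializes a seq_list with
 * seq == 0, registers it so the log is not pruned underneath it, does
 * its time-travel lookups with elem.seq, then drops the blocker:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use elem.seq as time_seq for tree mod log searches ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */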

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
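
/*
 * Note that the comparisons above descend left when the current node's
 * logical/seq is smaller than the new element's, so the rb tree keeps
 * larger (logical, seq) pairs toward the left; __tree_mod_log_search()
 * walks the tree with the same convention, so insert and lookup agree
 * on the ordering.
 */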

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * the lock until all tree mod log insertions are recorded in the rb tree
 * and then call tree_mod_log_write_unlock() to release it.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
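
/*
 * The unlocked list_empty() checks above are only an optimization for
 * the common case of no tree mod seq blockers; the re-check under
 * tree_mod_log_write_lock() is what makes the answer authoritative,
 * the usual check / lock / re-check pattern.
 */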

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}
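
/*
 * Note the ordering above: the element is allocated before
 * tree_mod_dont_log() takes tree_mod_log_lock, so the (possibly
 * sleeping) allocation never happens while that spinning rwlock is
 * held; if logging turns out to be unnecessary the element is freed.
 */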

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(root->fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root->fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block. The parent block (if
 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked. If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow. This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
						     eb->len);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(root, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(root->fs_info,
				"failed to read tree block %llu from get_old_root", logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(root->fs_info, logical,
					       root->nodesize);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_test_is_dummy_root(root))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
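
/*
 * Example of case 1) above: a block COWed earlier in this transaction
 * has btrfs_header_generation(buf) == trans->transid and no WRITTEN
 * flag set yet, so a second modification within the same transaction
 * skips the COW entirely (see btrfs_cow_block() below).
 */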

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

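	/*
	 * Worked example for the mask below: with SZ_1G == 0x40000000 and
	 * buf->start == 0x6789abcde, search_start becomes 0x640000000,
	 * the start of the 1GiB-aligned region containing the block, a
	 * hint to the allocator to place the COW copy nearby.
	 */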
	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
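
/*
 * Worked example: with a 16KiB blocksize, blocknr == 0 and
 * other == 40960 gives other - (blocknr + blocksize) == 24576, which
 * is under the 32768 byte threshold, so the two blocks count as close
 * and defrag leaves them where they are.
 */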

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
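
/*
 * In other words, keys sort lexicographically on (objectid, type,
 * offset). Example: (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_INODE_REF_KEY, 256) because the inode item type (1) is
 * smaller than the inode ref type (12), regardless of the offsets.
 */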

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = root->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root->fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
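
/*
 * For example, in an empty leaf the data "stack" starts at
 * BTRFS_LEAF_DATA_SIZE(root), the very end of the block; each inserted
 * item lowers the returned offset by that item's data size, while the
 * item headers grow from the front of the leaf.
 */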

/*
 * search for key in the extent_buffer. The items start at offset p,
 * and they are item_size apart. There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
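
/*
 * Example of the return convention: searching sorted objectid keys
 * {1, 3, 5} for 3 returns 0 with *slot == 1; searching for 4 returns 1
 * with *slot == 2, the slot where the key would have to be inserted to
 * keep the array sorted.
 */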

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to. The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
		if (!IS_ERR(eb))
			free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
1888 */
1889 static noinline int balance_level(struct btrfs_trans_handle *trans,
1890 struct btrfs_root *root,
1891 struct btrfs_path *path, int level)
1892 {
1893 struct extent_buffer *right = NULL;
1894 struct extent_buffer *mid;
1895 struct extent_buffer *left = NULL;
1896 struct extent_buffer *parent = NULL;
1897 int ret = 0;
1898 int wret;
1899 int pslot;
1900 int orig_slot = path->slots[level];
1901 u64 orig_ptr;
1902
1903 if (level == 0)
1904 return 0;
1905
1906 mid = path->nodes[level];
1907
1908 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1909 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1910 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1911
1912 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1913
1914 if (level < BTRFS_MAX_LEVEL - 1) {
1915 parent = path->nodes[level + 1];
1916 pslot = path->slots[level + 1];
1917 }
1918
1919 /*
1920 * deal with the case where there is only one pointer in the root
1921 * by promoting the node below to a root
1922 */
1923 if (!parent) {
1924 struct extent_buffer *child;
1925
1926 if (btrfs_header_nritems(mid) != 1)
1927 return 0;
1928
1929 /* promote the child to a root */
1930 child = read_node_slot(root, mid, 0);
1931 if (!child) {
1932 ret = -EROFS;
1933 btrfs_handle_fs_error(root->fs_info, ret, NULL);
1934 goto enospc;
1935 }
1936
1937 btrfs_tree_lock(child);
1938 btrfs_set_lock_blocking(child);
1939 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1940 if (ret) {
1941 btrfs_tree_unlock(child);
1942 free_extent_buffer(child);
1943 goto enospc;
1944 }
1945
1946 tree_mod_log_set_root_pointer(root, child, 1);
1947 rcu_assign_pointer(root->node, child);
1948
1949 add_root_to_dirty_list(root);
1950 btrfs_tree_unlock(child);
1951
1952 path->locks[level] = 0;
1953 path->nodes[level] = NULL;
1954 clean_tree_block(trans, root->fs_info, mid);
1955 btrfs_tree_unlock(mid);
1956 /* once for the path */
1957 free_extent_buffer(mid);
1958
1959 root_sub_used(root, mid->len);
1960 btrfs_free_tree_block(trans, root, mid, 0, 1);
1961 /* once for the root ptr */
1962 free_extent_buffer_stale(mid);
1963 return 0;
1964 }
1965 if (btrfs_header_nritems(mid) >
1966 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1967 return 0;
1968
1969 left = read_node_slot(root, parent, pslot - 1);
1970 if (left) {
1971 btrfs_tree_lock(left);
1972 btrfs_set_lock_blocking(left);
1973 wret = btrfs_cow_block(trans, root, left,
1974 parent, pslot - 1, &left);
1975 if (wret) {
1976 ret = wret;
1977 goto enospc;
1978 }
1979 }
1980 right = read_node_slot(root, parent, pslot + 1);
1981 if (right) {
1982 btrfs_tree_lock(right);
1983 btrfs_set_lock_blocking(right);
1984 wret = btrfs_cow_block(trans, root, right,
1985 parent, pslot + 1, &right);
1986 if (wret) {
1987 ret = wret;
1988 goto enospc;
1989 }
1990 }
1991
1992 /* first, try to make some room in the middle buffer */
1993 if (left) {
1994 orig_slot += btrfs_header_nritems(left);
1995 wret = push_node_left(trans, root, left, mid, 1);
1996 if (wret < 0)
1997 ret = wret;
1998 }
1999
2000 /*
2001 * then try to empty the rightmost buffer into the middle
2002 */
2003 if (right) {
2004 wret = push_node_left(trans, root, mid, right, 1);
2005 if (wret < 0 && wret != -ENOSPC)
2006 ret = wret;
2007 if (btrfs_header_nritems(right) == 0) {
2008 clean_tree_block(trans, root->fs_info, right);
2009 btrfs_tree_unlock(right);
2010 del_ptr(root, path, level + 1, pslot + 1);
2011 root_sub_used(root, right->len);
2012 btrfs_free_tree_block(trans, root, right, 0, 1);
2013 free_extent_buffer_stale(right);
2014 right = NULL;
2015 } else {
2016 struct btrfs_disk_key right_key;
2017 btrfs_node_key(right, &right_key, 0);
2018 tree_mod_log_set_node_key(root->fs_info, parent,
2019 pslot + 1, 0);
2020 btrfs_set_node_key(parent, &right_key, pslot + 1);
2021 btrfs_mark_buffer_dirty(parent);
2022 }
2023 }
2024 if (btrfs_header_nritems(mid) == 1) {
2025 /*
2026 * we're not allowed to leave a node with one item in the
2027 * tree during a delete. A deletion from lower in the tree
2028 * could try to delete the only pointer in this node.
2029 * So, pull some keys from the left.
2030 * There has to be a left pointer at this point because
2031 * otherwise we would have pulled some pointers from the
2032 * right
2033 */
2034 if (!left) {
2035 ret = -EROFS;
2036 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2037 goto enospc;
2038 }
2039 wret = balance_node_right(trans, root, mid, left);
2040 if (wret < 0) {
2041 ret = wret;
2042 goto enospc;
2043 }
2044 if (wret == 1) {
2045 wret = push_node_left(trans, root, left, mid, 1);
2046 if (wret < 0)
2047 ret = wret;
2048 }
2049 BUG_ON(wret == 1);
2050 }
2051 if (btrfs_header_nritems(mid) == 0) {
2052 clean_tree_block(trans, root->fs_info, mid);
2053 btrfs_tree_unlock(mid);
2054 del_ptr(root, path, level + 1, pslot);
2055 root_sub_used(root, mid->len);
2056 btrfs_free_tree_block(trans, root, mid, 0, 1);
2057 free_extent_buffer_stale(mid);
2058 mid = NULL;
2059 } else {
2060 /* update the parent key to reflect our changes */
2061 struct btrfs_disk_key mid_key;
2062 btrfs_node_key(mid, &mid_key, 0);
2063 tree_mod_log_set_node_key(root->fs_info, parent,
2064 pslot, 0);
2065 btrfs_set_node_key(parent, &mid_key, pslot);
2066 btrfs_mark_buffer_dirty(parent);
2067 }
2068
2069 /* update the path */
2070 if (left) {
2071 if (btrfs_header_nritems(left) > orig_slot) {
2072 extent_buffer_get(left);
2073 /* left was locked after cow */
2074 path->nodes[level] = left;
2075 path->slots[level + 1] -= 1;
2076 path->slots[level] = orig_slot;
2077 if (mid) {
2078 btrfs_tree_unlock(mid);
2079 free_extent_buffer(mid);
2080 }
2081 } else {
2082 orig_slot -= btrfs_header_nritems(left);
2083 path->slots[level] = orig_slot;
2084 }
2085 }
2086 /* double check we haven't messed things up */
2087 if (orig_ptr !=
2088 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2089 BUG();
2090 enospc:
2091 if (right) {
2092 btrfs_tree_unlock(right);
2093 free_extent_buffer(right);
2094 }
2095 if (left) {
2096 if (path->nodes[level] != left)
2097 btrfs_tree_unlock(left);
2098 free_extent_buffer(left);
2099 }
2100 return ret;
2101 }
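
/*
 * A rough sense of the 1/4 threshold above, as a sketch (exact figures
 * depend on the chosen nodesize and the on-disk format):
 * BTRFS_NODEPTRS_PER_BLOCK(root) is
 * (nodesize - sizeof(struct btrfs_header)) / sizeof(struct btrfs_key_ptr).
 * Assuming a 16KB nodesize, a 101-byte header and 33-byte key pointers,
 * that is (16384 - 101) / 33 = 493 pointers per node, so balance_level
 * only starts merging once a node drops to 493 / 4 = 123 pointers.
 */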
2102
2103 /* Node balancing for insertion. Here we only split or push nodes around
2104 * when they are completely full. This is also done top down, so we
2105 * have to be pessimistic.
2106 */
2107 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2108 struct btrfs_root *root,
2109 struct btrfs_path *path, int level)
2110 {
2111 struct extent_buffer *right = NULL;
2112 struct extent_buffer *mid;
2113 struct extent_buffer *left = NULL;
2114 struct extent_buffer *parent = NULL;
2115 int ret = 0;
2116 int wret;
2117 int pslot;
2118 int orig_slot = path->slots[level];
2119
2120 if (level == 0)
2121 return 1;
2122
2123 mid = path->nodes[level];
2124 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2125
2126 if (level < BTRFS_MAX_LEVEL - 1) {
2127 parent = path->nodes[level + 1];
2128 pslot = path->slots[level + 1];
2129 }
2130
2131 if (!parent)
2132 return 1;
2133
2134 left = read_node_slot(root, parent, pslot - 1);
2135
2136 /* first, try to make some room in the middle buffer */
2137 if (left) {
2138 u32 left_nr;
2139
2140 btrfs_tree_lock(left);
2141 btrfs_set_lock_blocking(left);
2142
2143 left_nr = btrfs_header_nritems(left);
2144 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2145 wret = 1;
2146 } else {
2147 ret = btrfs_cow_block(trans, root, left, parent,
2148 pslot - 1, &left);
2149 if (ret)
2150 wret = 1;
2151 else {
2152 wret = push_node_left(trans, root,
2153 left, mid, 0);
2154 }
2155 }
2156 if (wret < 0)
2157 ret = wret;
2158 if (wret == 0) {
2159 struct btrfs_disk_key disk_key;
2160 orig_slot += left_nr;
2161 btrfs_node_key(mid, &disk_key, 0);
2162 tree_mod_log_set_node_key(root->fs_info, parent,
2163 pslot, 0);
2164 btrfs_set_node_key(parent, &disk_key, pslot);
2165 btrfs_mark_buffer_dirty(parent);
2166 if (btrfs_header_nritems(left) > orig_slot) {
2167 path->nodes[level] = left;
2168 path->slots[level + 1] -= 1;
2169 path->slots[level] = orig_slot;
2170 btrfs_tree_unlock(mid);
2171 free_extent_buffer(mid);
2172 } else {
2173 orig_slot -=
2174 btrfs_header_nritems(left);
2175 path->slots[level] = orig_slot;
2176 btrfs_tree_unlock(left);
2177 free_extent_buffer(left);
2178 }
2179 return 0;
2180 }
2181 btrfs_tree_unlock(left);
2182 free_extent_buffer(left);
2183 }
2184 right = read_node_slot(root, parent, pslot + 1);
2185
2186 /*
2187 * then try to push some of our pointers into the right buffer
2188 */
2189 if (right) {
2190 u32 right_nr;
2191
2192 btrfs_tree_lock(right);
2193 btrfs_set_lock_blocking(right);
2194
2195 right_nr = btrfs_header_nritems(right);
2196 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2197 wret = 1;
2198 } else {
2199 ret = btrfs_cow_block(trans, root, right,
2200 parent, pslot + 1,
2201 &right);
2202 if (ret)
2203 wret = 1;
2204 else {
2205 wret = balance_node_right(trans, root,
2206 right, mid);
2207 }
2208 }
2209 if (wret < 0)
2210 ret = wret;
2211 if (wret == 0) {
2212 struct btrfs_disk_key disk_key;
2213
2214 btrfs_node_key(right, &disk_key, 0);
2215 tree_mod_log_set_node_key(root->fs_info, parent,
2216 pslot + 1, 0);
2217 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2218 btrfs_mark_buffer_dirty(parent);
2219
2220 if (btrfs_header_nritems(mid) <= orig_slot) {
2221 path->nodes[level] = right;
2222 path->slots[level + 1] += 1;
2223 path->slots[level] = orig_slot -
2224 btrfs_header_nritems(mid);
2225 btrfs_tree_unlock(mid);
2226 free_extent_buffer(mid);
2227 } else {
2228 btrfs_tree_unlock(right);
2229 free_extent_buffer(right);
2230 }
2231 return 0;
2232 }
2233 btrfs_tree_unlock(right);
2234 free_extent_buffer(right);
2235 }
2236 return 1;
2237 }
2238
2239 /*
2240 * readahead one full node of leaves, finding things that are close
2241 * to the block in 'slot', and triggering ra on them.
2242 */
2243 static void reada_for_search(struct btrfs_root *root,
2244 struct btrfs_path *path,
2245 int level, int slot, u64 objectid)
2246 {
2247 struct extent_buffer *node;
2248 struct btrfs_disk_key disk_key;
2249 u32 nritems;
2250 u64 search;
2251 u64 target;
2252 u64 nread = 0;
2253 u64 gen;
2254 struct extent_buffer *eb;
2255 u32 nr;
2256 u32 blocksize;
2257 u32 nscan = 0;
2258
2259 if (level != 1)
2260 return;
2261
2262 if (!path->nodes[level])
2263 return;
2264
2265 node = path->nodes[level];
2266
2267 search = btrfs_node_blockptr(node, slot);
2268 blocksize = root->nodesize;
2269 eb = btrfs_find_tree_block(root->fs_info, search);
2270 if (eb) {
2271 free_extent_buffer(eb);
2272 return;
2273 }
2274
2275 target = search;
2276
2277 nritems = btrfs_header_nritems(node);
2278 nr = slot;
2279
2280 while (1) {
2281 if (path->reada == READA_BACK) {
2282 if (nr == 0)
2283 break;
2284 nr--;
2285 } else if (path->reada == READA_FORWARD) {
2286 nr++;
2287 if (nr >= nritems)
2288 break;
2289 }
2290 if (path->reada == READA_BACK && objectid) {
2291 btrfs_node_key(node, &disk_key, nr);
2292 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2293 break;
2294 }
2295 search = btrfs_node_blockptr(node, nr);
2296 if ((search <= target && target - search <= 65536) ||
2297 (search > target && search - target <= 65536)) {
2298 gen = btrfs_node_ptr_generation(node, nr);
2299 readahead_tree_block(root, search);
2300 nread += blocksize;
2301 }
2302 nscan++;
2303 if ((nread > 65536 || nscan > 32))
2304 break;
2305 }
2306 }
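
/*
 * The loop above is bounded two ways: it only queues readahead for
 * blocks whose bytenr lies within 64KB (65536 bytes) of the target
 * block, and it stops once nread exceeds 64KB or 32 slots have been
 * scanned, whichever comes first. As a back-of-the-envelope figure,
 * assuming a 16KB nodesize, at most five blocks are queued per call
 * (5 * 16384 = 81920 is the first total past 65536).
 */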
2307
2308 static noinline void reada_for_balance(struct btrfs_root *root,
2309 struct btrfs_path *path, int level)
2310 {
2311 int slot;
2312 int nritems;
2313 struct extent_buffer *parent;
2314 struct extent_buffer *eb;
2315 u64 gen;
2316 u64 block1 = 0;
2317 u64 block2 = 0;
2318
2319 parent = path->nodes[level + 1];
2320 if (!parent)
2321 return;
2322
2323 nritems = btrfs_header_nritems(parent);
2324 slot = path->slots[level + 1];
2325
2326 if (slot > 0) {
2327 block1 = btrfs_node_blockptr(parent, slot - 1);
2328 gen = btrfs_node_ptr_generation(parent, slot - 1);
2329 eb = btrfs_find_tree_block(root->fs_info, block1);
2330 /*
2331 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2332 * don't want to return -EAGAIN here. That would loop
2333 * forever
2334 */
2335 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2336 block1 = 0;
2337 free_extent_buffer(eb);
2338 }
2339 if (slot + 1 < nritems) {
2340 block2 = btrfs_node_blockptr(parent, slot + 1);
2341 gen = btrfs_node_ptr_generation(parent, slot + 1);
2342 eb = btrfs_find_tree_block(root->fs_info, block2);
2343 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2344 block2 = 0;
2345 free_extent_buffer(eb);
2346 }
2347
2348 if (block1)
2349 readahead_tree_block(root, block1);
2350 if (block2)
2351 readahead_tree_block(root, block2);
2352 }
2353
2354
2355 /*
2356 * when we walk down the tree, it is usually safe to unlock the higher layers
2357 * in the tree. One exception is when our path goes through slot 0, because
2358 * operations on the tree might require changing key pointers higher up in the
2359 * tree.
2360 *
2361 * callers might also have set path->keep_locks, which tells this code to keep
2362 * the lock if the path points to the last slot in the block. This is part of
2363 * walking through the tree, and selecting the next slot in the higher block.
2364 *
2365 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
2366 * if lowest_unlock is 1, level 0 won't be unlocked.
2367 */
2368 static noinline void unlock_up(struct btrfs_path *path, int level,
2369 int lowest_unlock, int min_write_lock_level,
2370 int *write_lock_level)
2371 {
2372 int i;
2373 int skip_level = level;
2374 int no_skips = 0;
2375 struct extent_buffer *t;
2376
2377 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2378 if (!path->nodes[i])
2379 break;
2380 if (!path->locks[i])
2381 break;
2382 if (!no_skips && path->slots[i] == 0) {
2383 skip_level = i + 1;
2384 continue;
2385 }
2386 if (!no_skips && path->keep_locks) {
2387 u32 nritems;
2388 t = path->nodes[i];
2389 nritems = btrfs_header_nritems(t);
2390 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2391 skip_level = i + 1;
2392 continue;
2393 }
2394 }
2395 if (skip_level < i && i >= lowest_unlock)
2396 no_skips = 1;
2397
2398 t = path->nodes[i];
2399 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2400 btrfs_tree_unlock_rw(t, path->locks[i]);
2401 path->locks[i] = 0;
2402 if (write_lock_level &&
2403 i > min_write_lock_level &&
2404 i <= *write_lock_level) {
2405 *write_lock_level = i - 1;
2406 }
2407 }
2408 }
2409 }
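
/*
 * A concrete walk through the skip logic above (hypothetical path
 * state): suppose slots are {0, 2, 1} at levels 0..2 and lowest_unlock
 * is 1. Level 0 sits in slot 0, so skip_level becomes 1 and both level
 * 0 and its parent stay locked, because changing the first key of
 * level 0 would mean rewriting the pointer in level 1. Level 2
 * satisfies i > skip_level and gets unlocked. Once a non-zero slot has
 * been seen below, a slot-0 position higher up can no longer force a
 * key update, which is what no_skips records.
 */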
2410
2411 /*
2412 * This releases any locks held in the path starting at level and
2413 * going all the way up to the root.
2414 *
2415 * btrfs_search_slot will keep the lock held on higher nodes in a few
2416 * corner cases, such as COW of the block at slot zero in the node. This
2417 * ignores those rules, and it should only be called when there are no
2418 * more updates to be done higher up in the tree.
2419 */
2420 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2421 {
2422 int i;
2423
2424 if (path->keep_locks)
2425 return;
2426
2427 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2428 if (!path->nodes[i])
2429 continue;
2430 if (!path->locks[i])
2431 continue;
2432 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2433 path->locks[i] = 0;
2434 }
2435 }
2436
2437 /*
2438 * helper function for btrfs_search_slot. The goal is to find a block
2439 * in cache without setting the path to blocking. If we find the block
2440 * we return zero and the path is unchanged.
2441 *
2442 * If we can't find the block, we set the path blocking and do some
2443 * reada. -EAGAIN is returned and the search must be repeated.
2444 */
2445 static int
2446 read_block_for_search(struct btrfs_trans_handle *trans,
2447 struct btrfs_root *root, struct btrfs_path *p,
2448 struct extent_buffer **eb_ret, int level, int slot,
2449 struct btrfs_key *key, u64 time_seq)
2450 {
2451 u64 blocknr;
2452 u64 gen;
2453 struct extent_buffer *b = *eb_ret;
2454 struct extent_buffer *tmp;
2455 int ret;
2456
2457 blocknr = btrfs_node_blockptr(b, slot);
2458 gen = btrfs_node_ptr_generation(b, slot);
2459
2460 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2461 if (tmp) {
2462 /* first we do an atomic uptodate check */
2463 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2464 *eb_ret = tmp;
2465 return 0;
2466 }
2467
2468 /* the pages were up to date, but we failed
2469 * the generation number check. Do a full
2470 * read for the generation number that is correct.
2471 * We must do this without dropping locks so
2472 * we can trust our generation number
2473 */
2474 btrfs_set_path_blocking(p);
2475
2476 /* now we're allowed to do a blocking uptodate check */
2477 ret = btrfs_read_buffer(tmp, gen);
2478 if (!ret) {
2479 *eb_ret = tmp;
2480 return 0;
2481 }
2482 free_extent_buffer(tmp);
2483 btrfs_release_path(p);
2484 return -EIO;
2485 }
2486
2487 /*
2488 * reduce lock contention at high levels
2489 * of the btree by dropping locks before
2490 * we read. Don't release the lock on the current
2491 * level because we need to walk this node to figure
2492 * out which blocks to read.
2493 */
2494 btrfs_unlock_up_safe(p, level + 1);
2495 btrfs_set_path_blocking(p);
2496
2497 free_extent_buffer(tmp);
2498 if (p->reada != READA_NONE)
2499 reada_for_search(root, p, level, slot, key->objectid);
2500
2501 btrfs_release_path(p);
2502
2503 ret = -EAGAIN;
2504 tmp = read_tree_block(root, blocknr, 0);
2505 if (!IS_ERR(tmp)) {
2506 /*
2507 * If the read above didn't mark this buffer up to date,
2508 * it will never end up being up to date. Set ret to EIO now
2509 * and give up so that our caller doesn't loop forever
2510 * on our EAGAINs.
2511 */
2512 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2513 ret = -EIO;
2514 free_extent_buffer(tmp);
2515 }
2516 return ret;
2517 }
2518
2519 /*
2520 * helper function for btrfs_search_slot. This does all of the checks
2521 * for node-level blocks and does any balancing required based on
2522 * the ins_len.
2523 *
2524 * If no extra work was required, zero is returned. If we had to
2525 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2526 * start over
2527 */
2528 static int
2529 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2530 struct btrfs_root *root, struct btrfs_path *p,
2531 struct extent_buffer *b, int level, int ins_len,
2532 int *write_lock_level)
2533 {
2534 int ret;
2535 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2536 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2537 int sret;
2538
2539 if (*write_lock_level < level + 1) {
2540 *write_lock_level = level + 1;
2541 btrfs_release_path(p);
2542 goto again;
2543 }
2544
2545 btrfs_set_path_blocking(p);
2546 reada_for_balance(root, p, level);
2547 sret = split_node(trans, root, p, level);
2548 btrfs_clear_path_blocking(p, NULL, 0);
2549
2550 BUG_ON(sret > 0);
2551 if (sret) {
2552 ret = sret;
2553 goto done;
2554 }
2555 b = p->nodes[level];
2556 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2557 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2558 int sret;
2559
2560 if (*write_lock_level < level + 1) {
2561 *write_lock_level = level + 1;
2562 btrfs_release_path(p);
2563 goto again;
2564 }
2565
2566 btrfs_set_path_blocking(p);
2567 reada_for_balance(root, p, level);
2568 sret = balance_level(trans, root, p, level);
2569 btrfs_clear_path_blocking(p, NULL, 0);
2570
2571 if (sret) {
2572 ret = sret;
2573 goto done;
2574 }
2575 b = p->nodes[level];
2576 if (!b) {
2577 btrfs_release_path(p);
2578 goto again;
2579 }
2580 BUG_ON(btrfs_header_nritems(b) == 1);
2581 }
2582 return 0;
2583
2584 again:
2585 ret = -EAGAIN;
2586 done:
2587 return ret;
2588 }
2589
2590 static void key_search_validate(struct extent_buffer *b,
2591 struct btrfs_key *key,
2592 int level)
2593 {
2594 #ifdef CONFIG_BTRFS_ASSERT
2595 struct btrfs_disk_key disk_key;
2596
2597 btrfs_cpu_key_to_disk(&disk_key, key);
2598
2599 if (level == 0)
2600 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2601 offsetof(struct btrfs_leaf, items[0].key),
2602 sizeof(disk_key)));
2603 else
2604 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2605 offsetof(struct btrfs_node, ptrs[0].key),
2606 sizeof(disk_key)));
2607 #endif
2608 }
2609
2610 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2611 int level, int *prev_cmp, int *slot)
2612 {
2613 if (*prev_cmp != 0) {
2614 *prev_cmp = bin_search(b, key, level, slot);
2615 return *prev_cmp;
2616 }
2617
2618 key_search_validate(b, key, level);
2619 *slot = 0;
2620
2621 return 0;
2622 }
2623
2624 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2625 u64 iobjectid, u64 ioff, u8 key_type,
2626 struct btrfs_key *found_key)
2627 {
2628 int ret;
2629 struct btrfs_key key;
2630 struct extent_buffer *eb;
2631
2632 ASSERT(path);
2633 ASSERT(found_key);
2634
2635 key.type = key_type;
2636 key.objectid = iobjectid;
2637 key.offset = ioff;
2638
2639 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2640 if (ret < 0)
2641 return ret;
2642
2643 eb = path->nodes[0];
2644 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2645 ret = btrfs_next_leaf(fs_root, path);
2646 if (ret)
2647 return ret;
2648 eb = path->nodes[0];
2649 }
2650
2651 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2652 if (found_key->type != key.type ||
2653 found_key->objectid != key.objectid)
2654 return 1;
2655
2656 return 0;
2657 }
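
/*
 * A minimal usage sketch for btrfs_find_item (illustrative only; the
 * fs_root and objectid below stand in for a real caller's values):
 *
 *	struct btrfs_key found_key;
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(fs_root, path, objectid, 0,
 *			      BTRFS_INODE_ITEM_KEY, &found_key);
 *	btrfs_free_path(path);
 *
 * ret == 0 means found_key holds a key with the matching objectid and
 * type, ret == 1 means no such item exists, and ret < 0 is an error,
 * mirroring btrfs_search_slot.
 */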
2658
2659 /*
2660 * look for the key in the tree. The path is filled in with nodes along the
2661 * way. If the key is found, we return zero and you can find the item in the
2662 * leaf level of the path (level 0).
2663 *
2664 * If the key isn't found, the path points to the slot where it should
2665 * be inserted, and 1 is returned. If there are other errors during the
2666 * search a negative error number is returned.
2667 *
2668 * If ins_len > 0, nodes and leaves will be split as we walk down the
2669 * tree. If ins_len < 0, nodes will be merged as we walk down the tree (if
2670 * possible).
2671 */
2672 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2673 *root, struct btrfs_key *key, struct btrfs_path *p, int
2674 ins_len, int cow)
2675 {
2676 struct extent_buffer *b;
2677 int slot;
2678 int ret;
2679 int err;
2680 int level;
2681 int lowest_unlock = 1;
2682 int root_lock;
2683 /* everything at write_lock_level or lower must be write locked */
2684 int write_lock_level = 0;
2685 u8 lowest_level = 0;
2686 int min_write_lock_level;
2687 int prev_cmp;
2688
2689 lowest_level = p->lowest_level;
2690 WARN_ON(lowest_level && ins_len > 0);
2691 WARN_ON(p->nodes[0] != NULL);
2692 BUG_ON(!cow && ins_len);
2693
2694 if (ins_len < 0) {
2695 lowest_unlock = 2;
2696
2697 /* when we are removing items, we might have to go up to level
2698 * two as we update tree pointers. Make sure we keep write
2699 * locks for those levels as well
2700 */
2701 write_lock_level = 2;
2702 } else if (ins_len > 0) {
2703 /*
2704 * for inserting items, make sure we have a write lock on
2705 * level 1 so we can update keys
2706 */
2707 write_lock_level = 1;
2708 }
2709
2710 if (!cow)
2711 write_lock_level = -1;
2712
2713 if (cow && (p->keep_locks || p->lowest_level))
2714 write_lock_level = BTRFS_MAX_LEVEL;
2715
2716 min_write_lock_level = write_lock_level;
2717
2718 again:
2719 prev_cmp = -1;
2720 /*
2721 * we try very hard to do read locks on the root
2722 */
2723 root_lock = BTRFS_READ_LOCK;
2724 level = 0;
2725 if (p->search_commit_root) {
2726 /*
2727 * the commit roots are read only
2728 * so we always do read locks
2729 */
2730 if (p->need_commit_sem)
2731 down_read(&root->fs_info->commit_root_sem);
2732 b = root->commit_root;
2733 extent_buffer_get(b);
2734 level = btrfs_header_level(b);
2735 if (p->need_commit_sem)
2736 up_read(&root->fs_info->commit_root_sem);
2737 if (!p->skip_locking)
2738 btrfs_tree_read_lock(b);
2739 } else {
2740 if (p->skip_locking) {
2741 b = btrfs_root_node(root);
2742 level = btrfs_header_level(b);
2743 } else {
2744 /* we don't know the level of the root node
2745 * until we actually have it read locked
2746 */
2747 b = btrfs_read_lock_root_node(root);
2748 level = btrfs_header_level(b);
2749 if (level <= write_lock_level) {
2750 /* whoops, must trade for write lock */
2751 btrfs_tree_read_unlock(b);
2752 free_extent_buffer(b);
2753 b = btrfs_lock_root_node(root);
2754 root_lock = BTRFS_WRITE_LOCK;
2755
2756 /* the level might have changed, check again */
2757 level = btrfs_header_level(b);
2758 }
2759 }
2760 }
2761 p->nodes[level] = b;
2762 if (!p->skip_locking)
2763 p->locks[level] = root_lock;
2764
2765 while (b) {
2766 level = btrfs_header_level(b);
2767
2768 /*
2769 * setup the path here so we can release it under lock
2770 * contention with the cow code
2771 */
2772 if (cow) {
2773 /*
2774 * if we don't really need to cow this block
2775 * then we don't want to set the path blocking,
2776 * so we test it here
2777 */
2778 if (!should_cow_block(trans, root, b))
2779 goto cow_done;
2780
2781 /*
2782 * must have write locks on this node and the
2783 * parent
2784 */
2785 if (level > write_lock_level ||
2786 (level + 1 > write_lock_level &&
2787 level + 1 < BTRFS_MAX_LEVEL &&
2788 p->nodes[level + 1])) {
2789 write_lock_level = level + 1;
2790 btrfs_release_path(p);
2791 goto again;
2792 }
2793
2794 btrfs_set_path_blocking(p);
2795 err = btrfs_cow_block(trans, root, b,
2796 p->nodes[level + 1],
2797 p->slots[level + 1], &b);
2798 if (err) {
2799 ret = err;
2800 goto done;
2801 }
2802 }
2803 cow_done:
2804 p->nodes[level] = b;
2805 btrfs_clear_path_blocking(p, NULL, 0);
2806
2807 /*
2808 * we have a lock on b and as long as we aren't changing
2809 * the tree, there is no way for the items in b to change.
2810 * It is safe to drop the lock on our parent before we
2811 * go through the expensive btree search on b.
2812 *
2813 * If we're inserting or deleting (ins_len != 0), then we might
2814 * be changing slot zero, which may require changing the parent.
2815 * So, we can't drop the lock until after we know which slot
2816 * we're operating on.
2817 */
2818 if (!ins_len && !p->keep_locks) {
2819 int u = level + 1;
2820
2821 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2822 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2823 p->locks[u] = 0;
2824 }
2825 }
2826
2827 ret = key_search(b, key, level, &prev_cmp, &slot);
2828
2829 if (level != 0) {
2830 int dec = 0;
2831 if (ret && slot > 0) {
2832 dec = 1;
2833 slot -= 1;
2834 }
2835 p->slots[level] = slot;
2836 err = setup_nodes_for_search(trans, root, p, b, level,
2837 ins_len, &write_lock_level);
2838 if (err == -EAGAIN)
2839 goto again;
2840 if (err) {
2841 ret = err;
2842 goto done;
2843 }
2844 b = p->nodes[level];
2845 slot = p->slots[level];
2846
2847 /*
2848 * slot 0 is special: if we change the key
2849 * we have to update the parent pointer
2850 * which means we must have a write lock
2851 * on the parent
2852 */
2853 if (slot == 0 && ins_len &&
2854 write_lock_level < level + 1) {
2855 write_lock_level = level + 1;
2856 btrfs_release_path(p);
2857 goto again;
2858 }
2859
2860 unlock_up(p, level, lowest_unlock,
2861 min_write_lock_level, &write_lock_level);
2862
2863 if (level == lowest_level) {
2864 if (dec)
2865 p->slots[level]++;
2866 goto done;
2867 }
2868
2869 err = read_block_for_search(trans, root, p,
2870 &b, level, slot, key, 0);
2871 if (err == -EAGAIN)
2872 goto again;
2873 if (err) {
2874 ret = err;
2875 goto done;
2876 }
2877
2878 if (!p->skip_locking) {
2879 level = btrfs_header_level(b);
2880 if (level <= write_lock_level) {
2881 err = btrfs_try_tree_write_lock(b);
2882 if (!err) {
2883 btrfs_set_path_blocking(p);
2884 btrfs_tree_lock(b);
2885 btrfs_clear_path_blocking(p, b,
2886 BTRFS_WRITE_LOCK);
2887 }
2888 p->locks[level] = BTRFS_WRITE_LOCK;
2889 } else {
2890 err = btrfs_tree_read_lock_atomic(b);
2891 if (!err) {
2892 btrfs_set_path_blocking(p);
2893 btrfs_tree_read_lock(b);
2894 btrfs_clear_path_blocking(p, b,
2895 BTRFS_READ_LOCK);
2896 }
2897 p->locks[level] = BTRFS_READ_LOCK;
2898 }
2899 p->nodes[level] = b;
2900 }
2901 } else {
2902 p->slots[level] = slot;
2903 if (ins_len > 0 &&
2904 btrfs_leaf_free_space(root, b) < ins_len) {
2905 if (write_lock_level < 1) {
2906 write_lock_level = 1;
2907 btrfs_release_path(p);
2908 goto again;
2909 }
2910
2911 btrfs_set_path_blocking(p);
2912 err = split_leaf(trans, root, key,
2913 p, ins_len, ret == 0);
2914 btrfs_clear_path_blocking(p, NULL, 0);
2915
2916 BUG_ON(err > 0);
2917 if (err) {
2918 ret = err;
2919 goto done;
2920 }
2921 }
2922 if (!p->search_for_split)
2923 unlock_up(p, level, lowest_unlock,
2924 min_write_lock_level, &write_lock_level);
2925 goto done;
2926 }
2927 }
2928 ret = 1;
2929 done:
2930 /*
2931 * we don't really know what they plan on doing with the path
2932 * from here on, so for now just mark it as blocking
2933 */
2934 if (!p->leave_spinning)
2935 btrfs_set_path_blocking(p);
2936 if (ret < 0 && !p->skip_release_on_error)
2937 btrfs_release_path(p);
2938 return ret;
2939 }
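
/*
 * A minimal read-only usage sketch for btrfs_search_slot (illustrative;
 * lookup_example and its key values are assumptions, not a real
 * caller):
 *
 *	static int lookup_example(struct btrfs_root *root, u64 objectid)
 *	{
 *		struct btrfs_path *path;
 *		struct btrfs_key key;
 *		int ret;
 *
 *		path = btrfs_alloc_path();
 *		if (!path)
 *			return -ENOMEM;
 *		key.objectid = objectid;
 *		key.type = BTRFS_INODE_ITEM_KEY;
 *		key.offset = 0;
 *		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *		if (ret == 0)
 *			btrfs_item_key_to_cpu(path->nodes[0], &key,
 *					      path->slots[0]);
 *		btrfs_free_path(path);
 *		return ret;
 *	}
 *
 * A NULL trans with ins_len == 0 and cow == 0 makes this a pure lookup;
 * an insertion would pass a transaction handle, cow == 1 and the number
 * of bytes needed, so leaves are split on the way down if required.
 */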
2940
2941 /*
2942 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2943 * current state of the tree together with the operations recorded in the tree
2944 * modification log to search for the key in a previous version of this tree, as
2945 * denoted by the time_seq parameter.
2946 *
2947 * Naturally, there is no support for insert, delete or cow operations.
2948 *
2949 * The resulting path and return value will be set up as if we called
2950 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2951 */
2952 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2953 struct btrfs_path *p, u64 time_seq)
2954 {
2955 struct extent_buffer *b;
2956 int slot;
2957 int ret;
2958 int err;
2959 int level;
2960 int lowest_unlock = 1;
2961 u8 lowest_level = 0;
2962 int prev_cmp = -1;
2963
2964 lowest_level = p->lowest_level;
2965 WARN_ON(p->nodes[0] != NULL);
2966
2967 if (p->search_commit_root) {
2968 BUG_ON(time_seq);
2969 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2970 }
2971
2972 again:
2973 b = get_old_root(root, time_seq);
2974 level = btrfs_header_level(b);
2975 p->locks[level] = BTRFS_READ_LOCK;
2976
2977 while (b) {
2978 level = btrfs_header_level(b);
2979 p->nodes[level] = b;
2980 btrfs_clear_path_blocking(p, NULL, 0);
2981
2982 /*
2983 * we have a lock on b and as long as we aren't changing
2984 * the tree, there is no way for the items in b to change.
2985 * It is safe to drop the lock on our parent before we
2986 * go through the expensive btree search on b.
2987 */
2988 btrfs_unlock_up_safe(p, level + 1);
2989
2990 /*
2991 * Since we can unwind ebs we want to do a real search every
2992 * time.
2993 */
2994 prev_cmp = -1;
2995 ret = key_search(b, key, level, &prev_cmp, &slot);
2996
2997 if (level != 0) {
2998 int dec = 0;
2999 if (ret && slot > 0) {
3000 dec = 1;
3001 slot -= 1;
3002 }
3003 p->slots[level] = slot;
3004 unlock_up(p, level, lowest_unlock, 0, NULL);
3005
3006 if (level == lowest_level) {
3007 if (dec)
3008 p->slots[level]++;
3009 goto done;
3010 }
3011
3012 err = read_block_for_search(NULL, root, p, &b, level,
3013 slot, key, time_seq);
3014 if (err == -EAGAIN)
3015 goto again;
3016 if (err) {
3017 ret = err;
3018 goto done;
3019 }
3020
3021 level = btrfs_header_level(b);
3022 err = btrfs_tree_read_lock_atomic(b);
3023 if (!err) {
3024 btrfs_set_path_blocking(p);
3025 btrfs_tree_read_lock(b);
3026 btrfs_clear_path_blocking(p, b,
3027 BTRFS_READ_LOCK);
3028 }
3029 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3030 if (!b) {
3031 ret = -ENOMEM;
3032 goto done;
3033 }
3034 p->locks[level] = BTRFS_READ_LOCK;
3035 p->nodes[level] = b;
3036 } else {
3037 p->slots[level] = slot;
3038 unlock_up(p, level, lowest_unlock, 0, NULL);
3039 goto done;
3040 }
3041 }
3042 ret = 1;
3043 done:
3044 if (!p->leave_spinning)
3045 btrfs_set_path_blocking(p);
3046 if (ret < 0)
3047 btrfs_release_path(p);
3048
3049 return ret;
3050 }
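
/*
 * Usage mirrors a read-only btrfs_search_slot call, plus the time_seq
 * argument. A sketch, assuming a sequence element set up elsewhere:
 *
 *	u64 seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	(... the live tree keeps changing ...)
 *	ret = btrfs_search_old_slot(root, &key, path, seq);
 *
 * which searches the tree as it looked when seq was taken; backref
 * walking relies on this to resolve a consistent view.
 */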
3051
3052 /*
3053 * helper to use instead of btrfs_search_slot if no exact match is needed but
3054 * instead the next or previous item should be returned.
3055 * When find_higher is true, the next higher item is returned, the next lower
3056 * otherwise.
3057 * When return_any and find_higher are both true, and no higher item is found,
3058 * return the next lower instead.
3059 * When return_any is true and find_higher is false, and no lower item is found,
3060 * return the next higher instead.
3061 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3062 * < 0 on error
3063 */
3064 int btrfs_search_slot_for_read(struct btrfs_root *root,
3065 struct btrfs_key *key, struct btrfs_path *p,
3066 int find_higher, int return_any)
3067 {
3068 int ret;
3069 struct extent_buffer *leaf;
3070
3071 again:
3072 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3073 if (ret <= 0)
3074 return ret;
3075 /*
3076 * a return value of 1 means the path is at the position where the
3077 * item should be inserted. Normally this is the next bigger item,
3078 * but in case the previous item is the last in a leaf, path points
3079 * to the first free slot in the previous leaf, i.e. at an invalid
3080 * item.
3081 */
3082 leaf = p->nodes[0];
3083
3084 if (find_higher) {
3085 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3086 ret = btrfs_next_leaf(root, p);
3087 if (ret <= 0)
3088 return ret;
3089 if (!return_any)
3090 return 1;
3091 /*
3092 * no higher item found, return the next
3093 * lower instead
3094 */
3095 return_any = 0;
3096 find_higher = 0;
3097 btrfs_release_path(p);
3098 goto again;
3099 }
3100 } else {
3101 if (p->slots[0] == 0) {
3102 ret = btrfs_prev_leaf(root, p);
3103 if (ret < 0)
3104 return ret;
3105 if (!ret) {
3106 leaf = p->nodes[0];
3107 if (p->slots[0] == btrfs_header_nritems(leaf))
3108 p->slots[0]--;
3109 return 0;
3110 }
3111 if (!return_any)
3112 return 1;
3113 /*
3114 * no lower item found, return the next
3115 * higher instead
3116 */
3117 return_any = 0;
3118 find_higher = 1;
3119 btrfs_release_path(p);
3120 goto again;
3121 } else {
3122 --p->slots[0];
3123 }
3124 }
3125 return 0;
3126 }
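
/*
 * Sketch of a nearest-match lookup with the helper above (hypothetical
 * caller; the key values are illustrative):
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = pos;
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *
 * With find_higher and return_any both set, ret == 0 leaves the path at
 * the first item at or after (ino, pos), falling back to the next lower
 * item when nothing higher exists.
 */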
3127
3128 /*
3129 * adjust the pointers going up the tree, starting at level
3130 * making sure the right key of each node points to 'key'.
3131 * This is used after shifting pointers to the left, so it stops
3132 * fixing up pointers when a given leaf/node is not in slot 0 of the
3133 * higher levels
3134 *
3135 */
3136 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3137 struct btrfs_path *path,
3138 struct btrfs_disk_key *key, int level)
3139 {
3140 int i;
3141 struct extent_buffer *t;
3142
3143 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3144 int tslot = path->slots[i];
3145 if (!path->nodes[i])
3146 break;
3147 t = path->nodes[i];
3148 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3149 btrfs_set_node_key(t, key, tslot);
3150 btrfs_mark_buffer_dirty(path->nodes[i]);
3151 if (tslot != 0)
3152 break;
3153 }
3154 }
3155
3156 /*
3157 * update item key.
3158 *
3159 * This function isn't completely safe. It's the caller's responsibility
3160 * to ensure that the new key won't break the ordering.
3161 */
3162 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3163 struct btrfs_path *path,
3164 struct btrfs_key *new_key)
3165 {
3166 struct btrfs_disk_key disk_key;
3167 struct extent_buffer *eb;
3168 int slot;
3169
3170 eb = path->nodes[0];
3171 slot = path->slots[0];
3172 if (slot > 0) {
3173 btrfs_item_key(eb, &disk_key, slot - 1);
3174 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3175 }
3176 if (slot < btrfs_header_nritems(eb) - 1) {
3177 btrfs_item_key(eb, &disk_key, slot + 1);
3178 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3179 }
3180
3181 btrfs_cpu_key_to_disk(&disk_key, new_key);
3182 btrfs_set_item_key(eb, &disk_key, slot);
3183 btrfs_mark_buffer_dirty(eb);
3184 if (slot == 0)
3185 fixup_low_keys(fs_info, path, &disk_key, 1);
3186 }
3187
3188 /*
3189 * try to push data from one node into the next node left in the
3190 * tree.
3191 *
3192 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3193 * error, and > 0 if there was no room in the left hand block.
3194 */
3195 static int push_node_left(struct btrfs_trans_handle *trans,
3196 struct btrfs_root *root, struct extent_buffer *dst,
3197 struct extent_buffer *src, int empty)
3198 {
3199 int push_items = 0;
3200 int src_nritems;
3201 int dst_nritems;
3202 int ret = 0;
3203
3204 src_nritems = btrfs_header_nritems(src);
3205 dst_nritems = btrfs_header_nritems(dst);
3206 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3207 WARN_ON(btrfs_header_generation(src) != trans->transid);
3208 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3209
3210 if (!empty && src_nritems <= 8)
3211 return 1;
3212
3213 if (push_items <= 0)
3214 return 1;
3215
3216 if (empty) {
3217 push_items = min(src_nritems, push_items);
3218 if (push_items < src_nritems) {
3219 /* leave at least 8 pointers in the node if
3220 * we aren't going to empty it
3221 */
3222 if (src_nritems - push_items < 8) {
3223 if (push_items <= 8)
3224 return 1;
3225 push_items -= 8;
3226 }
3227 }
3228 } else
3229 push_items = min(src_nritems - 8, push_items);
3230
3231 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3232 push_items);
3233 if (ret) {
3234 btrfs_abort_transaction(trans, root, ret);
3235 return ret;
3236 }
3237 copy_extent_buffer(dst, src,
3238 btrfs_node_key_ptr_offset(dst_nritems),
3239 btrfs_node_key_ptr_offset(0),
3240 push_items * sizeof(struct btrfs_key_ptr));
3241
3242 if (push_items < src_nritems) {
3243 /*
3244 * don't call tree_mod_log_eb_move here, key removal was already
3245 * fully logged by tree_mod_log_eb_copy above.
3246 */
3247 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3248 btrfs_node_key_ptr_offset(push_items),
3249 (src_nritems - push_items) *
3250 sizeof(struct btrfs_key_ptr));
3251 }
3252 btrfs_set_header_nritems(src, src_nritems - push_items);
3253 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3254 btrfs_mark_buffer_dirty(src);
3255 btrfs_mark_buffer_dirty(dst);
3256
3257 return ret;
3258 }
3259
3260 /*
3261 * try to push data from one node into the next node right in the
3262 * tree.
3263 *
3264 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3265 * error, and > 0 if there was no room in the right hand block.
3266 *
3267 * this will only push up to 1/2 the contents of the left node over
3268 */
3269 static int balance_node_right(struct btrfs_trans_handle *trans,
3270 struct btrfs_root *root,
3271 struct extent_buffer *dst,
3272 struct extent_buffer *src)
3273 {
3274 int push_items = 0;
3275 int max_push;
3276 int src_nritems;
3277 int dst_nritems;
3278 int ret = 0;
3279
3280 WARN_ON(btrfs_header_generation(src) != trans->transid);
3281 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3282
3283 src_nritems = btrfs_header_nritems(src);
3284 dst_nritems = btrfs_header_nritems(dst);
3285 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3286 if (push_items <= 0)
3287 return 1;
3288
3289 if (src_nritems < 4)
3290 return 1;
3291
3292 max_push = src_nritems / 2 + 1;
3293 /* don't try to empty the node */
3294 if (max_push >= src_nritems)
3295 return 1;
3296
3297 if (max_push < push_items)
3298 push_items = max_push;
3299
3300 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3301 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3302 btrfs_node_key_ptr_offset(0),
3303 (dst_nritems) *
3304 sizeof(struct btrfs_key_ptr));
3305
3306 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3307 src_nritems - push_items, push_items);
3308 if (ret) {
3309 btrfs_abort_transaction(trans, root, ret);
3310 return ret;
3311 }
3312 copy_extent_buffer(dst, src,
3313 btrfs_node_key_ptr_offset(0),
3314 btrfs_node_key_ptr_offset(src_nritems - push_items),
3315 push_items * sizeof(struct btrfs_key_ptr));
3316
3317 btrfs_set_header_nritems(src, src_nritems - push_items);
3318 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3319
3320 btrfs_mark_buffer_dirty(src);
3321 btrfs_mark_buffer_dirty(dst);
3322
3323 return ret;
3324 }
3325
3326 /*
3327 * helper function to insert a new root level in the tree.
3328 * A new node is allocated, and a single item is inserted to
3329 * point to the existing root
3330 *
3331 * returns zero on success or < 0 on failure.
3332 */
3333 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3334 struct btrfs_root *root,
3335 struct btrfs_path *path, int level)
3336 {
3337 u64 lower_gen;
3338 struct extent_buffer *lower;
3339 struct extent_buffer *c;
3340 struct extent_buffer *old;
3341 struct btrfs_disk_key lower_key;
3342
3343 BUG_ON(path->nodes[level]);
3344 BUG_ON(path->nodes[level-1] != root->node);
3345
3346 lower = path->nodes[level-1];
3347 if (level == 1)
3348 btrfs_item_key(lower, &lower_key, 0);
3349 else
3350 btrfs_node_key(lower, &lower_key, 0);
3351
3352 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3353 &lower_key, level, root->node->start, 0);
3354 if (IS_ERR(c))
3355 return PTR_ERR(c);
3356
3357 root_add_used(root, root->nodesize);
3358
3359 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3360 btrfs_set_header_nritems(c, 1);
3361 btrfs_set_header_level(c, level);
3362 btrfs_set_header_bytenr(c, c->start);
3363 btrfs_set_header_generation(c, trans->transid);
3364 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3365 btrfs_set_header_owner(c, root->root_key.objectid);
3366
3367 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3368 BTRFS_FSID_SIZE);
3369
3370 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3371 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3372
3373 btrfs_set_node_key(c, &lower_key, 0);
3374 btrfs_set_node_blockptr(c, 0, lower->start);
3375 lower_gen = btrfs_header_generation(lower);
3376 WARN_ON(lower_gen != trans->transid);
3377
3378 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3379
3380 btrfs_mark_buffer_dirty(c);
3381
3382 old = root->node;
3383 tree_mod_log_set_root_pointer(root, c, 0);
3384 rcu_assign_pointer(root->node, c);
3385
3386 /* the super has an extra ref to root->node */
3387 free_extent_buffer(old);
3388
3389 add_root_to_dirty_list(root);
3390 extent_buffer_get(c);
3391 path->nodes[level] = c;
3392 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3393 path->slots[level] = 0;
3394 return 0;
3395 }
3396
3397 /*
3398 * worker function to insert a single pointer in a node.
3399 * the node should have enough room for the pointer already
3400 *
3401 * slot and level indicate where you want the key to go, and
3402 * blocknr is the block the key points to.
3403 */
3404 static void insert_ptr(struct btrfs_trans_handle *trans,
3405 struct btrfs_root *root, struct btrfs_path *path,
3406 struct btrfs_disk_key *key, u64 bytenr,
3407 int slot, int level)
3408 {
3409 struct extent_buffer *lower;
3410 int nritems;
3411 int ret;
3412
3413 BUG_ON(!path->nodes[level]);
3414 btrfs_assert_tree_locked(path->nodes[level]);
3415 lower = path->nodes[level];
3416 nritems = btrfs_header_nritems(lower);
3417 BUG_ON(slot > nritems);
3418 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3419 if (slot != nritems) {
3420 if (level)
3421 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3422 slot, nritems - slot);
3423 memmove_extent_buffer(lower,
3424 btrfs_node_key_ptr_offset(slot + 1),
3425 btrfs_node_key_ptr_offset(slot),
3426 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3427 }
3428 if (level) {
3429 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3430 MOD_LOG_KEY_ADD, GFP_NOFS);
3431 BUG_ON(ret < 0);
3432 }
3433 btrfs_set_node_key(lower, key, slot);
3434 btrfs_set_node_blockptr(lower, slot, bytenr);
3435 WARN_ON(trans->transid == 0);
3436 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3437 btrfs_set_header_nritems(lower, nritems + 1);
3438 btrfs_mark_buffer_dirty(lower);
3439 }
3440
3441 /*
3442 * split the node at the specified level in path in two.
3443 * The path is corrected to point to the appropriate node after the split
3444 *
3445 * Before splitting, this tries to make some room in the node by pushing
3446 * left and right; if either push works, it returns right away.
3447 *
3448 * returns 0 on success and < 0 on failure
3449 */
3450 static noinline int split_node(struct btrfs_trans_handle *trans,
3451 struct btrfs_root *root,
3452 struct btrfs_path *path, int level)
3453 {
3454 struct extent_buffer *c;
3455 struct extent_buffer *split;
3456 struct btrfs_disk_key disk_key;
3457 int mid;
3458 int ret;
3459 u32 c_nritems;
3460
3461 c = path->nodes[level];
3462 WARN_ON(btrfs_header_generation(c) != trans->transid);
3463 if (c == root->node) {
3464 /*
3465 * trying to split the root, let's make a new one
3466 *
3467 * tree mod log: We don't log removal of the old root in
3468 * insert_new_root, because that root buffer will be kept as a
3469 * normal node. We are going to log removal of half of the
3470 * elements below with tree_mod_log_eb_copy. We're holding a
3471 * tree lock on the buffer, which is why we cannot race with
3472 * other tree_mod_log users.
3473 */
3474 ret = insert_new_root(trans, root, path, level + 1);
3475 if (ret)
3476 return ret;
3477 } else {
3478 ret = push_nodes_for_insert(trans, root, path, level);
3479 c = path->nodes[level];
3480 if (!ret && btrfs_header_nritems(c) <
3481 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3482 return 0;
3483 if (ret < 0)
3484 return ret;
3485 }
3486
3487 c_nritems = btrfs_header_nritems(c);
3488 mid = (c_nritems + 1) / 2;
3489 btrfs_node_key(c, &disk_key, mid);
3490
3491 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3492 &disk_key, level, c->start, 0);
3493 if (IS_ERR(split))
3494 return PTR_ERR(split);
3495
3496 root_add_used(root, root->nodesize);
3497
3498 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3499 btrfs_set_header_level(split, btrfs_header_level(c));
3500 btrfs_set_header_bytenr(split, split->start);
3501 btrfs_set_header_generation(split, trans->transid);
3502 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3503 btrfs_set_header_owner(split, root->root_key.objectid);
3504 write_extent_buffer(split, root->fs_info->fsid,
3505 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3506 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3507 btrfs_header_chunk_tree_uuid(split),
3508 BTRFS_UUID_SIZE);
3509
3510 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3511 mid, c_nritems - mid);
3512 if (ret) {
3513 btrfs_abort_transaction(trans, root, ret);
3514 return ret;
3515 }
3516 copy_extent_buffer(split, c,
3517 btrfs_node_key_ptr_offset(0),
3518 btrfs_node_key_ptr_offset(mid),
3519 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3520 btrfs_set_header_nritems(split, c_nritems - mid);
3521 btrfs_set_header_nritems(c, mid);
3522 ret = 0;
3523
3524 btrfs_mark_buffer_dirty(c);
3525 btrfs_mark_buffer_dirty(split);
3526
3527 insert_ptr(trans, root, path, &disk_key, split->start,
3528 path->slots[level + 1] + 1, level + 1);
3529
3530 if (path->slots[level] >= mid) {
3531 path->slots[level] -= mid;
3532 btrfs_tree_unlock(c);
3533 free_extent_buffer(c);
3534 path->nodes[level] = split;
3535 path->slots[level + 1] += 1;
3536 } else {
3537 btrfs_tree_unlock(split);
3538 free_extent_buffer(split);
3539 }
3540 return ret;
3541 }
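
/*
 * Worked example of the split point above, reusing the hypothetical
 * 493-pointer node from the balance_level sketch: with c_nritems == 493,
 * mid is (493 + 1) / 2 == 247, so 'c' keeps pointers [0, 246] and
 * 'split' receives the remaining 493 - 247 == 246 pointers. The path is
 * then rebased into 'split' only when the original slot was >= mid.
 */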
3542
3543 /*
3544 * how many bytes are required to store the items in a leaf. start
3545 * and nr indicate which items in the leaf to check. This totals up the
3546 * space used both by the item structs and the item data
3547 */
3548 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3549 {
3550 struct btrfs_item *start_item;
3551 struct btrfs_item *end_item;
3552 struct btrfs_map_token token;
3553 int data_len;
3554 int nritems = btrfs_header_nritems(l);
3555 int end = min(nritems, start + nr) - 1;
3556
3557 if (!nr)
3558 return 0;
3559 btrfs_init_map_token(&token);
3560 start_item = btrfs_item_nr(start);
3561 end_item = btrfs_item_nr(end);
3562 data_len = btrfs_token_item_offset(l, start_item, &token) +
3563 btrfs_token_item_size(l, start_item, &token);
3564 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3565 data_len += sizeof(struct btrfs_item) * nr;
3566 WARN_ON(data_len < 0);
3567 return data_len;
3568 }
3569
3570 /*
3571 * The space between the end of the leaf items and
3572 * the start of the leaf data. IOW, how much room
3573 * the leaf has left for both items and data
3574 */
3575 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3576 struct extent_buffer *leaf)
3577 {
3578 int nritems = btrfs_header_nritems(leaf);
3579 int ret;
3580 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3581 if (ret < 0) {
3582 btrfs_crit(root->fs_info,
3583 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3584 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3585 leaf_space_used(leaf, 0, nritems), nritems);
3586 }
3587 return ret;
3588 }
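
/*
 * The leaf layout behind these two helpers: struct btrfs_item entries
 * grow forward from the start of the data area while their payloads
 * grow backward from the end, and the free space is the gap in between.
 * As a sketch, assuming a 16KB leaf, a 101-byte header and 25-byte item
 * structs, BTRFS_LEAF_DATA_SIZE is 16384 - 101 = 16283 bytes; ten items
 * with 100-byte payloads leave 16283 - 10 * (25 + 100) = 15033 bytes
 * free.
 */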
3589
3590 /*
3591 * min slot controls the lowest index we're willing to push to the
3592 * right. We'll push up to and including min_slot, but no lower
3593 */
3594 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3595 struct btrfs_root *root,
3596 struct btrfs_path *path,
3597 int data_size, int empty,
3598 struct extent_buffer *right,
3599 int free_space, u32 left_nritems,
3600 u32 min_slot)
3601 {
3602 struct extent_buffer *left = path->nodes[0];
3603 struct extent_buffer *upper = path->nodes[1];
3604 struct btrfs_map_token token;
3605 struct btrfs_disk_key disk_key;
3606 int slot;
3607 u32 i;
3608 int push_space = 0;
3609 int push_items = 0;
3610 struct btrfs_item *item;
3611 u32 nr;
3612 u32 right_nritems;
3613 u32 data_end;
3614 u32 this_item_size;
3615
3616 btrfs_init_map_token(&token);
3617
3618 if (empty)
3619 nr = 0;
3620 else
3621 nr = max_t(u32, 1, min_slot);
3622
3623 if (path->slots[0] >= left_nritems)
3624 push_space += data_size;
3625
3626 slot = path->slots[1];
3627 i = left_nritems - 1;
3628 while (i >= nr) {
3629 item = btrfs_item_nr(i);
3630
3631 if (!empty && push_items > 0) {
3632 if (path->slots[0] > i)
3633 break;
3634 if (path->slots[0] == i) {
3635 int space = btrfs_leaf_free_space(root, left);
3636 if (space + push_space * 2 > free_space)
3637 break;
3638 }
3639 }
3640
3641 if (path->slots[0] == i)
3642 push_space += data_size;
3643
3644 this_item_size = btrfs_item_size(left, item);
3645 if (this_item_size + sizeof(*item) + push_space > free_space)
3646 break;
3647
3648 push_items++;
3649 push_space += this_item_size + sizeof(*item);
3650 if (i == 0)
3651 break;
3652 i--;
3653 }
3654
3655 if (push_items == 0)
3656 goto out_unlock;
3657
3658 WARN_ON(!empty && push_items == left_nritems);
3659
3660 /* push left to right */
3661 right_nritems = btrfs_header_nritems(right);
3662
3663 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3664 push_space -= leaf_data_end(root, left);
3665
3666 /* make room in the right data area */
3667 data_end = leaf_data_end(root, right);
3668 memmove_extent_buffer(right,
3669 btrfs_leaf_data(right) + data_end - push_space,
3670 btrfs_leaf_data(right) + data_end,
3671 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3672
3673 /* copy from the left data area */
3674 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3675 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3676 btrfs_leaf_data(left) + leaf_data_end(root, left),
3677 push_space);
3678
3679 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3680 btrfs_item_nr_offset(0),
3681 right_nritems * sizeof(struct btrfs_item));
3682
3683 /* copy the items from left to right */
3684 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3685 btrfs_item_nr_offset(left_nritems - push_items),
3686 push_items * sizeof(struct btrfs_item));
3687
3688 /* update the item pointers */
3689 right_nritems += push_items;
3690 btrfs_set_header_nritems(right, right_nritems);
3691 push_space = BTRFS_LEAF_DATA_SIZE(root);
3692 for (i = 0; i < right_nritems; i++) {
3693 item = btrfs_item_nr(i);
3694 push_space -= btrfs_token_item_size(right, item, &token);
3695 btrfs_set_token_item_offset(right, item, push_space, &token);
3696 }
3697
3698 left_nritems -= push_items;
3699 btrfs_set_header_nritems(left, left_nritems);
3700
3701 if (left_nritems)
3702 btrfs_mark_buffer_dirty(left);
3703 else
3704 clean_tree_block(trans, root->fs_info, left);
3705
3706 btrfs_mark_buffer_dirty(right);
3707
3708 btrfs_item_key(right, &disk_key, 0);
3709 btrfs_set_node_key(upper, &disk_key, slot + 1);
3710 btrfs_mark_buffer_dirty(upper);
3711
3712 /* then fixup the leaf pointer in the path */
3713 if (path->slots[0] >= left_nritems) {
3714 path->slots[0] -= left_nritems;
3715 if (btrfs_header_nritems(path->nodes[0]) == 0)
3716 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3717 btrfs_tree_unlock(path->nodes[0]);
3718 free_extent_buffer(path->nodes[0]);
3719 path->nodes[0] = right;
3720 path->slots[1] += 1;
3721 } else {
3722 btrfs_tree_unlock(right);
3723 free_extent_buffer(right);
3724 }
3725 return 0;
3726
3727 out_unlock:
3728 btrfs_tree_unlock(right);
3729 free_extent_buffer(right);
3730 return 1;
3731 }
3732
3733 /*
3734 * push some data in the path leaf to the right, trying to free up at
3735 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3736 *
3737 * returns 1 if the push failed because the other node didn't have enough
3738 * room, 0 if everything worked out and < 0 if there were major errors.
3739 *
3740 * this will push starting from min_slot to the end of the leaf. It won't
3741 * push any slot lower than min_slot
3742 */
3743 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3744 *root, struct btrfs_path *path,
3745 int min_data_size, int data_size,
3746 int empty, u32 min_slot)
3747 {
3748 struct extent_buffer *left = path->nodes[0];
3749 struct extent_buffer *right;
3750 struct extent_buffer *upper;
3751 int slot;
3752 int free_space;
3753 u32 left_nritems;
3754 int ret;
3755
3756 if (!path->nodes[1])
3757 return 1;
3758
3759 slot = path->slots[1];
3760 upper = path->nodes[1];
3761 if (slot >= btrfs_header_nritems(upper) - 1)
3762 return 1;
3763
3764 btrfs_assert_tree_locked(path->nodes[1]);
3765
3766 right = read_node_slot(root, upper, slot + 1);
3767 if (right == NULL)
3768 return 1;
3769
3770 btrfs_tree_lock(right);
3771 btrfs_set_lock_blocking(right);
3772
3773 free_space = btrfs_leaf_free_space(root, right);
3774 if (free_space < data_size)
3775 goto out_unlock;
3776
3777 /* cow and double check */
3778 ret = btrfs_cow_block(trans, root, right, upper,
3779 slot + 1, &right);
3780 if (ret)
3781 goto out_unlock;
3782
3783 free_space = btrfs_leaf_free_space(root, right);
3784 if (free_space < data_size)
3785 goto out_unlock;
3786
3787 left_nritems = btrfs_header_nritems(left);
3788 if (left_nritems == 0)
3789 goto out_unlock;
3790
3791 if (path->slots[0] == left_nritems && !empty) {
3792 /* Key greater than all keys in the leaf, right neighbor has
3793 * enough room for it and we're not emptying our leaf to delete
3794 * it, therefore use right neighbor to insert the new item and
3795 * no need to touch/dirty our left leaf. */
3796 btrfs_tree_unlock(left);
3797 free_extent_buffer(left);
3798 path->nodes[0] = right;
3799 path->slots[0] = 0;
3800 path->slots[1]++;
3801 return 0;
3802 }
3803
3804 return __push_leaf_right(trans, root, path, min_data_size, empty,
3805 right, free_space, left_nritems, min_slot);
3806 out_unlock:
3807 btrfs_tree_unlock(right);
3808 free_extent_buffer(right);
3809 return 1;
3810 }
3811
3812 /*
3813 * push some data in the path leaf to the left, trying to free up at
3814 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3815 *
3816 * max_slot can put a limit on how far into the leaf we'll push items. The
3817 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3818 * items
3819 */
3820 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3821 struct btrfs_root *root,
3822 struct btrfs_path *path, int data_size,
3823 int empty, struct extent_buffer *left,
3824 int free_space, u32 right_nritems,
3825 u32 max_slot)
3826 {
3827 struct btrfs_disk_key disk_key;
3828 struct extent_buffer *right = path->nodes[0];
3829 int i;
3830 int push_space = 0;
3831 int push_items = 0;
3832 struct btrfs_item *item;
3833 u32 old_left_nritems;
3834 u32 nr;
3835 int ret = 0;
3836 u32 this_item_size;
3837 u32 old_left_item_size;
3838 struct btrfs_map_token token;
3839
3840 btrfs_init_map_token(&token);
3841
3842 if (empty)
3843 nr = min(right_nritems, max_slot);
3844 else
3845 nr = min(right_nritems - 1, max_slot);
3846
3847 for (i = 0; i < nr; i++) {
3848 item = btrfs_item_nr(i);
3849
3850 if (!empty && push_items > 0) {
3851 if (path->slots[0] < i)
3852 break;
3853 if (path->slots[0] == i) {
3854 int space = btrfs_leaf_free_space(root, right);
3855 if (space + push_space * 2 > free_space)
3856 break;
3857 }
3858 }
3859
3860 if (path->slots[0] == i)
3861 push_space += data_size;
3862
3863 this_item_size = btrfs_item_size(right, item);
3864 if (this_item_size + sizeof(*item) + push_space > free_space)
3865 break;
3866
3867 push_items++;
3868 push_space += this_item_size + sizeof(*item);
3869 }
3870
3871 if (push_items == 0) {
3872 ret = 1;
3873 goto out;
3874 }
3875 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3876
3877 /* push data from right to left */
3878 copy_extent_buffer(left, right,
3879 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3880 btrfs_item_nr_offset(0),
3881 push_items * sizeof(struct btrfs_item));
3882
3883 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3884 btrfs_item_offset_nr(right, push_items - 1);
3885
3886 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3887 leaf_data_end(root, left) - push_space,
3888 btrfs_leaf_data(right) +
3889 btrfs_item_offset_nr(right, push_items - 1),
3890 push_space);
3891 old_left_nritems = btrfs_header_nritems(left);
3892 BUG_ON(old_left_nritems <= 0);
3893
3894 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3895 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3896 u32 ioff;
3897
3898 item = btrfs_item_nr(i);
3899
3900 ioff = btrfs_token_item_offset(left, item, &token);
3901 btrfs_set_token_item_offset(left, item,
3902 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3903 &token);
3904 }
3905 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3906
3907 /* fixup right node */
3908 if (push_items > right_nritems)
3909 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3910 right_nritems);
3911
3912 if (push_items < right_nritems) {
3913 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3914 leaf_data_end(root, right);
3915 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3916 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3917 btrfs_leaf_data(right) +
3918 leaf_data_end(root, right), push_space);
3919
3920 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3921 btrfs_item_nr_offset(push_items),
3922 (btrfs_header_nritems(right) - push_items) *
3923 sizeof(struct btrfs_item));
3924 }
3925 right_nritems -= push_items;
3926 btrfs_set_header_nritems(right, right_nritems);
3927 push_space = BTRFS_LEAF_DATA_SIZE(root);
3928 for (i = 0; i < right_nritems; i++) {
3929 item = btrfs_item_nr(i);
3930
3931 push_space = push_space - btrfs_token_item_size(right,
3932 item, &token);
3933 btrfs_set_token_item_offset(right, item, push_space, &token);
3934 }
3935
3936 btrfs_mark_buffer_dirty(left);
3937 if (right_nritems)
3938 btrfs_mark_buffer_dirty(right);
3939 else
3940 clean_tree_block(trans, root->fs_info, right);
3941
3942 btrfs_item_key(right, &disk_key, 0);
3943 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3944
3945 /* then fixup the leaf pointer in the path */
3946 if (path->slots[0] < push_items) {
3947 path->slots[0] += old_left_nritems;
3948 btrfs_tree_unlock(path->nodes[0]);
3949 free_extent_buffer(path->nodes[0]);
3950 path->nodes[0] = left;
3951 path->slots[1] -= 1;
3952 } else {
3953 btrfs_tree_unlock(left);
3954 free_extent_buffer(left);
3955 path->slots[0] -= push_items;
3956 }
3957 BUG_ON(path->slots[0] < 0);
3958 return ret;
3959 out:
3960 btrfs_tree_unlock(left);
3961 free_extent_buffer(left);
3962 return ret;
3963 }
3964
3965 /*
3966 * push some data in the path leaf to the left, trying to free up at
3967 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3968 *
3969 * max_slot can put a limit on how far into the leaf we'll push items. The
3970 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3971 * items
3972 */
3973 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3974 *root, struct btrfs_path *path, int min_data_size,
3975 int data_size, int empty, u32 max_slot)
3976 {
3977 struct extent_buffer *right = path->nodes[0];
3978 struct extent_buffer *left;
3979 int slot;
3980 int free_space;
3981 u32 right_nritems;
3982 int ret = 0;
3983
3984 slot = path->slots[1];
3985 if (slot == 0)
3986 return 1;
3987 if (!path->nodes[1])
3988 return 1;
3989
3990 right_nritems = btrfs_header_nritems(right);
3991 if (right_nritems == 0)
3992 return 1;
3993
3994 btrfs_assert_tree_locked(path->nodes[1]);
3995
3996 left = read_node_slot(root, path->nodes[1], slot - 1);
3997 if (left == NULL)
3998 return 1;
3999
4000 btrfs_tree_lock(left);
4001 btrfs_set_lock_blocking(left);
4002
4003 free_space = btrfs_leaf_free_space(root, left);
4004 if (free_space < data_size) {
4005 ret = 1;
4006 goto out;
4007 }
4008
4009 /* cow and double check */
4010 ret = btrfs_cow_block(trans, root, left,
4011 path->nodes[1], slot - 1, &left);
4012 if (ret) {
4013 /* we hit -ENOSPC, but it isn't fatal here */
4014 if (ret == -ENOSPC)
4015 ret = 1;
4016 goto out;
4017 }
4018
4019 free_space = btrfs_leaf_free_space(root, left);
4020 if (free_space < data_size) {
4021 ret = 1;
4022 goto out;
4023 }
4024
4025 return __push_leaf_left(trans, root, path, min_data_size,
4026 empty, left, free_space, right_nritems,
4027 max_slot);
4028 out:
4029 btrfs_tree_unlock(left);
4030 free_extent_buffer(left);
4031 return ret;
4032 }
4033
4034 /*
4035 * helper for split_leaf: copy the items from slot 'mid' onward out of
4036 * leaf 'l' into the new leaf 'right' and fix up the path accordingly.
4037 */
4038 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4039 struct btrfs_root *root,
4040 struct btrfs_path *path,
4041 struct extent_buffer *l,
4042 struct extent_buffer *right,
4043 int slot, int mid, int nritems)
4044 {
4045 int data_copy_size;
4046 int rt_data_off;
4047 int i;
4048 struct btrfs_disk_key disk_key;
4049 struct btrfs_map_token token;
4050
4051 btrfs_init_map_token(&token);
4052
4053 nritems = nritems - mid;
4054 btrfs_set_header_nritems(right, nritems);
4055 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4056
4057 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4058 btrfs_item_nr_offset(mid),
4059 nritems * sizeof(struct btrfs_item));
4060
4061 copy_extent_buffer(right, l,
4062 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4063 data_copy_size, btrfs_leaf_data(l) +
4064 leaf_data_end(root, l), data_copy_size);
4065
4066 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4067 btrfs_item_end_nr(l, mid);
4068
4069 for (i = 0; i < nritems; i++) {
4070 struct btrfs_item *item = btrfs_item_nr(i);
4071 u32 ioff;
4072
4073 ioff = btrfs_token_item_offset(right, item, &token);
4074 btrfs_set_token_item_offset(right, item,
4075 ioff + rt_data_off, &token);
4076 }
4077
4078 btrfs_set_header_nritems(l, mid);
4079 btrfs_item_key(right, &disk_key, 0);
4080 insert_ptr(trans, root, path, &disk_key, right->start,
4081 path->slots[1] + 1, 1);
4082
4083 btrfs_mark_buffer_dirty(right);
4084 btrfs_mark_buffer_dirty(l);
4085 BUG_ON(path->slots[0] != slot);
4086
4087 if (mid <= slot) {
4088 btrfs_tree_unlock(path->nodes[0]);
4089 free_extent_buffer(path->nodes[0]);
4090 path->nodes[0] = right;
4091 path->slots[0] -= mid;
4092 path->slots[1] += 1;
4093 } else {
4094 btrfs_tree_unlock(right);
4095 free_extent_buffer(right);
4096 }
4097
4098 BUG_ON(path->slots[0] < 0);
4099 }
4100
4101 /*
4102 * double splits happen when we need to insert a big item in the middle
4103 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4104 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4105 * A B C
4106 *
4107 * We avoid this by trying to push the items on either side of our target
4108 * into the adjacent leaves. If all goes well we can avoid the double split
4109 * completely.
4110 */
4111 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4112 struct btrfs_root *root,
4113 struct btrfs_path *path,
4114 int data_size)
4115 {
4116 int ret;
4117 int progress = 0;
4118 int slot;
4119 u32 nritems;
4120 int space_needed = data_size;
4121
4122 slot = path->slots[0];
4123 if (slot < btrfs_header_nritems(path->nodes[0]))
4124 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4125
4126 /*
4127 * try to push all the items after our slot into the
4128 * right leaf
4129 */
4130 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4131 if (ret < 0)
4132 return ret;
4133
4134 if (ret == 0)
4135 progress++;
4136
4137 nritems = btrfs_header_nritems(path->nodes[0]);
4138 /*
4139 * our goal is to get our slot at the start or end of a leaf. If
4140 * we've done so we're done
4141 */
4142 if (path->slots[0] == 0 || path->slots[0] == nritems)
4143 return 0;
4144
4145 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4146 return 0;
4147
4148 /* try to push all the items before our slot into the left leaf */
4149 slot = path->slots[0];
4150 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4151 if (ret < 0)
4152 return ret;
4153
4154 if (ret == 0)
4155 progress++;
4156
4157 if (progress)
4158 return 0;
4159 return 1;
4160 }
4161
4162 /*
4163 * split the path's leaf in two, making sure there is at least data_size
4164 * available for the resulting leaf level of the path.
4165 *
4166 * returns 0 if all went well and < 0 on failure.
4167 */
4168 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4169 struct btrfs_root *root,
4170 struct btrfs_key *ins_key,
4171 struct btrfs_path *path, int data_size,
4172 int extend)
4173 {
4174 struct btrfs_disk_key disk_key;
4175 struct extent_buffer *l;
4176 u32 nritems;
4177 int mid;
4178 int slot;
4179 struct extent_buffer *right;
4180 struct btrfs_fs_info *fs_info = root->fs_info;
4181 int ret = 0;
4182 int wret;
4183 int split;
4184 int num_doubles = 0;
4185 int tried_avoid_double = 0;
4186
4187 l = path->nodes[0];
4188 slot = path->slots[0];
4189 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4190 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4191 return -EOVERFLOW;
4192
4193 /* first try to make some room by pushing left and right */
4194 if (data_size && path->nodes[1]) {
4195 int space_needed = data_size;
4196
4197 if (slot < btrfs_header_nritems(l))
4198 space_needed -= btrfs_leaf_free_space(root, l);
4199
4200 wret = push_leaf_right(trans, root, path, space_needed,
4201 space_needed, 0, 0);
4202 if (wret < 0)
4203 return wret;
4204 if (wret) {
4205 wret = push_leaf_left(trans, root, path, space_needed,
4206 space_needed, 0, (u32)-1);
4207 if (wret < 0)
4208 return wret;
4209 }
4210 l = path->nodes[0];
4211
4212 /* did the pushes work? */
4213 if (btrfs_leaf_free_space(root, l) >= data_size)
4214 return 0;
4215 }
4216
4217 if (!path->nodes[1]) {
4218 ret = insert_new_root(trans, root, path, 1);
4219 if (ret)
4220 return ret;
4221 }
4222 again:
4223 split = 1;
4224 l = path->nodes[0];
4225 slot = path->slots[0];
4226 nritems = btrfs_header_nritems(l);
4227 mid = (nritems + 1) / 2;
4228
4229 if (mid <= slot) {
4230 if (nritems == 1 ||
4231 leaf_space_used(l, mid, nritems - mid) + data_size >
4232 BTRFS_LEAF_DATA_SIZE(root)) {
4233 if (slot >= nritems) {
4234 split = 0;
4235 } else {
4236 mid = slot;
4237 if (mid != nritems &&
4238 leaf_space_used(l, mid, nritems - mid) +
4239 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4240 if (data_size && !tried_avoid_double)
4241 goto push_for_double;
4242 split = 2;
4243 }
4244 }
4245 }
4246 } else {
4247 if (leaf_space_used(l, 0, mid) + data_size >
4248 BTRFS_LEAF_DATA_SIZE(root)) {
4249 if (!extend && data_size && slot == 0) {
4250 split = 0;
4251 } else if ((extend || !data_size) && slot == 0) {
4252 mid = 1;
4253 } else {
4254 mid = slot;
4255 if (mid != nritems &&
4256 leaf_space_used(l, mid, nritems - mid) +
4257 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4258 if (data_size && !tried_avoid_double)
4259 goto push_for_double;
4260 split = 2;
4261 }
4262 }
4263 }
4264 }
4265
4266 if (split == 0)
4267 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4268 else
4269 btrfs_item_key(l, &disk_key, mid);
4270
4271 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4272 &disk_key, 0, l->start, 0);
4273 if (IS_ERR(right))
4274 return PTR_ERR(right);
4275
4276 root_add_used(root, root->nodesize);
4277
4278 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4279 btrfs_set_header_bytenr(right, right->start);
4280 btrfs_set_header_generation(right, trans->transid);
4281 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4282 btrfs_set_header_owner(right, root->root_key.objectid);
4283 btrfs_set_header_level(right, 0);
4284 write_extent_buffer(right, fs_info->fsid,
4285 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4286
4287 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4288 btrfs_header_chunk_tree_uuid(right),
4289 BTRFS_UUID_SIZE);
4290
4291 if (split == 0) {
4292 if (mid <= slot) {
4293 btrfs_set_header_nritems(right, 0);
4294 insert_ptr(trans, root, path, &disk_key, right->start,
4295 path->slots[1] + 1, 1);
4296 btrfs_tree_unlock(path->nodes[0]);
4297 free_extent_buffer(path->nodes[0]);
4298 path->nodes[0] = right;
4299 path->slots[0] = 0;
4300 path->slots[1] += 1;
4301 } else {
4302 btrfs_set_header_nritems(right, 0);
4303 insert_ptr(trans, root, path, &disk_key, right->start,
4304 path->slots[1], 1);
4305 btrfs_tree_unlock(path->nodes[0]);
4306 free_extent_buffer(path->nodes[0]);
4307 path->nodes[0] = right;
4308 path->slots[0] = 0;
4309 if (path->slots[1] == 0)
4310 fixup_low_keys(fs_info, path, &disk_key, 1);
4311 }
4312 btrfs_mark_buffer_dirty(right);
4313 return ret;
4314 }
4315
4316 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4317
4318 if (split == 2) {
4319 BUG_ON(num_doubles != 0);
4320 num_doubles++;
4321 goto again;
4322 }
4323
4324 return 0;
4325
4326 push_for_double:
4327 push_for_double_split(trans, root, path, data_size);
4328 tried_avoid_double = 1;
4329 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4330 return 0;
4331 goto again;
4332 }
4333
4334 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4335 struct btrfs_root *root,
4336 struct btrfs_path *path, int ins_len)
4337 {
4338 struct btrfs_key key;
4339 struct extent_buffer *leaf;
4340 struct btrfs_file_extent_item *fi;
4341 u64 extent_len = 0;
4342 u32 item_size;
4343 int ret;
4344
4345 leaf = path->nodes[0];
4346 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4347
4348 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4349 key.type != BTRFS_EXTENT_CSUM_KEY);
4350
4351 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4352 return 0;
4353
4354 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4355 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4356 fi = btrfs_item_ptr(leaf, path->slots[0],
4357 struct btrfs_file_extent_item);
4358 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4359 }
4360 btrfs_release_path(path);
4361
4362 path->keep_locks = 1;
4363 path->search_for_split = 1;
4364 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4365 path->search_for_split = 0;
4366 if (ret > 0)
4367 ret = -EAGAIN;
4368 if (ret < 0)
4369 goto err;
4370
4371 ret = -EAGAIN;
4372 leaf = path->nodes[0];
4373 /* if our item isn't there, return now */
4374 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4375 goto err;
4376
4377 /* the leaf has changed, it now has room. return now */
4378 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4379 goto err;
4380
4381 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4382 fi = btrfs_item_ptr(leaf, path->slots[0],
4383 struct btrfs_file_extent_item);
4384 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4385 goto err;
4386 }
4387
4388 btrfs_set_path_blocking(path);
4389 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4390 if (ret)
4391 goto err;
4392
4393 path->keep_locks = 0;
4394 btrfs_unlock_up_safe(path, 1);
4395 return 0;
4396 err:
4397 path->keep_locks = 0;
4398 return ret;
4399 }
4400
4401 static noinline int split_item(struct btrfs_trans_handle *trans,
4402 struct btrfs_root *root,
4403 struct btrfs_path *path,
4404 struct btrfs_key *new_key,
4405 unsigned long split_offset)
4406 {
4407 struct extent_buffer *leaf;
4408 struct btrfs_item *item;
4409 struct btrfs_item *new_item;
4410 int slot;
4411 char *buf;
4412 u32 nritems;
4413 u32 item_size;
4414 u32 orig_offset;
4415 struct btrfs_disk_key disk_key;
4416
4417 leaf = path->nodes[0];
4418 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4419
4420 btrfs_set_path_blocking(path);
4421
4422 item = btrfs_item_nr(path->slots[0]);
4423 orig_offset = btrfs_item_offset(leaf, item);
4424 item_size = btrfs_item_size(leaf, item);
4425
4426 buf = kmalloc(item_size, GFP_NOFS);
4427 if (!buf)
4428 return -ENOMEM;
4429
4430 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4431 path->slots[0]), item_size);
4432
4433 slot = path->slots[0] + 1;
4434 nritems = btrfs_header_nritems(leaf);
4435 if (slot != nritems) {
4436 /* shift the items */
4437 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4438 btrfs_item_nr_offset(slot),
4439 (nritems - slot) * sizeof(struct btrfs_item));
4440 }
4441
4442 btrfs_cpu_key_to_disk(&disk_key, new_key);
4443 btrfs_set_item_key(leaf, &disk_key, slot);
4444
4445 new_item = btrfs_item_nr(slot);
4446
4447 btrfs_set_item_offset(leaf, new_item, orig_offset);
4448 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4449
4450 btrfs_set_item_offset(leaf, item,
4451 orig_offset + item_size - split_offset);
4452 btrfs_set_item_size(leaf, item, split_offset);
4453
4454 btrfs_set_header_nritems(leaf, nritems + 1);
4455
4456 /* write the data for the start of the original item */
4457 write_extent_buffer(leaf, buf,
4458 btrfs_item_ptr_offset(leaf, path->slots[0]),
4459 split_offset);
4460
4461 /* write the data for the new item */
4462 write_extent_buffer(leaf, buf + split_offset,
4463 btrfs_item_ptr_offset(leaf, slot),
4464 item_size - split_offset);
4465 btrfs_mark_buffer_dirty(leaf);
4466
4467 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4468 kfree(buf);
4469 return 0;
4470 }
4471
4472 /*
4473 * This function splits a single item into two items,
4474 * giving 'new_key' to the new item and splitting the
4475 * old one at split_offset (from the start of the item).
4476 *
4477 * The path may be released by this operation. After
4478 * the split, the path is pointing to the old item. The
4479 * new item is going to be in the same node as the old one.
4480 *
4481 * Note, the item being split must be small enough to live alone in
4482 * a tree block with room for one extra struct btrfs_item
4483 *
4484 * This allows us to split the item in place, keeping a lock on the
4485 * leaf the entire time.
4486 */
4487 int btrfs_split_item(struct btrfs_trans_handle *trans,
4488 struct btrfs_root *root,
4489 struct btrfs_path *path,
4490 struct btrfs_key *new_key,
4491 unsigned long split_offset)
4492 {
4493 int ret;
4494 ret = setup_leaf_for_split(trans, root, path,
4495 sizeof(struct btrfs_item));
4496 if (ret)
4497 return ret;
4498
4499 ret = split_item(trans, root, path, new_key, split_offset);
4500 return ret;
4501 }
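
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * split a checksum item at a byte offset. The helper name and key values
 * are made up; the path is assumed to already point at the item (e.g.
 * from btrfs_search_slot). Bytes [0, split_offset) stay in the old item,
 * the rest move into the new one under 'new_offset'.
 */
static int example_split_csum_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 objectid, u64 new_offset,
				   unsigned long split_offset)
{
	struct btrfs_key new_key;

	new_key.objectid = objectid;
	new_key.type = BTRFS_EXTENT_CSUM_KEY;
	new_key.offset = new_offset;

	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}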
4502
4503 /*
4504 * This function duplicates an item, giving 'new_key' to the new item.
4505 * It guarantees both items live in the same tree leaf and the new item
4506 * is contiguous with the original item.
4507 *
4508 * This allows us to split a file extent in place, keeping a lock on the
4509 * leaf the entire time.
4510 */
4511 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4512 struct btrfs_root *root,
4513 struct btrfs_path *path,
4514 struct btrfs_key *new_key)
4515 {
4516 struct extent_buffer *leaf;
4517 int ret;
4518 u32 item_size;
4519
4520 leaf = path->nodes[0];
4521 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4522 ret = setup_leaf_for_split(trans, root, path,
4523 item_size + sizeof(struct btrfs_item));
4524 if (ret)
4525 return ret;
4526
4527 path->slots[0]++;
4528 setup_items_for_insert(root, path, new_key, &item_size,
4529 item_size, item_size +
4530 sizeof(struct btrfs_item), 1);
4531 leaf = path->nodes[0];
4532 memcpy_extent_buffer(leaf,
4533 btrfs_item_ptr_offset(leaf, path->slots[0]),
4534 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4535 item_size);
4536 return 0;
4537 }
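
/*
 * Illustrative sketch, not part of the original file: duplicating a file
 * extent item so the copy can later be truncated to describe an adjacent
 * range (the pattern used when splitting file extents). The helper name
 * is hypothetical. On success the path points at the new copy.
 */
static int example_duplicate_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 ino, u64 new_file_offset)
{
	struct btrfs_key new_key;

	new_key.objectid = ino;
	new_key.type = BTRFS_EXTENT_DATA_KEY;
	new_key.offset = new_file_offset;

	return btrfs_duplicate_item(trans, root, path, &new_key);
}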
4538
4539 /*
4540 * make the item pointed to by the path smaller. new_size indicates
4541 * how small to make it, and from_end tells us if we just chop bytes
4542 * off the end of the item or if we shift the item to chop bytes off
4543 * the front.
4544 */
4545 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4546 u32 new_size, int from_end)
4547 {
4548 int slot;
4549 struct extent_buffer *leaf;
4550 struct btrfs_item *item;
4551 u32 nritems;
4552 unsigned int data_end;
4553 unsigned int old_data_start;
4554 unsigned int old_size;
4555 unsigned int size_diff;
4556 int i;
4557 struct btrfs_map_token token;
4558
4559 btrfs_init_map_token(&token);
4560
4561 leaf = path->nodes[0];
4562 slot = path->slots[0];
4563
4564 old_size = btrfs_item_size_nr(leaf, slot);
4565 if (old_size == new_size)
4566 return;
4567
4568 nritems = btrfs_header_nritems(leaf);
4569 data_end = leaf_data_end(root, leaf);
4570
4571 old_data_start = btrfs_item_offset_nr(leaf, slot);
4572
4573 size_diff = old_size - new_size;
4574
4575 BUG_ON(slot < 0);
4576 BUG_ON(slot >= nritems);
4577
4578 /*
4579 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4580 */
4581 /* first correct the data pointers */
4582 for (i = slot; i < nritems; i++) {
4583 u32 ioff;
4584 item = btrfs_item_nr(i);
4585
4586 ioff = btrfs_token_item_offset(leaf, item, &token);
4587 btrfs_set_token_item_offset(leaf, item,
4588 ioff + size_diff, &token);
4589 }
4590
4591 /* shift the data */
4592 if (from_end) {
4593 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4594 data_end + size_diff, btrfs_leaf_data(leaf) +
4595 data_end, old_data_start + new_size - data_end);
4596 } else {
4597 struct btrfs_disk_key disk_key;
4598 u64 offset;
4599
4600 btrfs_item_key(leaf, &disk_key, slot);
4601
4602 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4603 unsigned long ptr;
4604 struct btrfs_file_extent_item *fi;
4605
4606 fi = btrfs_item_ptr(leaf, slot,
4607 struct btrfs_file_extent_item);
4608 fi = (struct btrfs_file_extent_item *)(
4609 (unsigned long)fi - size_diff);
4610
4611 if (btrfs_file_extent_type(leaf, fi) ==
4612 BTRFS_FILE_EXTENT_INLINE) {
4613 ptr = btrfs_item_ptr_offset(leaf, slot);
4614 memmove_extent_buffer(leaf, ptr,
4615 (unsigned long)fi,
4616 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4617 }
4618 }
4619
4620 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4621 data_end + size_diff, btrfs_leaf_data(leaf) +
4622 data_end, old_data_start - data_end);
4623
4624 offset = btrfs_disk_key_offset(&disk_key);
4625 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4626 btrfs_set_item_key(leaf, &disk_key, slot);
4627 if (slot == 0)
4628 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4629 }
4630
4631 item = btrfs_item_nr(slot);
4632 btrfs_set_item_size(leaf, item, new_size);
4633 btrfs_mark_buffer_dirty(leaf);
4634
4635 if (btrfs_leaf_free_space(root, leaf) < 0) {
4636 btrfs_print_leaf(root, leaf);
4637 BUG();
4638 }
4639 }
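
/*
 * Illustrative sketch, not part of the original file: dropping bytes from
 * the tail of the item the path points at. With from_end == 0 the same
 * call would instead shift the data and bump the key offset so bytes come
 * off the front. The helper name is made up.
 */
static void example_truncate_tail(struct btrfs_root *root,
				  struct btrfs_path *path, u32 drop_bytes)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	if (drop_bytes && drop_bytes < old_size)
		btrfs_truncate_item(root, path, old_size - drop_bytes, 1);
}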
4640
4641 /*
4642 * make the item pointed to by the path bigger, data_size is the added size.
4643 */
4644 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4645 u32 data_size)
4646 {
4647 int slot;
4648 struct extent_buffer *leaf;
4649 struct btrfs_item *item;
4650 u32 nritems;
4651 unsigned int data_end;
4652 unsigned int old_data;
4653 unsigned int old_size;
4654 int i;
4655 struct btrfs_map_token token;
4656
4657 btrfs_init_map_token(&token);
4658
4659 leaf = path->nodes[0];
4660
4661 nritems = btrfs_header_nritems(leaf);
4662 data_end = leaf_data_end(root, leaf);
4663
4664 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4665 btrfs_print_leaf(root, leaf);
4666 BUG();
4667 }
4668 slot = path->slots[0];
4669 old_data = btrfs_item_end_nr(leaf, slot);
4670
4671 BUG_ON(slot < 0);
4672 if (slot >= nritems) {
4673 btrfs_print_leaf(root, leaf);
4674 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4675 slot, nritems);
4676 BUG_ON(1);
4677 }
4678
4679 /*
4680 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4681 */
4682 /* first correct the data pointers */
4683 for (i = slot; i < nritems; i++) {
4684 u32 ioff;
4685 item = btrfs_item_nr(i);
4686
4687 ioff = btrfs_token_item_offset(leaf, item, &token);
4688 btrfs_set_token_item_offset(leaf, item,
4689 ioff - data_size, &token);
4690 }
4691
4692 /* shift the data */
4693 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4694 data_end - data_size, btrfs_leaf_data(leaf) +
4695 data_end, old_data - data_end);
4696
4697 data_end = old_data;
4698 old_size = btrfs_item_size_nr(leaf, slot);
4699 item = btrfs_item_nr(slot);
4700 btrfs_set_item_size(leaf, item, old_size + data_size);
4701 btrfs_mark_buffer_dirty(leaf);
4702
4703 if (btrfs_leaf_free_space(root, leaf) < 0) {
4704 btrfs_print_leaf(root, leaf);
4705 BUG();
4706 }
4707 }
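
/*
 * Illustrative sketch, not part of the original file: growing an item and
 * appending to it. Existing bytes are shifted toward the front of the data
 * area, so the fresh space ends up at the item's tail. The caller must
 * already hold a leaf with enough free space (e.g. searched with a big
 * enough ins_len); the helper name is made up.
 */
static void example_append_to_item(struct btrfs_root *root,
				   struct btrfs_path *path,
				   const void *data, u32 len)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(root, path, len);
	/* recompute the pointer, the item data moved during the extend */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, len);
	btrfs_mark_buffer_dirty(leaf);
}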
4708
4709 /*
4710 * this is a helper for btrfs_insert_empty_items, the main goal here is
4711 * to save stack depth by doing the bulk of the work in a function
4712 * that doesn't call btrfs_search_slot
4713 */
4714 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4715 struct btrfs_key *cpu_key, u32 *data_size,
4716 u32 total_data, u32 total_size, int nr)
4717 {
4718 struct btrfs_item *item;
4719 int i;
4720 u32 nritems;
4721 unsigned int data_end;
4722 struct btrfs_disk_key disk_key;
4723 struct extent_buffer *leaf;
4724 int slot;
4725 struct btrfs_map_token token;
4726
4727 if (path->slots[0] == 0) {
4728 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4729 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4730 }
4731 btrfs_unlock_up_safe(path, 1);
4732
4733 btrfs_init_map_token(&token);
4734
4735 leaf = path->nodes[0];
4736 slot = path->slots[0];
4737
4738 nritems = btrfs_header_nritems(leaf);
4739 data_end = leaf_data_end(root, leaf);
4740
4741 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4742 btrfs_print_leaf(root, leaf);
4743 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4744 total_size, btrfs_leaf_free_space(root, leaf));
4745 BUG();
4746 }
4747
4748 if (slot != nritems) {
4749 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4750
4751 if (old_data < data_end) {
4752 btrfs_print_leaf(root, leaf);
4753 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4754 slot, old_data, data_end);
4755 BUG_ON(1);
4756 }
4757 /*
4758 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4759 */
4760 /* first correct the data pointers */
4761 for (i = slot; i < nritems; i++) {
4762 u32 ioff;
4763
4764 item = btrfs_item_nr(i);
4765 ioff = btrfs_token_item_offset(leaf, item, &token);
4766 btrfs_set_token_item_offset(leaf, item,
4767 ioff - total_data, &token);
4768 }
4769 /* shift the items */
4770 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4771 btrfs_item_nr_offset(slot),
4772 (nritems - slot) * sizeof(struct btrfs_item));
4773
4774 /* shift the data */
4775 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4776 data_end - total_data, btrfs_leaf_data(leaf) +
4777 data_end, old_data - data_end);
4778 data_end = old_data;
4779 }
4780
4781 /* setup the item for the new data */
4782 for (i = 0; i < nr; i++) {
4783 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4784 btrfs_set_item_key(leaf, &disk_key, slot + i);
4785 item = btrfs_item_nr(slot + i);
4786 btrfs_set_token_item_offset(leaf, item,
4787 data_end - data_size[i], &token);
4788 data_end -= data_size[i];
4789 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4790 }
4791
4792 btrfs_set_header_nritems(leaf, nritems + nr);
4793 btrfs_mark_buffer_dirty(leaf);
4794
4795 if (btrfs_leaf_free_space(root, leaf) < 0) {
4796 btrfs_print_leaf(root, leaf);
4797 BUG();
4798 }
4799 }
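
/*
 * A worked example, not from the original file, of the offset math used
 * above: item headers grow from the front of the leaf while item data
 * grows backward from the end, so item i's data starts at the previous
 * data_end minus its own size. The helper is purely illustrative.
 */
static u32 example_data_offset(u32 leaf_data_size, const u32 *sizes, int nr)
{
	u32 data_end = leaf_data_size;
	int i;

	for (i = 0; i < nr; i++)
		data_end -= sizes[i];
	/* data of item nr - 1 occupies [data_end, data_end + sizes[nr - 1]) */
	return data_end;
}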
4800
4801 /*
4802 * Given a key and some data, insert items into the tree.
4803 * This does all the path init required, making room in the tree if needed.
4804 */
4805 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4806 struct btrfs_root *root,
4807 struct btrfs_path *path,
4808 struct btrfs_key *cpu_key, u32 *data_size,
4809 int nr)
4810 {
4811 int ret = 0;
4812 int slot;
4813 int i;
4814 u32 total_size = 0;
4815 u32 total_data = 0;
4816
4817 for (i = 0; i < nr; i++)
4818 total_data += data_size[i];
4819
4820 total_size = total_data + (nr * sizeof(struct btrfs_item));
4821 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4822 if (ret == 0)
4823 return -EEXIST;
4824 if (ret < 0)
4825 return ret;
4826
4827 slot = path->slots[0];
4828 BUG_ON(slot < 0);
4829
4830 setup_items_for_insert(root, path, cpu_key, data_size,
4831 total_data, total_size, nr);
4832 return 0;
4833 }
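
/*
 * Illustrative sketch, not part of the original file: inserting two
 * adjacent items in one batch and zero-filling their payloads. The key
 * values and helper name are made up; keys must be passed in sorted order
 * and must not already exist in the tree.
 */
static int example_batch_insert(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key keys[2];
	u32 sizes[2] = { 16, 32 };	/* made-up payload sizes */
	struct extent_buffer *leaf;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	for (i = 0; i < 2; i++) {
		keys[i].objectid = objectid;
		keys[i].type = BTRFS_EXTENT_CSUM_KEY;
		keys[i].offset = i * 4096;
	}

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (!ret) {
		leaf = path->nodes[0];
		for (i = 0; i < 2; i++)
			memset_extent_buffer(leaf, 0,
				btrfs_item_ptr_offset(leaf, path->slots[0] + i),
				sizes[i]);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}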
4834
4835 /*
4836 * Given a key and some data, insert an item into the tree.
4837 * This does all the path init required, making room in the tree if needed.
4838 */
4839 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4840 *root, struct btrfs_key *cpu_key, void *data, u32
4841 data_size)
4842 {
4843 int ret = 0;
4844 struct btrfs_path *path;
4845 struct extent_buffer *leaf;
4846 unsigned long ptr;
4847
4848 path = btrfs_alloc_path();
4849 if (!path)
4850 return -ENOMEM;
4851 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4852 if (!ret) {
4853 leaf = path->nodes[0];
4854 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4855 write_extent_buffer(leaf, data, ptr, data_size);
4856 btrfs_mark_buffer_dirty(leaf);
4857 }
4858 btrfs_free_path(path);
4859 return ret;
4860 }
4861
4862 /*
4863 * delete the pointer from a given node.
4864 *
4865 * the tree should have been previously balanced so the deletion does not
4866 * empty a node.
4867 */
4868 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4869 int level, int slot)
4870 {
4871 struct extent_buffer *parent = path->nodes[level];
4872 u32 nritems;
4873 int ret;
4874
4875 nritems = btrfs_header_nritems(parent);
4876 if (slot != nritems - 1) {
4877 if (level)
4878 tree_mod_log_eb_move(root->fs_info, parent, slot,
4879 slot + 1, nritems - slot - 1);
4880 memmove_extent_buffer(parent,
4881 btrfs_node_key_ptr_offset(slot),
4882 btrfs_node_key_ptr_offset(slot + 1),
4883 sizeof(struct btrfs_key_ptr) *
4884 (nritems - slot - 1));
4885 } else if (level) {
4886 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4887 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4888 BUG_ON(ret < 0);
4889 }
4890
4891 nritems--;
4892 btrfs_set_header_nritems(parent, nritems);
4893 if (nritems == 0 && parent == root->node) {
4894 BUG_ON(btrfs_header_level(root->node) != 1);
4895 /* just turn the root into a leaf and break */
4896 btrfs_set_header_level(root->node, 0);
4897 } else if (slot == 0) {
4898 struct btrfs_disk_key disk_key;
4899
4900 btrfs_node_key(parent, &disk_key, 0);
4901 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4902 }
4903 btrfs_mark_buffer_dirty(parent);
4904 }
4905
4906 /*
4907 * a helper function to delete the leaf pointed to by path->slots[1] and
4908 * path->nodes[1].
4909 *
4910 * This deletes the pointer in path->nodes[1] and frees the leaf
4911 * block extent.
4912 *
4913 * The path must have already been setup for deleting the leaf, including
4914 * all the proper balancing. path->nodes[1] must be locked.
4915 */
4916 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4917 struct btrfs_root *root,
4918 struct btrfs_path *path,
4919 struct extent_buffer *leaf)
4920 {
4921 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4922 del_ptr(root, path, 1, path->slots[1]);
4923
4924 /*
4925 * btrfs_free_extent is expensive, we want to make sure we
4926 * aren't holding any locks when we call it
4927 */
4928 btrfs_unlock_up_safe(path, 0);
4929
4930 root_sub_used(root, leaf->len);
4931
4932 extent_buffer_get(leaf);
4933 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4934 free_extent_buffer_stale(leaf);
4935 }
4936 /*
4937 * delete the item at the leaf level in path. If that empties
4938 * the leaf, remove it from the tree
4939 */
4940 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4941 struct btrfs_path *path, int slot, int nr)
4942 {
4943 struct extent_buffer *leaf;
4944 struct btrfs_item *item;
4945 u32 last_off;
4946 u32 dsize = 0;
4947 int ret = 0;
4948 int wret;
4949 int i;
4950 u32 nritems;
4951 struct btrfs_map_token token;
4952
4953 btrfs_init_map_token(&token);
4954
4955 leaf = path->nodes[0];
4956 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4957
4958 for (i = 0; i < nr; i++)
4959 dsize += btrfs_item_size_nr(leaf, slot + i);
4960
4961 nritems = btrfs_header_nritems(leaf);
4962
4963 if (slot + nr != nritems) {
4964 int data_end = leaf_data_end(root, leaf);
4965
4966 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4967 data_end + dsize,
4968 btrfs_leaf_data(leaf) + data_end,
4969 last_off - data_end);
4970
4971 for (i = slot + nr; i < nritems; i++) {
4972 u32 ioff;
4973
4974 item = btrfs_item_nr(i);
4975 ioff = btrfs_token_item_offset(leaf, item, &token);
4976 btrfs_set_token_item_offset(leaf, item,
4977 ioff + dsize, &token);
4978 }
4979
4980 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4981 btrfs_item_nr_offset(slot + nr),
4982 sizeof(struct btrfs_item) *
4983 (nritems - slot - nr));
4984 }
4985 btrfs_set_header_nritems(leaf, nritems - nr);
4986 nritems -= nr;
4987
4988 /* delete the leaf if we've emptied it */
4989 if (nritems == 0) {
4990 if (leaf == root->node) {
4991 btrfs_set_header_level(leaf, 0);
4992 } else {
4993 btrfs_set_path_blocking(path);
4994 clean_tree_block(trans, root->fs_info, leaf);
4995 btrfs_del_leaf(trans, root, path, leaf);
4996 }
4997 } else {
4998 int used = leaf_space_used(leaf, 0, nritems);
4999 if (slot == 0) {
5000 struct btrfs_disk_key disk_key;
5001
5002 btrfs_item_key(leaf, &disk_key, 0);
5003 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5004 }
5005
5006 /* delete the leaf if it is mostly empty */
5007 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5008 /* push_leaf_left fixes the path.
5009 * make sure the path still points to our leaf
5010 * for possible call to del_ptr below
5011 */
5012 slot = path->slots[1];
5013 extent_buffer_get(leaf);
5014
5015 btrfs_set_path_blocking(path);
5016 wret = push_leaf_left(trans, root, path, 1, 1,
5017 1, (u32)-1);
5018 if (wret < 0 && wret != -ENOSPC)
5019 ret = wret;
5020
5021 if (path->nodes[0] == leaf &&
5022 btrfs_header_nritems(leaf)) {
5023 wret = push_leaf_right(trans, root, path, 1,
5024 1, 1, 0);
5025 if (wret < 0 && wret != -ENOSPC)
5026 ret = wret;
5027 }
5028
5029 if (btrfs_header_nritems(leaf) == 0) {
5030 path->slots[1] = slot;
5031 btrfs_del_leaf(trans, root, path, leaf);
5032 free_extent_buffer(leaf);
5033 ret = 0;
5034 } else {
5035 /* if we're still in the path, make sure
5036 * we're dirty. Otherwise, one of the
5037 * push_leaf functions must have already
5038 * dirtied this buffer
5039 */
5040 if (path->nodes[0] == leaf)
5041 btrfs_mark_buffer_dirty(leaf);
5042 free_extent_buffer(leaf);
5043 }
5044 } else {
5045 btrfs_mark_buffer_dirty(leaf);
5046 }
5047 }
5048 return ret;
5049 }
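
/*
 * Illustrative sketch, not part of the original file: the usual pattern of
 * looking a key up and deleting exactly that item. ins_len == -1 tells
 * btrfs_search_slot we intend to delete; the helper name is made up.
 */
static int example_delete_one(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}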
5050
5051 /*
5052 * search the tree again to find a leaf with lesser keys
5053 * returns 0 if it found something or 1 if there are no lesser leaves.
5054 * returns < 0 on io errors.
5055 *
5056 * This may release the path, and so you may lose any locks held at the
5057 * time you call it.
5058 */
5059 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5060 {
5061 struct btrfs_key key;
5062 struct btrfs_disk_key found_key;
5063 int ret;
5064
5065 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5066
5067 if (key.offset > 0) {
5068 key.offset--;
5069 } else if (key.type > 0) {
5070 key.type--;
5071 key.offset = (u64)-1;
5072 } else if (key.objectid > 0) {
5073 key.objectid--;
5074 key.type = (u8)-1;
5075 key.offset = (u64)-1;
5076 } else {
5077 return 1;
5078 }
5079
5080 btrfs_release_path(path);
5081 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5082 if (ret < 0)
5083 return ret;
5084 btrfs_item_key(path->nodes[0], &found_key, 0);
5085 ret = comp_keys(&found_key, &key);
5086 /*
5087 * We might have had an item with the previous key in the tree right
5088 * before we released our path. And after we released our path, that
5089 * item might have been pushed to the first slot (0) of the leaf we
5090 * were holding due to a tree balance. Alternatively, an item with the
5091 * previous key can exist as the only element of a leaf (big fat item).
5092 * Therefore account for these 2 cases, so that our callers (like
5093 * btrfs_previous_item) don't miss an existing item with a key matching
5094 * the previous key we computed above.
5095 */
5096 if (ret <= 0)
5097 return 0;
5098 return 1;
5099 }
5100
5101 /*
5102 * A helper function to walk down the tree starting at min_key, looking
5103 * for nodes or leaves that have a minimum transaction id.
5104 * This is used by the btree defrag code and by tree logging.
5105 *
5106 * This does not cow, but it does stuff the starting key it finds back
5107 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5108 * key and get a writable path.
5109 *
5110 * This does lock as it descends; keep_locks is forced to 1 while
5111 * walking and restored before returning.
5112 *
5113 * This honors path->lowest_level to prevent descent past a given level
5114 * of the tree.
5115 *
5116 * min_trans indicates the oldest transaction that you are interested
5117 * in walking through. Any nodes or leaves older than min_trans are
5118 * skipped over (without reading them).
5119 *
5120 * returns zero if something useful was found, < 0 on error and 1 if there
5121 * was nothing in the tree that matched the search criteria.
5122 */
5123 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5124 struct btrfs_path *path,
5125 u64 min_trans)
5126 {
5127 struct extent_buffer *cur;
5128 struct btrfs_key found_key;
5129 int slot;
5130 int sret;
5131 u32 nritems;
5132 int level;
5133 int ret = 1;
5134 int keep_locks = path->keep_locks;
5135
5136 path->keep_locks = 1;
5137 again:
5138 cur = btrfs_read_lock_root_node(root);
5139 level = btrfs_header_level(cur);
5140 WARN_ON(path->nodes[level]);
5141 path->nodes[level] = cur;
5142 path->locks[level] = BTRFS_READ_LOCK;
5143
5144 if (btrfs_header_generation(cur) < min_trans) {
5145 ret = 1;
5146 goto out;
5147 }
5148 while (1) {
5149 nritems = btrfs_header_nritems(cur);
5150 level = btrfs_header_level(cur);
5151 sret = bin_search(cur, min_key, level, &slot);
5152
5153 /* at the lowest level, we're done, setup the path and exit */
5154 if (level == path->lowest_level) {
5155 if (slot >= nritems)
5156 goto find_next_key;
5157 ret = 0;
5158 path->slots[level] = slot;
5159 btrfs_item_key_to_cpu(cur, &found_key, slot);
5160 goto out;
5161 }
5162 if (sret && slot > 0)
5163 slot--;
5164 /*
5165 * check this node pointer against the min_trans parameter.
5166 * If it is too old, skip to the next one.
5167 */
5168 while (slot < nritems) {
5169 u64 gen;
5170
5171 gen = btrfs_node_ptr_generation(cur, slot);
5172 if (gen < min_trans) {
5173 slot++;
5174 continue;
5175 }
5176 break;
5177 }
5178 find_next_key:
5179 /*
5180 * we didn't find a candidate key in this node, walk forward
5181 * and find another one
5182 */
5183 if (slot >= nritems) {
5184 path->slots[level] = slot;
5185 btrfs_set_path_blocking(path);
5186 sret = btrfs_find_next_key(root, path, min_key, level,
5187 min_trans);
5188 if (sret == 0) {
5189 btrfs_release_path(path);
5190 goto again;
5191 } else {
5192 goto out;
5193 }
5194 }
5195 /* save our key for returning back */
5196 btrfs_node_key_to_cpu(cur, &found_key, slot);
5197 path->slots[level] = slot;
5198 if (level == path->lowest_level) {
5199 ret = 0;
5200 goto out;
5201 }
5202 btrfs_set_path_blocking(path);
5203 cur = read_node_slot(root, cur, slot);
5204 BUG_ON(!cur); /* -ENOMEM */
5205
5206 btrfs_tree_read_lock(cur);
5207
5208 path->locks[level - 1] = BTRFS_READ_LOCK;
5209 path->nodes[level - 1] = cur;
5210 unlock_up(path, level, 1, 0, NULL);
5211 btrfs_clear_path_blocking(path, NULL, 0);
5212 }
5213 out:
5214 path->keep_locks = keep_locks;
5215 if (ret == 0) {
5216 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5217 btrfs_set_path_blocking(path);
5218 memcpy(min_key, &found_key, sizeof(found_key));
5219 }
5220 return ret;
5221 }
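
/*
 * Illustrative sketch, not part of the original file: visiting every item
 * in subtrees newer than min_trans, roughly the way the defrag and
 * tree-log code drive this helper. The helper name is made up.
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing newer left */
			break;
		}
		/* process the item at min_key here ... */
		btrfs_release_path(path);

		/* advance past the key we were just given */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}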
5222
5223 static void tree_move_down(struct btrfs_root *root,
5224 struct btrfs_path *path,
5225 int *level, int root_level)
5226 {
5227 BUG_ON(*level == 0);
5228 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5229 path->slots[*level]);
5230 path->slots[*level - 1] = 0;
5231 (*level)--;
5232 }
5233
5234 static int tree_move_next_or_upnext(struct btrfs_root *root,
5235 struct btrfs_path *path,
5236 int *level, int root_level)
5237 {
5238 int ret = 0;
5239 int nritems;
5240 nritems = btrfs_header_nritems(path->nodes[*level]);
5241
5242 path->slots[*level]++;
5243
5244 while (path->slots[*level] >= nritems) {
5245 if (*level == root_level)
5246 return -1;
5247
5248 /* move upnext */
5249 path->slots[*level] = 0;
5250 free_extent_buffer(path->nodes[*level]);
5251 path->nodes[*level] = NULL;
5252 (*level)++;
5253 path->slots[*level]++;
5254
5255 nritems = btrfs_header_nritems(path->nodes[*level]);
5256 ret = 1;
5257 }
5258 return ret;
5259 }
5260
5261 /*
5262 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5263 * or down.
5264 */
5265 static int tree_advance(struct btrfs_root *root,
5266 struct btrfs_path *path,
5267 int *level, int root_level,
5268 int allow_down,
5269 struct btrfs_key *key)
5270 {
5271 int ret;
5272
5273 if (*level == 0 || !allow_down) {
5274 ret = tree_move_next_or_upnext(root, path, level, root_level);
5275 } else {
5276 tree_move_down(root, path, level, root_level);
5277 ret = 0;
5278 }
5279 if (ret >= 0) {
5280 if (*level == 0)
5281 btrfs_item_key_to_cpu(path->nodes[*level], key,
5282 path->slots[*level]);
5283 else
5284 btrfs_node_key_to_cpu(path->nodes[*level], key,
5285 path->slots[*level]);
5286 }
5287 return ret;
5288 }
5289
5290 static int tree_compare_item(struct btrfs_root *left_root,
5291 struct btrfs_path *left_path,
5292 struct btrfs_path *right_path,
5293 char *tmp_buf)
5294 {
5295 int cmp;
5296 int len1, len2;
5297 unsigned long off1, off2;
5298
5299 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5300 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5301 if (len1 != len2)
5302 return 1;
5303
5304 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5305 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5306 right_path->slots[0]);
5307
5308 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5309
5310 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5311 if (cmp)
5312 return 1;
5313 return 0;
5314 }
5315
5316 #define ADVANCE 1
5317 #define ADVANCE_ONLY_NEXT -1
5318
5319 /*
5320 * This function compares two trees and calls the provided callback for
5321 * every changed/new/deleted item it finds.
5322 * If shared tree blocks are encountered, whole subtrees are skipped, making
5323 * the compare pretty fast on snapshotted subvolumes.
5324 *
5325 * This currently works on commit roots only. As commit roots are read only,
5326 * we don't do any locking. The commit roots are protected with transactions.
5327 * Transactions are ended and rejoined when a commit is tried in between.
5328 *
5329 * This function checks for modifications done to the trees while comparing.
5330 * If it detects a change, it aborts immediately.
5331 */
5332 int btrfs_compare_trees(struct btrfs_root *left_root,
5333 struct btrfs_root *right_root,
5334 btrfs_changed_cb_t changed_cb, void *ctx)
5335 {
5336 int ret;
5337 int cmp;
5338 struct btrfs_path *left_path = NULL;
5339 struct btrfs_path *right_path = NULL;
5340 struct btrfs_key left_key;
5341 struct btrfs_key right_key;
5342 char *tmp_buf = NULL;
5343 int left_root_level;
5344 int right_root_level;
5345 int left_level;
5346 int right_level;
5347 int left_end_reached;
5348 int right_end_reached;
5349 int advance_left;
5350 int advance_right;
5351 u64 left_blockptr;
5352 u64 right_blockptr;
5353 u64 left_gen;
5354 u64 right_gen;
5355
5356 left_path = btrfs_alloc_path();
5357 if (!left_path) {
5358 ret = -ENOMEM;
5359 goto out;
5360 }
5361 right_path = btrfs_alloc_path();
5362 if (!right_path) {
5363 ret = -ENOMEM;
5364 goto out;
5365 }
5366
5367 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5368 if (!tmp_buf) {
5369 tmp_buf = vmalloc(left_root->nodesize);
5370 if (!tmp_buf) {
5371 ret = -ENOMEM;
5372 goto out;
5373 }
5374 }
5375
5376 left_path->search_commit_root = 1;
5377 left_path->skip_locking = 1;
5378 right_path->search_commit_root = 1;
5379 right_path->skip_locking = 1;
5380
5381 /*
5382 * Strategy: Go to the first items of both trees. Then do
5383 *
5384 * If both trees are at level 0
5385 * Compare keys of current items
5386 * If left < right treat left item as new, advance left tree
5387 * and repeat
5388 * If left > right treat right item as deleted, advance right tree
5389 * and repeat
5390 * If left == right do deep compare of items, treat as changed if
5391 * needed, advance both trees and repeat
5392 * If both trees are at the same level but not at level 0
5393 * Compare keys of current nodes/leaves
5394 * If left < right advance left tree and repeat
5395 * If left > right advance right tree and repeat
5396 * If left == right compare blockptrs of the next nodes/leaves
5397 * If they match advance both trees but stay at the same level
5398 * and repeat
5399 * If they don't match advance both trees while allowing to go
5400 * deeper and repeat
5401 * If tree levels are different
5402 * Advance the tree that needs it and repeat
5403 *
5404 * Advancing a tree means:
5405 * If we are at level 0, try to go to the next slot. If that's not
5406 * possible, go one level up and repeat. Stop when we find a level
5407 * where we can go to the next slot. We may at this point be on a
5408 * node or a leaf.
5409 *
5410 * If we are not at level 0 and not on shared tree blocks, go one
5411 * level deeper.
5412 *
5413 * If we are not at level 0 and on shared tree blocks, go one slot to
5414 * the right if possible or go up and right.
5415 */
5416
5417 down_read(&left_root->fs_info->commit_root_sem);
5418 left_level = btrfs_header_level(left_root->commit_root);
5419 left_root_level = left_level;
5420 left_path->nodes[left_level] = left_root->commit_root;
5421 extent_buffer_get(left_path->nodes[left_level]);
5422
5423 right_level = btrfs_header_level(right_root->commit_root);
5424 right_root_level = right_level;
5425 right_path->nodes[right_level] = right_root->commit_root;
5426 extent_buffer_get(right_path->nodes[right_level]);
5427 up_read(&left_root->fs_info->commit_root_sem);
5428
5429 if (left_level == 0)
5430 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5431 &left_key, left_path->slots[left_level]);
5432 else
5433 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5434 &left_key, left_path->slots[left_level]);
5435 if (right_level == 0)
5436 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5437 &right_key, right_path->slots[right_level]);
5438 else
5439 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5440 &right_key, right_path->slots[right_level]);
5441
5442 left_end_reached = right_end_reached = 0;
5443 advance_left = advance_right = 0;
5444
5445 while (1) {
5446 if (advance_left && !left_end_reached) {
5447 ret = tree_advance(left_root, left_path, &left_level,
5448 left_root_level,
5449 advance_left != ADVANCE_ONLY_NEXT,
5450 &left_key);
5451 if (ret < 0)
5452 left_end_reached = ADVANCE;
5453 advance_left = 0;
5454 }
5455 if (advance_right && !right_end_reached) {
5456 ret = tree_advance(right_root, right_path, &right_level,
5457 right_root_level,
5458 advance_right != ADVANCE_ONLY_NEXT,
5459 &right_key);
5460 if (ret < 0)
5461 right_end_reached = ADVANCE;
5462 advance_right = 0;
5463 }
5464
5465 if (left_end_reached && right_end_reached) {
5466 ret = 0;
5467 goto out;
5468 } else if (left_end_reached) {
5469 if (right_level == 0) {
5470 ret = changed_cb(left_root, right_root,
5471 left_path, right_path,
5472 &right_key,
5473 BTRFS_COMPARE_TREE_DELETED,
5474 ctx);
5475 if (ret < 0)
5476 goto out;
5477 }
5478 advance_right = ADVANCE;
5479 continue;
5480 } else if (right_end_reached) {
5481 if (left_level == 0) {
5482 ret = changed_cb(left_root, right_root,
5483 left_path, right_path,
5484 &left_key,
5485 BTRFS_COMPARE_TREE_NEW,
5486 ctx);
5487 if (ret < 0)
5488 goto out;
5489 }
5490 advance_left = ADVANCE;
5491 continue;
5492 }
5493
5494 if (left_level == 0 && right_level == 0) {
5495 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5496 if (cmp < 0) {
5497 ret = changed_cb(left_root, right_root,
5498 left_path, right_path,
5499 &left_key,
5500 BTRFS_COMPARE_TREE_NEW,
5501 ctx);
5502 if (ret < 0)
5503 goto out;
5504 advance_left = ADVANCE;
5505 } else if (cmp > 0) {
5506 ret = changed_cb(left_root, right_root,
5507 left_path, right_path,
5508 &right_key,
5509 BTRFS_COMPARE_TREE_DELETED,
5510 ctx);
5511 if (ret < 0)
5512 goto out;
5513 advance_right = ADVANCE;
5514 } else {
5515 enum btrfs_compare_tree_result result;
5516
5517 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5518 ret = tree_compare_item(left_root, left_path,
5519 right_path, tmp_buf);
5520 if (ret)
5521 result = BTRFS_COMPARE_TREE_CHANGED;
5522 else
5523 result = BTRFS_COMPARE_TREE_SAME;
5524 ret = changed_cb(left_root, right_root,
5525 left_path, right_path,
5526 &left_key, result, ctx);
5527 if (ret < 0)
5528 goto out;
5529 advance_left = ADVANCE;
5530 advance_right = ADVANCE;
5531 }
5532 } else if (left_level == right_level) {
5533 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5534 if (cmp < 0) {
5535 advance_left = ADVANCE;
5536 } else if (cmp > 0) {
5537 advance_right = ADVANCE;
5538 } else {
5539 left_blockptr = btrfs_node_blockptr(
5540 left_path->nodes[left_level],
5541 left_path->slots[left_level]);
5542 right_blockptr = btrfs_node_blockptr(
5543 right_path->nodes[right_level],
5544 right_path->slots[right_level]);
5545 left_gen = btrfs_node_ptr_generation(
5546 left_path->nodes[left_level],
5547 left_path->slots[left_level]);
5548 right_gen = btrfs_node_ptr_generation(
5549 right_path->nodes[right_level],
5550 right_path->slots[right_level]);
5551 if (left_blockptr == right_blockptr &&
5552 left_gen == right_gen) {
5553 /*
5554 * As we're on a shared block, don't
5555 * allow going any deeper.
5556 */
5557 advance_left = ADVANCE_ONLY_NEXT;
5558 advance_right = ADVANCE_ONLY_NEXT;
5559 } else {
5560 advance_left = ADVANCE;
5561 advance_right = ADVANCE;
5562 }
5563 }
5564 } else if (left_level < right_level) {
5565 advance_right = ADVANCE;
5566 } else {
5567 advance_left = ADVANCE;
5568 }
5569 }
5570
5571 out:
5572 btrfs_free_path(left_path);
5573 btrfs_free_path(right_path);
5574 kvfree(tmp_buf);
5575 return ret;
5576 }
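
/*
 * Illustrative sketch, not part of the original file: the smallest useful
 * changed_cb, counting how many keys differ between two snapshots. A
 * caller would do something like:
 *
 *	u64 n = 0;
 *	ret = btrfs_compare_trees(old_root, new_root,
 *				  example_count_changes_cb, &n);
 *
 * Returning a negative value from the callback aborts the compare.
 */
static int example_count_changes_cb(struct btrfs_root *left_root,
				    struct btrfs_root *right_root,
				    struct btrfs_path *left_path,
				    struct btrfs_path *right_path,
				    struct btrfs_key *key,
				    enum btrfs_compare_tree_result result,
				    void *ctx)
{
	u64 *count = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		(*count)++;
	return 0;
}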
5577
5578 /*
5579 * this is similar to btrfs_next_leaf, but does not try to preserve
5580 * and fixup the path. It looks for and returns the next key in the
5581 * tree based on the current path and the min_trans parameters.
5582 *
5583 * 0 is returned if another key is found, < 0 if there are any errors
5584 * and 1 is returned if there are no higher keys in the tree
5585 *
5586 * path->keep_locks should be set to 1 on the search made before
5587 * calling this function.
5588 */
5589 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5590 struct btrfs_key *key, int level, u64 min_trans)
5591 {
5592 int slot;
5593 struct extent_buffer *c;
5594
5595 WARN_ON(!path->keep_locks);
5596 while (level < BTRFS_MAX_LEVEL) {
5597 if (!path->nodes[level])
5598 return 1;
5599
5600 slot = path->slots[level] + 1;
5601 c = path->nodes[level];
5602 next:
5603 if (slot >= btrfs_header_nritems(c)) {
5604 int ret;
5605 int orig_lowest;
5606 struct btrfs_key cur_key;
5607 if (level + 1 >= BTRFS_MAX_LEVEL ||
5608 !path->nodes[level + 1])
5609 return 1;
5610
5611 if (path->locks[level + 1]) {
5612 level++;
5613 continue;
5614 }
5615
5616 slot = btrfs_header_nritems(c) - 1;
5617 if (level == 0)
5618 btrfs_item_key_to_cpu(c, &cur_key, slot);
5619 else
5620 btrfs_node_key_to_cpu(c, &cur_key, slot);
5621
5622 orig_lowest = path->lowest_level;
5623 btrfs_release_path(path);
5624 path->lowest_level = level;
5625 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5626 0, 0);
5627 path->lowest_level = orig_lowest;
5628 if (ret < 0)
5629 return ret;
5630
5631 c = path->nodes[level];
5632 slot = path->slots[level];
5633 if (ret == 0)
5634 slot++;
5635 goto next;
5636 }
5637
5638 if (level == 0)
5639 btrfs_item_key_to_cpu(c, key, slot);
5640 else {
5641 u64 gen = btrfs_node_ptr_generation(c, slot);
5642
5643 if (gen < min_trans) {
5644 slot++;
5645 goto next;
5646 }
5647 btrfs_node_key_to_cpu(c, key, slot);
5648 }
5649 return 0;
5650 }
5651 return 1;
5652 }
5653
5654 /*
5655 * search the tree again to find a leaf with greater keys
5656 * returns 0 if it found something or 1 if there are no greater leaves.
5657 * returns < 0 on io errors.
5658 */
5659 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5660 {
5661 return btrfs_next_old_leaf(root, path, 0);
5662 }
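
/*
 * Illustrative sketch, not part of the original file: the canonical loop
 * for iterating all items from a starting key, stepping across leaf
 * boundaries with btrfs_next_leaf. The helper name is made up.
 */
static int example_iterate_items(struct btrfs_root *root,
				 struct btrfs_key *start_key)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		/* process the item at found_key here ... */
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}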
5663
5664 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5665 u64 time_seq)
5666 {
5667 int slot;
5668 int level;
5669 struct extent_buffer *c;
5670 struct extent_buffer *next;
5671 struct btrfs_key key;
5672 u32 nritems;
5673 int ret;
5674 int old_spinning = path->leave_spinning;
5675 int next_rw_lock = 0;
5676
5677 nritems = btrfs_header_nritems(path->nodes[0]);
5678 if (nritems == 0)
5679 return 1;
5680
5681 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5682 again:
5683 level = 1;
5684 next = NULL;
5685 next_rw_lock = 0;
5686 btrfs_release_path(path);
5687
5688 path->keep_locks = 1;
5689 path->leave_spinning = 1;
5690
5691 if (time_seq)
5692 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5693 else
5694 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5695 path->keep_locks = 0;
5696
5697 if (ret < 0)
5698 return ret;
5699
5700 nritems = btrfs_header_nritems(path->nodes[0]);
5701 /*
5702 * by releasing the path above we dropped all our locks. A balance
5703 * could have added more items next to the key that used to be
5704 * at the very end of the block. So, check again here and
5705 * advance the path if there are now more items available.
5706 */
5707 if (nritems > 0 && path->slots[0] < nritems - 1) {
5708 if (ret == 0)
5709 path->slots[0]++;
5710 ret = 0;
5711 goto done;
5712 }
5713 /*
5714 * So the above check misses one case:
5715 * - after releasing the path above, someone has removed the item that
5716 * used to be at the very end of the block, and balance between leaves
5717 * gets another one with bigger key.offset to replace it.
5718 *
5719 * This one should be returned as well, or we can get leaf corruption
5720 * later (esp. in __btrfs_drop_extents()).
5721 *
5722 * And a bit more explanation about this check,
5723 * with ret > 0, the key isn't found, the path points to the slot
5724 * where it should be inserted, so the path->slots[0] item must be the
5725 * bigger one.
5726 */
5727 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5728 ret = 0;
5729 goto done;
5730 }
5731
5732 while (level < BTRFS_MAX_LEVEL) {
5733 if (!path->nodes[level]) {
5734 ret = 1;
5735 goto done;
5736 }
5737
5738 slot = path->slots[level] + 1;
5739 c = path->nodes[level];
5740 if (slot >= btrfs_header_nritems(c)) {
5741 level++;
5742 if (level == BTRFS_MAX_LEVEL) {
5743 ret = 1;
5744 goto done;
5745 }
5746 continue;
5747 }
5748
5749 if (next) {
5750 btrfs_tree_unlock_rw(next, next_rw_lock);
5751 free_extent_buffer(next);
5752 }
5753
5754 next = c;
5755 next_rw_lock = path->locks[level];
5756 ret = read_block_for_search(NULL, root, path, &next, level,
5757 slot, &key, 0);
5758 if (ret == -EAGAIN)
5759 goto again;
5760
5761 if (ret < 0) {
5762 btrfs_release_path(path);
5763 goto done;
5764 }
5765
5766 if (!path->skip_locking) {
5767 ret = btrfs_try_tree_read_lock(next);
5768 if (!ret && time_seq) {
5769 /*
5770 * If we don't get the lock, we may be racing
5771 * with push_leaf_left, holding that lock while
5772 * itself waiting for the leaf we've currently
5773 * locked. To solve this situation, we give up
5774 * on our lock and cycle.
5775 */
5776 free_extent_buffer(next);
5777 btrfs_release_path(path);
5778 cond_resched();
5779 goto again;
5780 }
5781 if (!ret) {
5782 btrfs_set_path_blocking(path);
5783 btrfs_tree_read_lock(next);
5784 btrfs_clear_path_blocking(path, next,
5785 BTRFS_READ_LOCK);
5786 }
5787 next_rw_lock = BTRFS_READ_LOCK;
5788 }
5789 break;
5790 }
5791 path->slots[level] = slot;
5792 while (1) {
5793 level--;
5794 c = path->nodes[level];
5795 if (path->locks[level])
5796 btrfs_tree_unlock_rw(c, path->locks[level]);
5797
5798 free_extent_buffer(c);
5799 path->nodes[level] = next;
5800 path->slots[level] = 0;
5801 if (!path->skip_locking)
5802 path->locks[level] = next_rw_lock;
5803 if (!level)
5804 break;
5805
5806 ret = read_block_for_search(NULL, root, path, &next, level,
5807 0, &key, 0);
5808 if (ret == -EAGAIN)
5809 goto again;
5810
5811 if (ret < 0) {
5812 btrfs_release_path(path);
5813 goto done;
5814 }
5815
5816 if (!path->skip_locking) {
5817 ret = btrfs_try_tree_read_lock(next);
5818 if (!ret) {
5819 btrfs_set_path_blocking(path);
5820 btrfs_tree_read_lock(next);
5821 btrfs_clear_path_blocking(path, next,
5822 BTRFS_READ_LOCK);
5823 }
5824 next_rw_lock = BTRFS_READ_LOCK;
5825 }
5826 }
5827 ret = 0;
5828 done:
5829 unlock_up(path, 0, 1, 0, NULL);
5830 path->leave_spinning = old_spinning;
5831 if (!old_spinning)
5832 btrfs_set_path_blocking(path);
5833
5834 return ret;
5835 }
5836
5837 /*
5838 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5839 * searching until it gets past min_objectid or finds an item of 'type'
5840 *
5841 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5842 */
5843 int btrfs_previous_item(struct btrfs_root *root,
5844 struct btrfs_path *path, u64 min_objectid,
5845 int type)
5846 {
5847 struct btrfs_key found_key;
5848 struct extent_buffer *leaf;
5849 u32 nritems;
5850 int ret;
5851
5852 while (1) {
5853 if (path->slots[0] == 0) {
5854 btrfs_set_path_blocking(path);
5855 ret = btrfs_prev_leaf(root, path);
5856 if (ret != 0)
5857 return ret;
5858 } else {
5859 path->slots[0]--;
5860 }
5861 leaf = path->nodes[0];
5862 nritems = btrfs_header_nritems(leaf);
5863 if (nritems == 0)
5864 return 1;
5865 if (path->slots[0] == nritems)
5866 path->slots[0]--;
5867
5868 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5869 if (found_key.objectid < min_objectid)
5870 break;
5871 if (found_key.type == type)
5872 return 0;
5873 if (found_key.objectid == min_objectid &&
5874 found_key.type < type)
5875 break;
5876 }
5877 return 1;
5878 }
5879
5880 /*
5881 * search in the extent tree to find a previous Metadata/Data extent item
5882 * with min objectid.
5883 *
5884 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5885 */
5886 int btrfs_previous_extent_item(struct btrfs_root *root,
5887 struct btrfs_path *path, u64 min_objectid)
5888 {
5889 struct btrfs_key found_key;
5890 struct extent_buffer *leaf;
5891 u32 nritems;
5892 int ret;
5893
5894 while (1) {
5895 if (path->slots[0] == 0) {
5896 btrfs_set_path_blocking(path);
5897 ret = btrfs_prev_leaf(root, path);
5898 if (ret != 0)
5899 return ret;
5900 } else {
5901 path->slots[0]--;
5902 }
5903 leaf = path->nodes[0];
5904 nritems = btrfs_header_nritems(leaf);
5905 if (nritems == 0)
5906 return 1;
5907 if (path->slots[0] == nritems)
5908 path->slots[0]--;
5909
5910 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5911 if (found_key.objectid < min_objectid)
5912 break;
5913 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5914 found_key.type == BTRFS_METADATA_ITEM_KEY)
5915 return 0;
5916 if (found_key.objectid == min_objectid &&
5917 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5918 break;
5919 }
5920 return 1;
5921 }