1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include <linux/vmalloc.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "print-tree.h"
27 #include "locking.h"
28
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
32 *root, struct btrfs_key *ins_key,
33 struct btrfs_path *path, int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root, struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37 static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root,
39 struct extent_buffer *dst_buf,
40 struct extent_buffer *src_buf);
41 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
42 int level, int slot);
43 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
44 struct extent_buffer *eb);
45
46 struct btrfs_path *btrfs_alloc_path(void)
47 {
48 struct btrfs_path *path;
49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 return path;
51 }
52
53 /*
54 * set all locked nodes in the path to blocking locks. This should
55 * be done before scheduling
56 */
57 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
58 {
59 int i;
60 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
61 if (!p->nodes[i] || !p->locks[i])
62 continue;
63 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
64 if (p->locks[i] == BTRFS_READ_LOCK)
65 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
66 else if (p->locks[i] == BTRFS_WRITE_LOCK)
67 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
68 }
69 }
70
71 /*
72 * reset all the locked nodes in the path to spinning locks.
73 *
74 * held is used to keep lockdep happy: when lockdep is enabled
75 * we set held to a blocking lock before we go around and
76 * retake all the spinlocks in the path. You can safely use NULL
77 * for held.
78 */
79 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80 struct extent_buffer *held, int held_rw)
81 {
82 int i;
83
84 if (held) {
85 btrfs_set_lock_blocking_rw(held, held_rw);
86 if (held_rw == BTRFS_WRITE_LOCK)
87 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
88 else if (held_rw == BTRFS_READ_LOCK)
89 held_rw = BTRFS_READ_LOCK_BLOCKING;
90 }
91 btrfs_set_path_blocking(p);
92
93 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
94 if (p->nodes[i] && p->locks[i]) {
95 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
96 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
97 p->locks[i] = BTRFS_WRITE_LOCK;
98 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
99 p->locks[i] = BTRFS_READ_LOCK;
100 }
101 }
102
103 if (held)
104 btrfs_clear_lock_blocking_rw(held, held_rw);
105 }
106
107 /* this also releases the path */
108 void btrfs_free_path(struct btrfs_path *p)
109 {
110 if (!p)
111 return;
112 btrfs_release_path(p);
113 kmem_cache_free(btrfs_path_cachep, p);
114 }
115
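/*
 * A minimal usage sketch of the path API above (assuming a read-only search
 * with no transaction; error handling abbreviated):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	... read items out of path->nodes[0] at path->slots[0] ...
 * out:
 *	btrfs_free_path(path);	(also drops locks and refs via btrfs_release_path)
 *	return ret;
 */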
116 /*
117 * path release drops references on the extent buffers in the path
118 * and it drops any locks held by this path
119 *
120 * It is safe to call this on paths for which no locks or extent buffers are held.
121 */
122 noinline void btrfs_release_path(struct btrfs_path *p)
123 {
124 int i;
125
126 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
127 p->slots[i] = 0;
128 if (!p->nodes[i])
129 continue;
130 if (p->locks[i]) {
131 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
132 p->locks[i] = 0;
133 }
134 free_extent_buffer(p->nodes[i]);
135 p->nodes[i] = NULL;
136 }
137 }
138
139 /*
140 * safely gets a reference on the root node of a tree. A lock
141 * is not taken, so a concurrent writer may put a different node
142 * at the root of the tree. See btrfs_lock_root_node for the
143 * looping required.
144 *
145 * The extent buffer returned by this has a reference taken, so
146 * it won't disappear. It may stop being the root of the tree
147 * at any time because there are no locks held.
148 */
149 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
150 {
151 struct extent_buffer *eb;
152
153 while (1) {
154 rcu_read_lock();
155 eb = rcu_dereference(root->node);
156
157 /*
158 * RCU really hurts here: we could free up the root node because
159 * it was COWed, but we may not get the new root node yet, so do
160 * the inc_not_zero dance, and if it doesn't work then
161 * synchronize_rcu and try again.
162 */
163 if (atomic_inc_not_zero(&eb->refs)) {
164 rcu_read_unlock();
165 break;
166 }
167 rcu_read_unlock();
168 synchronize_rcu();
169 }
170 return eb;
171 }
172
173 /* loop around taking references on and locking the root node of the
174 * tree until you end up with a lock on the root. A locked buffer
175 * is returned, with a reference held.
176 */
177 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
178 {
179 struct extent_buffer *eb;
180
181 while (1) {
182 eb = btrfs_root_node(root);
183 btrfs_tree_lock(eb);
184 if (eb == root->node)
185 break;
186 btrfs_tree_unlock(eb);
187 free_extent_buffer(eb);
188 }
189 return eb;
190 }
191
192 /* loop around taking references on and locking the root node of the
193 * tree until you end up with a lock on the root. A locked buffer
194 * is returned, with a reference held.
195 */
196 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
197 {
198 struct extent_buffer *eb;
199
200 while (1) {
201 eb = btrfs_root_node(root);
202 btrfs_tree_read_lock(eb);
203 if (eb == root->node)
204 break;
205 btrfs_tree_read_unlock(eb);
206 free_extent_buffer(eb);
207 }
208 return eb;
209 }
210
211 /* cowonly roots (everything not a reference counted cow subvolume) just get
212 * put onto a simple dirty list. transaction.c walks this to make sure they
213 * get properly updated on disk.
214 */
215 static void add_root_to_dirty_list(struct btrfs_root *root)
216 {
217 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
218 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
219 return;
220
221 spin_lock(&root->fs_info->trans_lock);
222 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
223 /* Want the extent tree to be the last on the list */
224 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
225 list_move_tail(&root->dirty_list,
226 &root->fs_info->dirty_cowonly_roots);
227 else
228 list_move(&root->dirty_list,
229 &root->fs_info->dirty_cowonly_roots);
230 }
231 spin_unlock(&root->fs_info->trans_lock);
232 }
233
234 /*
235 * used by snapshot creation to make a copy of a root for a tree with
236 * a given objectid. The buffer with the new root node is returned in
237 * cow_ret, and this func returns zero on success or a negative error code.
238 */
239 int btrfs_copy_root(struct btrfs_trans_handle *trans,
240 struct btrfs_root *root,
241 struct extent_buffer *buf,
242 struct extent_buffer **cow_ret, u64 new_root_objectid)
243 {
244 struct extent_buffer *cow;
245 int ret = 0;
246 int level;
247 struct btrfs_disk_key disk_key;
248
249 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
250 trans->transid != root->fs_info->running_transaction->transid);
251 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
252 trans->transid != root->last_trans);
253
254 level = btrfs_header_level(buf);
255 if (level == 0)
256 btrfs_item_key(buf, &disk_key, 0);
257 else
258 btrfs_node_key(buf, &disk_key, 0);
259
260 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
261 &disk_key, level, buf->start, 0);
262 if (IS_ERR(cow))
263 return PTR_ERR(cow);
264
265 copy_extent_buffer(cow, buf, 0, 0, cow->len);
266 btrfs_set_header_bytenr(cow, cow->start);
267 btrfs_set_header_generation(cow, trans->transid);
268 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
269 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
270 BTRFS_HEADER_FLAG_RELOC);
271 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
272 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
273 else
274 btrfs_set_header_owner(cow, new_root_objectid);
275
276 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
277 BTRFS_FSID_SIZE);
278
279 WARN_ON(btrfs_header_generation(buf) > trans->transid);
280 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
281 ret = btrfs_inc_ref(trans, root, cow, 1);
282 else
283 ret = btrfs_inc_ref(trans, root, cow, 0);
284
285 if (ret)
286 return ret;
287
288 btrfs_mark_buffer_dirty(cow);
289 *cow_ret = cow;
290 return 0;
291 }
292
293 enum mod_log_op {
294 MOD_LOG_KEY_REPLACE,
295 MOD_LOG_KEY_ADD,
296 MOD_LOG_KEY_REMOVE,
297 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
298 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
299 MOD_LOG_MOVE_KEYS,
300 MOD_LOG_ROOT_REPLACE,
301 };
302
303 struct tree_mod_move {
304 int dst_slot;
305 int nr_items;
306 };
307
308 struct tree_mod_root {
309 u64 logical;
310 u8 level;
311 };
312
313 struct tree_mod_elem {
314 struct rb_node node;
315 u64 logical;
316 u64 seq;
317 enum mod_log_op op;
318
319 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
320 int slot;
321
322 /* this is used for MOD_LOG_KEY_* and MOD_LOG_ROOT_REPLACE */
323 u64 generation;
324
325 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
326 struct btrfs_disk_key key;
327 u64 blockptr;
328
329 /* this is used for op == MOD_LOG_MOVE_KEYS */
330 struct tree_mod_move move;
331
332 /* this is used for op == MOD_LOG_ROOT_REPLACE */
333 struct tree_mod_root old_root;
334 };
335
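/*
 * A small worked example of what ends up in the log (a sketch, matching the
 * struct above): removing the pointer in slot 3 of a node records one
 * tree_mod_elem with op = MOD_LOG_KEY_REMOVE, slot = 3, and the key,
 * blockptr and generation that used to live in that slot, so a later rewind
 * can put the pointer back when reconstructing an old version of the node.
 */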
336 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
337 {
338 read_lock(&fs_info->tree_mod_log_lock);
339 }
340
341 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
342 {
343 read_unlock(&fs_info->tree_mod_log_lock);
344 }
345
346 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
347 {
348 write_lock(&fs_info->tree_mod_log_lock);
349 }
350
351 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
352 {
353 write_unlock(&fs_info->tree_mod_log_lock);
354 }
355
356 /*
357 * Pull a new tree mod seq number for our operation.
358 */
359 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
360 {
361 return atomic64_inc_return(&fs_info->tree_mod_seq);
362 }
363
364 /*
365 * This adds a new blocker to the tree mod log's blocker list if the @elem
366 * passed does not already have a sequence number set. So when a caller expects
367 * to record tree modifications, it should set elem->seq to zero
368 * before calling btrfs_get_tree_mod_seq.
369 * Returns a fresh, unused tree log modification sequence number, even if no new
370 * blocker was added.
371 */
372 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
373 struct seq_list *elem)
374 {
375 tree_mod_log_write_lock(fs_info);
376 spin_lock(&fs_info->tree_mod_seq_lock);
377 if (!elem->seq) {
378 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
379 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
380 }
381 spin_unlock(&fs_info->tree_mod_seq_lock);
382 tree_mod_log_write_unlock(fs_info);
383
384 return elem->seq;
385 }
386
387 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
388 struct seq_list *elem)
389 {
390 struct rb_root *tm_root;
391 struct rb_node *node;
392 struct rb_node *next;
393 struct seq_list *cur_elem;
394 struct tree_mod_elem *tm;
395 u64 min_seq = (u64)-1;
396 u64 seq_putting = elem->seq;
397
398 if (!seq_putting)
399 return;
400
401 spin_lock(&fs_info->tree_mod_seq_lock);
402 list_del(&elem->list);
403 elem->seq = 0;
404
405 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
406 if (cur_elem->seq < min_seq) {
407 if (seq_putting > cur_elem->seq) {
408 /*
409 * blocker with lower sequence number exists, we
410 * cannot remove anything from the log
411 */
412 spin_unlock(&fs_info->tree_mod_seq_lock);
413 return;
414 }
415 min_seq = cur_elem->seq;
416 }
417 }
418 spin_unlock(&fs_info->tree_mod_seq_lock);
419
420 /*
421 * anything that's lower than the lowest existing (read: blocked)
422 * sequence number can be removed from the tree.
423 */
424 tree_mod_log_write_lock(fs_info);
425 tm_root = &fs_info->tree_mod_log;
426 for (node = rb_first(tm_root); node; node = next) {
427 next = rb_next(node);
428 tm = container_of(node, struct tree_mod_elem, node);
429 if (tm->seq > min_seq)
430 continue;
431 rb_erase(node, tm_root);
432 kfree(tm);
433 }
434 tree_mod_log_write_unlock(fs_info);
435 }
436
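/*
 * A minimal sketch of the blocker pattern implemented by the two functions
 * above (as used, for example, by backref walking): the caller starts with
 * elem.seq == 0, registers a blocker to pin the log, does its work against
 * that sequence number, then drops the blocker so old entries can be
 * reclaimed:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... resolve old tree versions using elem.seq as time_seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */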
437 /*
438 * key order of the log:
439 * node/leaf start address -> sequence
440 *
441 * The 'start address' is the logical address of the *new* root node
442 * for root replace operations, or the logical address of the affected
443 * block for all other operations.
444 *
445 * Note: must be called with write lock (tree_mod_log_write_lock).
446 */
447 static noinline int
448 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
449 {
450 struct rb_root *tm_root;
451 struct rb_node **new;
452 struct rb_node *parent = NULL;
453 struct tree_mod_elem *cur;
454
455 BUG_ON(!tm);
456
457 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
458
459 tm_root = &fs_info->tree_mod_log;
460 new = &tm_root->rb_node;
461 while (*new) {
462 cur = container_of(*new, struct tree_mod_elem, node);
463 parent = *new;
464 if (cur->logical < tm->logical)
465 new = &((*new)->rb_left);
466 else if (cur->logical > tm->logical)
467 new = &((*new)->rb_right);
468 else if (cur->seq < tm->seq)
469 new = &((*new)->rb_left);
470 else if (cur->seq > tm->seq)
471 new = &((*new)->rb_right);
472 else
473 return -EEXIST;
474 }
475
476 rb_link_node(&tm->node, parent, new);
477 rb_insert_color(&tm->node, tm_root);
478 return 0;
479 }
480
481 /*
482 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
483 * returns zero with the tree_mod_log_lock acquired. The caller must hold
484 * this until all tree mod log insertions are recorded in the rb tree and then
485 * call tree_mod_log_write_unlock() to release.
486 */
487 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
488 struct extent_buffer *eb) {
489 smp_mb();
490 if (list_empty(&(fs_info)->tree_mod_seq_list))
491 return 1;
492 if (eb && btrfs_header_level(eb) == 0)
493 return 1;
494
495 tree_mod_log_write_lock(fs_info);
496 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
497 tree_mod_log_write_unlock(fs_info);
498 return 1;
499 }
500
501 return 0;
502 }
503
504 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
505 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
506 struct extent_buffer *eb)
507 {
508 smp_mb();
509 if (list_empty(&(fs_info)->tree_mod_seq_list))
510 return 0;
511 if (eb && btrfs_header_level(eb) == 0)
512 return 0;
513
514 return 1;
515 }
516
517 static struct tree_mod_elem *
518 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
519 enum mod_log_op op, gfp_t flags)
520 {
521 struct tree_mod_elem *tm;
522
523 tm = kzalloc(sizeof(*tm), flags);
524 if (!tm)
525 return NULL;
526
527 tm->logical = eb->start;
528 if (op != MOD_LOG_KEY_ADD) {
529 btrfs_node_key(eb, &tm->key, slot);
530 tm->blockptr = btrfs_node_blockptr(eb, slot);
531 }
532 tm->op = op;
533 tm->slot = slot;
534 tm->generation = btrfs_node_ptr_generation(eb, slot);
535 RB_CLEAR_NODE(&tm->node);
536
537 return tm;
538 }
539
540 static noinline int
541 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
542 struct extent_buffer *eb, int slot,
543 enum mod_log_op op, gfp_t flags)
544 {
545 struct tree_mod_elem *tm;
546 int ret;
547
548 if (!tree_mod_need_log(fs_info, eb))
549 return 0;
550
551 tm = alloc_tree_mod_elem(eb, slot, op, flags);
552 if (!tm)
553 return -ENOMEM;
554
555 if (tree_mod_dont_log(fs_info, eb)) {
556 kfree(tm);
557 return 0;
558 }
559
560 ret = __tree_mod_log_insert(fs_info, tm);
561 tree_mod_log_write_unlock(fs_info);
562 if (ret)
563 kfree(tm);
564
565 return ret;
566 }
567
568 static noinline int
569 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
570 struct extent_buffer *eb, int dst_slot, int src_slot,
571 int nr_items, gfp_t flags)
572 {
573 struct tree_mod_elem *tm = NULL;
574 struct tree_mod_elem **tm_list = NULL;
575 int ret = 0;
576 int i;
577 int locked = 0;
578
579 if (!tree_mod_need_log(fs_info, eb))
580 return 0;
581
582 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
583 if (!tm_list)
584 return -ENOMEM;
585
586 tm = kzalloc(sizeof(*tm), flags);
587 if (!tm) {
588 ret = -ENOMEM;
589 goto free_tms;
590 }
591
592 tm->logical = eb->start;
593 tm->slot = src_slot;
594 tm->move.dst_slot = dst_slot;
595 tm->move.nr_items = nr_items;
596 tm->op = MOD_LOG_MOVE_KEYS;
597
598 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
599 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
600 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
601 if (!tm_list[i]) {
602 ret = -ENOMEM;
603 goto free_tms;
604 }
605 }
606
607 if (tree_mod_dont_log(fs_info, eb))
608 goto free_tms;
609 locked = 1;
610
611 /*
612 * When we overwrite something during the move, we log these removals.
613 * This can only happen when we move towards the beginning of the
614 * buffer, i.e. dst_slot < src_slot.
615 */
616 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
617 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
618 if (ret)
619 goto free_tms;
620 }
621
622 ret = __tree_mod_log_insert(fs_info, tm);
623 if (ret)
624 goto free_tms;
625 tree_mod_log_write_unlock(fs_info);
626 kfree(tm_list);
627
628 return 0;
629 free_tms:
630 for (i = 0; i < nr_items; i++) {
631 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
632 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
633 kfree(tm_list[i]);
634 }
635 if (locked)
636 tree_mod_log_write_unlock(fs_info);
637 kfree(tm_list);
638 kfree(tm);
639
640 return ret;
641 }
642
643 static inline int
644 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
645 struct tree_mod_elem **tm_list,
646 int nritems)
647 {
648 int i, j;
649 int ret;
650
651 for (i = nritems - 1; i >= 0; i--) {
652 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
653 if (ret) {
654 for (j = nritems - 1; j > i; j--)
655 rb_erase(&tm_list[j]->node,
656 &fs_info->tree_mod_log);
657 return ret;
658 }
659 }
660
661 return 0;
662 }
663
664 static noinline int
665 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
666 struct extent_buffer *old_root,
667 struct extent_buffer *new_root, gfp_t flags,
668 int log_removal)
669 {
670 struct tree_mod_elem *tm = NULL;
671 struct tree_mod_elem **tm_list = NULL;
672 int nritems = 0;
673 int ret = 0;
674 int i;
675
676 if (!tree_mod_need_log(fs_info, NULL))
677 return 0;
678
679 if (log_removal && btrfs_header_level(old_root) > 0) {
680 nritems = btrfs_header_nritems(old_root);
681 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
682 flags);
683 if (!tm_list) {
684 ret = -ENOMEM;
685 goto free_tms;
686 }
687 for (i = 0; i < nritems; i++) {
688 tm_list[i] = alloc_tree_mod_elem(old_root, i,
689 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
690 if (!tm_list[i]) {
691 ret = -ENOMEM;
692 goto free_tms;
693 }
694 }
695 }
696
697 tm = kzalloc(sizeof(*tm), flags);
698 if (!tm) {
699 ret = -ENOMEM;
700 goto free_tms;
701 }
702
703 tm->logical = new_root->start;
704 tm->old_root.logical = old_root->start;
705 tm->old_root.level = btrfs_header_level(old_root);
706 tm->generation = btrfs_header_generation(old_root);
707 tm->op = MOD_LOG_ROOT_REPLACE;
708
709 if (tree_mod_dont_log(fs_info, NULL))
710 goto free_tms;
711
712 if (tm_list)
713 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
714 if (!ret)
715 ret = __tree_mod_log_insert(fs_info, tm);
716
717 tree_mod_log_write_unlock(fs_info);
718 if (ret)
719 goto free_tms;
720 kfree(tm_list);
721
722 return ret;
723
724 free_tms:
725 if (tm_list) {
726 for (i = 0; i < nritems; i++)
727 kfree(tm_list[i]);
728 kfree(tm_list);
729 }
730 kfree(tm);
731
732 return ret;
733 }
734
735 static struct tree_mod_elem *
736 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
737 int smallest)
738 {
739 struct rb_root *tm_root;
740 struct rb_node *node;
741 struct tree_mod_elem *cur = NULL;
742 struct tree_mod_elem *found = NULL;
743
744 tree_mod_log_read_lock(fs_info);
745 tm_root = &fs_info->tree_mod_log;
746 node = tm_root->rb_node;
747 while (node) {
748 cur = container_of(node, struct tree_mod_elem, node);
749 if (cur->logical < start) {
750 node = node->rb_left;
751 } else if (cur->logical > start) {
752 node = node->rb_right;
753 } else if (cur->seq < min_seq) {
754 node = node->rb_left;
755 } else if (!smallest) {
756 /* we want the node with the highest seq */
757 if (found)
758 BUG_ON(found->seq > cur->seq);
759 found = cur;
760 node = node->rb_left;
761 } else if (cur->seq > min_seq) {
762 /* we want the node with the smallest seq */
763 if (found)
764 BUG_ON(found->seq < cur->seq);
765 found = cur;
766 node = node->rb_right;
767 } else {
768 found = cur;
769 break;
770 }
771 }
772 tree_mod_log_read_unlock(fs_info);
773
774 return found;
775 }
776
777 /*
778 * this returns the element from the log with the smallest time sequence
779 * value that's in the log (the oldest log item). any element with a time
780 * sequence lower than min_seq will be ignored.
781 */
782 static struct tree_mod_elem *
783 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
784 u64 min_seq)
785 {
786 return __tree_mod_log_search(fs_info, start, min_seq, 1);
787 }
788
789 /*
790 * this returns the element from the log with the largest time sequence
791 * value that's in the log (the most recent log item). any element with
792 * a time sequence lower than min_seq will be ignored.
793 */
794 static struct tree_mod_elem *
795 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
796 {
797 return __tree_mod_log_search(fs_info, start, min_seq, 0);
798 }
799
800 static noinline int
801 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
802 struct extent_buffer *src, unsigned long dst_offset,
803 unsigned long src_offset, int nr_items)
804 {
805 int ret = 0;
806 struct tree_mod_elem **tm_list = NULL;
807 struct tree_mod_elem **tm_list_add, **tm_list_rem;
808 int i;
809 int locked = 0;
810
811 if (!tree_mod_need_log(fs_info, NULL))
812 return 0;
813
814 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
815 return 0;
816
817 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
818 GFP_NOFS);
819 if (!tm_list)
820 return -ENOMEM;
821
822 tm_list_add = tm_list;
823 tm_list_rem = tm_list + nr_items;
824 for (i = 0; i < nr_items; i++) {
825 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
826 MOD_LOG_KEY_REMOVE, GFP_NOFS);
827 if (!tm_list_rem[i]) {
828 ret = -ENOMEM;
829 goto free_tms;
830 }
831
832 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
833 MOD_LOG_KEY_ADD, GFP_NOFS);
834 if (!tm_list_add[i]) {
835 ret = -ENOMEM;
836 goto free_tms;
837 }
838 }
839
840 if (tree_mod_dont_log(fs_info, NULL))
841 goto free_tms;
842 locked = 1;
843
844 for (i = 0; i < nr_items; i++) {
845 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
846 if (ret)
847 goto free_tms;
848 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
849 if (ret)
850 goto free_tms;
851 }
852
853 tree_mod_log_write_unlock(fs_info);
854 kfree(tm_list);
855
856 return 0;
857
858 free_tms:
859 for (i = 0; i < nr_items * 2; i++) {
860 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
861 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
862 kfree(tm_list[i]);
863 }
864 if (locked)
865 tree_mod_log_write_unlock(fs_info);
866 kfree(tm_list);
867
868 return ret;
869 }
870
871 static inline void
872 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
873 int dst_offset, int src_offset, int nr_items)
874 {
875 int ret;
876 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
877 nr_items, GFP_NOFS);
878 BUG_ON(ret < 0);
879 }
880
881 static noinline void
882 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
883 struct extent_buffer *eb, int slot, int atomic)
884 {
885 int ret;
886
887 ret = tree_mod_log_insert_key(fs_info, eb, slot,
888 MOD_LOG_KEY_REPLACE,
889 atomic ? GFP_ATOMIC : GFP_NOFS);
890 BUG_ON(ret < 0);
891 }
892
893 static noinline int
894 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
895 {
896 struct tree_mod_elem **tm_list = NULL;
897 int nritems = 0;
898 int i;
899 int ret = 0;
900
901 if (btrfs_header_level(eb) == 0)
902 return 0;
903
904 if (!tree_mod_need_log(fs_info, NULL))
905 return 0;
906
907 nritems = btrfs_header_nritems(eb);
908 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
909 if (!tm_list)
910 return -ENOMEM;
911
912 for (i = 0; i < nritems; i++) {
913 tm_list[i] = alloc_tree_mod_elem(eb, i,
914 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
915 if (!tm_list[i]) {
916 ret = -ENOMEM;
917 goto free_tms;
918 }
919 }
920
921 if (tree_mod_dont_log(fs_info, eb))
922 goto free_tms;
923
924 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
925 tree_mod_log_write_unlock(fs_info);
926 if (ret)
927 goto free_tms;
928 kfree(tm_list);
929
930 return 0;
931
932 free_tms:
933 for (i = 0; i < nritems; i++)
934 kfree(tm_list[i]);
935 kfree(tm_list);
936
937 return ret;
938 }
939
940 static noinline void
941 tree_mod_log_set_root_pointer(struct btrfs_root *root,
942 struct extent_buffer *new_root_node,
943 int log_removal)
944 {
945 int ret;
946 ret = tree_mod_log_insert_root(root->fs_info, root->node,
947 new_root_node, GFP_NOFS, log_removal);
948 BUG_ON(ret < 0);
949 }
950
951 /*
952 * check if the tree block can be shared by multiple trees
953 */
954 int btrfs_block_can_be_shared(struct btrfs_root *root,
955 struct extent_buffer *buf)
956 {
957 /*
958 * Tree blocks not in reference counted trees and tree roots
959 * are never shared. If a block was allocated after the last
960 * snapshot and the block was not allocated by tree relocation,
961 * we know the block is not shared.
962 */
963 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
964 buf != root->node && buf != root->commit_root &&
965 (btrfs_header_generation(buf) <=
966 btrfs_root_last_snapshot(&root->root_item) ||
967 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
968 return 1;
969 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
970 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
971 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
972 return 1;
973 #endif
974 return 0;
975 }
976
977 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
978 struct btrfs_root *root,
979 struct extent_buffer *buf,
980 struct extent_buffer *cow,
981 int *last_ref)
982 {
983 u64 refs;
984 u64 owner;
985 u64 flags;
986 u64 new_flags = 0;
987 int ret;
988
989 /*
990 * Backrefs update rules:
991 *
992 * Always use full backrefs for extent pointers in tree block
993 * allocated by tree relocation.
994 *
995 * If a shared tree block is no longer referenced by its owner
996 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
997 * use full backrefs for extent pointers in tree block.
998 *
999 * If a tree block is being relocated
1000 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1001 * use full backrefs for extent pointers in tree block.
1002 * The reason for this is some operations (such as drop tree)
1003 * are only allowed for blocks that use full backrefs.
1004 */
1005
1006 if (btrfs_block_can_be_shared(root, buf)) {
1007 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1008 btrfs_header_level(buf), 1,
1009 &refs, &flags);
1010 if (ret)
1011 return ret;
1012 if (refs == 0) {
1013 ret = -EROFS;
1014 btrfs_handle_fs_error(root->fs_info, ret, NULL);
1015 return ret;
1016 }
1017 } else {
1018 refs = 1;
1019 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1020 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1021 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1022 else
1023 flags = 0;
1024 }
1025
1026 owner = btrfs_header_owner(buf);
1027 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1028 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1029
1030 if (refs > 1) {
1031 if ((owner == root->root_key.objectid ||
1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1034 ret = btrfs_inc_ref(trans, root, buf, 1);
1035 BUG_ON(ret); /* -ENOMEM */
1036
1037 if (root->root_key.objectid ==
1038 BTRFS_TREE_RELOC_OBJECTID) {
1039 ret = btrfs_dec_ref(trans, root, buf, 0);
1040 BUG_ON(ret); /* -ENOMEM */
1041 ret = btrfs_inc_ref(trans, root, cow, 1);
1042 BUG_ON(ret); /* -ENOMEM */
1043 }
1044 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1045 } else {
1046
1047 if (root->root_key.objectid ==
1048 BTRFS_TREE_RELOC_OBJECTID)
1049 ret = btrfs_inc_ref(trans, root, cow, 1);
1050 else
1051 ret = btrfs_inc_ref(trans, root, cow, 0);
1052 BUG_ON(ret); /* -ENOMEM */
1053 }
1054 if (new_flags != 0) {
1055 int level = btrfs_header_level(buf);
1056
1057 ret = btrfs_set_disk_extent_flags(trans, root,
1058 buf->start,
1059 buf->len,
1060 new_flags, level, 0);
1061 if (ret)
1062 return ret;
1063 }
1064 } else {
1065 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1066 if (root->root_key.objectid ==
1067 BTRFS_TREE_RELOC_OBJECTID)
1068 ret = btrfs_inc_ref(trans, root, cow, 1);
1069 else
1070 ret = btrfs_inc_ref(trans, root, cow, 0);
1071 BUG_ON(ret); /* -ENOMEM */
1072 ret = btrfs_dec_ref(trans, root, buf, 1);
1073 BUG_ON(ret); /* -ENOMEM */
1074 }
1075 clean_tree_block(trans, root->fs_info, buf);
1076 *last_ref = 1;
1077 }
1078 return 0;
1079 }
1080
1081 /*
1082 * does the dirty work in cow of a single block. The parent block (if
1083 * supplied) is updated to point to the new cow copy. The new buffer is marked
1084 * dirty and returned locked. If you modify the block it needs to be marked
1085 * dirty again.
1086 *
1087 * search_start -- an allocation hint for the new block
1088 *
1089 * empty_size -- a hint that you plan on doing more cow. This is the size in
1090 * bytes the allocator should try to find free next to the block it returns.
1091 * This is just a hint and may be ignored by the allocator.
1092 */
1093 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1094 struct btrfs_root *root,
1095 struct extent_buffer *buf,
1096 struct extent_buffer *parent, int parent_slot,
1097 struct extent_buffer **cow_ret,
1098 u64 search_start, u64 empty_size)
1099 {
1100 struct btrfs_disk_key disk_key;
1101 struct extent_buffer *cow;
1102 int level, ret;
1103 int last_ref = 0;
1104 int unlock_orig = 0;
1105 u64 parent_start;
1106
1107 if (*cow_ret == buf)
1108 unlock_orig = 1;
1109
1110 btrfs_assert_tree_locked(buf);
1111
1112 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1113 trans->transid != root->fs_info->running_transaction->transid);
1114 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1115 trans->transid != root->last_trans);
1116
1117 level = btrfs_header_level(buf);
1118
1119 if (level == 0)
1120 btrfs_item_key(buf, &disk_key, 0);
1121 else
1122 btrfs_node_key(buf, &disk_key, 0);
1123
1124 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1125 if (parent)
1126 parent_start = parent->start;
1127 else
1128 parent_start = 0;
1129 } else
1130 parent_start = 0;
1131
1132 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1133 root->root_key.objectid, &disk_key, level,
1134 search_start, empty_size);
1135 if (IS_ERR(cow))
1136 return PTR_ERR(cow);
1137
1138 /* cow is set to blocking by btrfs_init_new_buffer */
1139
1140 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1141 btrfs_set_header_bytenr(cow, cow->start);
1142 btrfs_set_header_generation(cow, trans->transid);
1143 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1144 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1145 BTRFS_HEADER_FLAG_RELOC);
1146 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1147 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1148 else
1149 btrfs_set_header_owner(cow, root->root_key.objectid);
1150
1151 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1152 BTRFS_FSID_SIZE);
1153
1154 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1155 if (ret) {
1156 btrfs_abort_transaction(trans, root, ret);
1157 return ret;
1158 }
1159
1160 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1161 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1162 if (ret) {
1163 btrfs_abort_transaction(trans, root, ret);
1164 return ret;
1165 }
1166 }
1167
1168 if (buf == root->node) {
1169 WARN_ON(parent && parent != buf);
1170 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1171 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1172 parent_start = buf->start;
1173 else
1174 parent_start = 0;
1175
1176 extent_buffer_get(cow);
1177 tree_mod_log_set_root_pointer(root, cow, 1);
1178 rcu_assign_pointer(root->node, cow);
1179
1180 btrfs_free_tree_block(trans, root, buf, parent_start,
1181 last_ref);
1182 free_extent_buffer(buf);
1183 add_root_to_dirty_list(root);
1184 } else {
1185 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1186 parent_start = parent->start;
1187 else
1188 parent_start = 0;
1189
1190 WARN_ON(trans->transid != btrfs_header_generation(parent));
1191 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1192 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1193 btrfs_set_node_blockptr(parent, parent_slot,
1194 cow->start);
1195 btrfs_set_node_ptr_generation(parent, parent_slot,
1196 trans->transid);
1197 btrfs_mark_buffer_dirty(parent);
1198 if (last_ref) {
1199 ret = tree_mod_log_free_eb(root->fs_info, buf);
1200 if (ret) {
1201 btrfs_abort_transaction(trans, root, ret);
1202 return ret;
1203 }
1204 }
1205 btrfs_free_tree_block(trans, root, buf, parent_start,
1206 last_ref);
1207 }
1208 if (unlock_orig)
1209 btrfs_tree_unlock(buf);
1210 free_extent_buffer_stale(buf);
1211 btrfs_mark_buffer_dirty(cow);
1212 *cow_ret = cow;
1213 return 0;
1214 }
1215
1216 /*
1217 * returns the logical address of the oldest predecessor of the given root.
1218 * entries older than time_seq are ignored.
1219 */
1220 static struct tree_mod_elem *
1221 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1222 struct extent_buffer *eb_root, u64 time_seq)
1223 {
1224 struct tree_mod_elem *tm;
1225 struct tree_mod_elem *found = NULL;
1226 u64 root_logical = eb_root->start;
1227 int looped = 0;
1228
1229 if (!time_seq)
1230 return NULL;
1231
1232 /*
1233 * the very last operation that's logged for a root is the
1234 * replacement operation (if it is replaced at all). this has
1235 * the logical address of the *new* root, making it the very
1236 * first operation that's logged for this root.
1237 */
1238 while (1) {
1239 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1240 time_seq);
1241 if (!looped && !tm)
1242 return NULL;
1243 /*
1244 * if there are no tree operations for the oldest root, we simply
1245 * return it. this should only happen if that (old) root is at
1246 * level 0.
1247 */
1248 if (!tm)
1249 break;
1250
1251 /*
1252 * if there's an operation that's not a root replacement, we
1253 * found the oldest version of our root. normally, we'll find a
1254 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1255 */
1256 if (tm->op != MOD_LOG_ROOT_REPLACE)
1257 break;
1258
1259 found = tm;
1260 root_logical = tm->old_root.logical;
1261 looped = 1;
1262 }
1263
1264 /* if there's no old root to return, return what we found instead */
1265 if (!found)
1266 found = tm;
1267
1268 return found;
1269 }
1270
1271 /*
1272 * tm is a pointer to the first operation to rewind within eb. then, all
1273 * previous operations will be rewound (until we reach something older than
1274 * time_seq).
1275 */
1276 static void
1277 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1278 u64 time_seq, struct tree_mod_elem *first_tm)
1279 {
1280 u32 n;
1281 struct rb_node *next;
1282 struct tree_mod_elem *tm = first_tm;
1283 unsigned long o_dst;
1284 unsigned long o_src;
1285 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1286
1287 n = btrfs_header_nritems(eb);
1288 tree_mod_log_read_lock(fs_info);
1289 while (tm && tm->seq >= time_seq) {
1290 /*
1291 * all the operations are recorded with the operator used for
1292 * the modification. as we're going backwards, we do the
1293 * opposite of each operation here.
1294 */
1295 switch (tm->op) {
1296 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1297 BUG_ON(tm->slot < n);
1298 /* Fallthrough */
1299 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1300 case MOD_LOG_KEY_REMOVE:
1301 btrfs_set_node_key(eb, &tm->key, tm->slot);
1302 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1303 btrfs_set_node_ptr_generation(eb, tm->slot,
1304 tm->generation);
1305 n++;
1306 break;
1307 case MOD_LOG_KEY_REPLACE:
1308 BUG_ON(tm->slot >= n);
1309 btrfs_set_node_key(eb, &tm->key, tm->slot);
1310 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1311 btrfs_set_node_ptr_generation(eb, tm->slot,
1312 tm->generation);
1313 break;
1314 case MOD_LOG_KEY_ADD:
1315 /* if a move operation is needed it's in the log */
1316 n--;
1317 break;
1318 case MOD_LOG_MOVE_KEYS:
1319 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1320 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1321 memmove_extent_buffer(eb, o_dst, o_src,
1322 tm->move.nr_items * p_size);
1323 break;
1324 case MOD_LOG_ROOT_REPLACE:
1325 /*
1326 * this operation is special. for roots, this must be
1327 * handled explicitly before rewinding.
1328 * for non-roots, this operation may exist if the node
1329 * was a root: root A -> child B; then A gets empty and
1330 * B is promoted to the new root. in the mod log, we'll
1331 * have a root-replace operation for B, a tree block
1332 * that is not a root here. we simply ignore that operation.
1333 */
1334 break;
1335 }
1336 next = rb_next(&tm->node);
1337 if (!next)
1338 break;
1339 tm = container_of(next, struct tree_mod_elem, node);
1340 if (tm->logical != first_tm->logical)
1341 break;
1342 }
1343 tree_mod_log_read_unlock(fs_info);
1344 btrfs_set_header_nritems(eb, n);
1345 }
1346
1347 /*
1348 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1349 * is returned. If rewind operations happen, a fresh buffer is returned. The
1350 * returned buffer is always read-locked. If the returned buffer is not the
1351 * input buffer, the lock on the input buffer is released and the input buffer
1352 * is freed (its refcount is decremented).
1353 */
1354 static struct extent_buffer *
1355 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1356 struct extent_buffer *eb, u64 time_seq)
1357 {
1358 struct extent_buffer *eb_rewin;
1359 struct tree_mod_elem *tm;
1360
1361 if (!time_seq)
1362 return eb;
1363
1364 if (btrfs_header_level(eb) == 0)
1365 return eb;
1366
1367 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1368 if (!tm)
1369 return eb;
1370
1371 btrfs_set_path_blocking(path);
1372 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1373
1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1375 BUG_ON(tm->slot != 0);
1376 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
1377 eb->len);
1378 if (!eb_rewin) {
1379 btrfs_tree_read_unlock_blocking(eb);
1380 free_extent_buffer(eb);
1381 return NULL;
1382 }
1383 btrfs_set_header_bytenr(eb_rewin, eb->start);
1384 btrfs_set_header_backref_rev(eb_rewin,
1385 btrfs_header_backref_rev(eb));
1386 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1387 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1388 } else {
1389 eb_rewin = btrfs_clone_extent_buffer(eb);
1390 if (!eb_rewin) {
1391 btrfs_tree_read_unlock_blocking(eb);
1392 free_extent_buffer(eb);
1393 return NULL;
1394 }
1395 }
1396
1397 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1398 btrfs_tree_read_unlock_blocking(eb);
1399 free_extent_buffer(eb);
1400
1401 extent_buffer_get(eb_rewin);
1402 btrfs_tree_read_lock(eb_rewin);
1403 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1404 WARN_ON(btrfs_header_nritems(eb_rewin) >
1405 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1406
1407 return eb_rewin;
1408 }
1409
1410 /*
1411 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1412 * value. If there are no changes, the current root->root_node is returned. If
1413 * anything changed in between, there's a fresh buffer allocated on which the
1414 * rewind operations are done. In any case, the returned buffer is read locked.
1415 * Returns NULL on error (with no locks held).
1416 */
1417 static inline struct extent_buffer *
1418 get_old_root(struct btrfs_root *root, u64 time_seq)
1419 {
1420 struct tree_mod_elem *tm;
1421 struct extent_buffer *eb = NULL;
1422 struct extent_buffer *eb_root;
1423 struct extent_buffer *old;
1424 struct tree_mod_root *old_root = NULL;
1425 u64 old_generation = 0;
1426 u64 logical;
1427
1428 eb_root = btrfs_read_lock_root_node(root);
1429 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1430 if (!tm)
1431 return eb_root;
1432
1433 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1434 old_root = &tm->old_root;
1435 old_generation = tm->generation;
1436 logical = old_root->logical;
1437 } else {
1438 logical = eb_root->start;
1439 }
1440
1441 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1442 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1443 btrfs_tree_read_unlock(eb_root);
1444 free_extent_buffer(eb_root);
1445 old = read_tree_block(root, logical, 0);
1446 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1447 if (!IS_ERR(old))
1448 free_extent_buffer(old);
1449 btrfs_warn(root->fs_info,
1450 "failed to read tree block %llu from get_old_root", logical);
1451 } else {
1452 eb = btrfs_clone_extent_buffer(old);
1453 free_extent_buffer(old);
1454 }
1455 } else if (old_root) {
1456 btrfs_tree_read_unlock(eb_root);
1457 free_extent_buffer(eb_root);
1458 eb = alloc_dummy_extent_buffer(root->fs_info, logical,
1459 root->nodesize);
1460 } else {
1461 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1462 eb = btrfs_clone_extent_buffer(eb_root);
1463 btrfs_tree_read_unlock_blocking(eb_root);
1464 free_extent_buffer(eb_root);
1465 }
1466
1467 if (!eb)
1468 return NULL;
1469 extent_buffer_get(eb);
1470 btrfs_tree_read_lock(eb);
1471 if (old_root) {
1472 btrfs_set_header_bytenr(eb, eb->start);
1473 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1474 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1475 btrfs_set_header_level(eb, old_root->level);
1476 btrfs_set_header_generation(eb, old_generation);
1477 }
1478 if (tm)
1479 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1480 else
1481 WARN_ON(btrfs_header_level(eb) != 0);
1482 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1483
1484 return eb;
1485 }
1486
1487 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1488 {
1489 struct tree_mod_elem *tm;
1490 int level;
1491 struct extent_buffer *eb_root = btrfs_root_node(root);
1492
1493 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1494 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1495 level = tm->old_root.level;
1496 } else {
1497 level = btrfs_header_level(eb_root);
1498 }
1499 free_extent_buffer(eb_root);
1500
1501 return level;
1502 }
1503
1504 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1505 struct btrfs_root *root,
1506 struct extent_buffer *buf)
1507 {
1508 if (btrfs_test_is_dummy_root(root))
1509 return 0;
1510
1511 /* ensure we can see the force_cow */
1512 smp_rmb();
1513
1514 /*
1515 * We do not need to cow a block if
1516 * 1) this block is not created or changed in this transaction;
1517 * 2) this block does not belong to TREE_RELOC tree;
1518 * 3) the root is not forced COW.
1519 *
1520 * What is forced COW:
1521 * when we create a snapshot while committing the transaction,
1522 * after we've finished copying the src root, we must COW the shared
1523 * block to ensure metadata consistency.
1524 */
1525 if (btrfs_header_generation(buf) == trans->transid &&
1526 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1527 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1528 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1529 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1530 return 0;
1531 return 1;
1532 }
1533
1534 /*
1535 * cows a single block, see __btrfs_cow_block for the real work.
1536 * This version of it has extra checks so that a block isn't COWed more than
1537 * once per transaction, as long as it hasn't been written yet
1538 */
1539 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1540 struct btrfs_root *root, struct extent_buffer *buf,
1541 struct extent_buffer *parent, int parent_slot,
1542 struct extent_buffer **cow_ret)
1543 {
1544 u64 search_start;
1545 int ret;
1546
1547 if (trans->transaction != root->fs_info->running_transaction)
1548 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1549 trans->transid,
1550 root->fs_info->running_transaction->transid);
1551
1552 if (trans->transid != root->fs_info->generation)
1553 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1554 trans->transid, root->fs_info->generation);
1555
1556 if (!should_cow_block(trans, root, buf)) {
1557 trans->dirty = true;
1558 *cow_ret = buf;
1559 return 0;
1560 }
1561
1562 search_start = buf->start & ~((u64)SZ_1G - 1);
1563
1564 if (parent)
1565 btrfs_set_lock_blocking(parent);
1566 btrfs_set_lock_blocking(buf);
1567
1568 ret = __btrfs_cow_block(trans, root, buf, parent,
1569 parent_slot, cow_ret, search_start, 0);
1570
1571 trace_btrfs_cow_block(root, buf, *cow_ret);
1572
1573 return ret;
1574 }
1575
1576 /*
1577 * helper function for defrag to decide if two blocks pointed to by a
1578 * node are actually close by
1579 */
1580 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1581 {
1582 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1583 return 1;
1584 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1585 return 1;
1586 return 0;
1587 }
1588
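/*
 * Worked example for close_blocks() above, assuming a 16KiB blocksize:
 * with blocknr = 0 and other = 32768, other - (blocknr + 16384) = 16384,
 * which is below the 32768 threshold, so the blocks count as close; with
 * other = 131072 the gap is 114688 and they do not.
 */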
1589 /*
1590 * compare two keys in a memcmp fashion
1591 */
1592 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1593 {
1594 struct btrfs_key k1;
1595
1596 btrfs_disk_key_to_cpu(&k1, disk);
1597
1598 return btrfs_comp_cpu_keys(&k1, k2);
1599 }
1600
1601 /*
1602 * same as comp_keys only with two btrfs_key's
1603 */
1604 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1605 {
1606 if (k1->objectid > k2->objectid)
1607 return 1;
1608 if (k1->objectid < k2->objectid)
1609 return -1;
1610 if (k1->type > k2->type)
1611 return 1;
1612 if (k1->type < k2->type)
1613 return -1;
1614 if (k1->offset > k2->offset)
1615 return 1;
1616 if (k1->offset < k2->offset)
1617 return -1;
1618 return 0;
1619 }
1620
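/*
 * A short example of the resulting ordering (key type values taken from the
 * on-disk format): with
 *
 *	struct btrfs_key k1 = { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY,
 *				.offset = 0 };
 *	struct btrfs_key k2 = { .objectid = 256, .type = BTRFS_EXTENT_DATA_KEY,
 *				.offset = 0 };
 *
 * btrfs_comp_cpu_keys(&k1, &k2) returns -1: the objectids tie, so the types
 * decide (BTRFS_DIR_ITEM_KEY is 84, BTRFS_EXTENT_DATA_KEY is 108) and the
 * offsets are never compared.
 */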
1621 /*
1622 * this is used by the defrag code to go through all the
1623 * leaves pointed to by a node and reallocate them so that
1624 * disk order is close to key order
1625 */
1626 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1627 struct btrfs_root *root, struct extent_buffer *parent,
1628 int start_slot, u64 *last_ret,
1629 struct btrfs_key *progress)
1630 {
1631 struct extent_buffer *cur;
1632 u64 blocknr;
1633 u64 gen;
1634 u64 search_start = *last_ret;
1635 u64 last_block = 0;
1636 u64 other;
1637 u32 parent_nritems;
1638 int end_slot;
1639 int i;
1640 int err = 0;
1641 int parent_level;
1642 int uptodate;
1643 u32 blocksize;
1644 int progress_passed = 0;
1645 struct btrfs_disk_key disk_key;
1646
1647 parent_level = btrfs_header_level(parent);
1648
1649 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1650 WARN_ON(trans->transid != root->fs_info->generation);
1651
1652 parent_nritems = btrfs_header_nritems(parent);
1653 blocksize = root->nodesize;
1654 end_slot = parent_nritems - 1;
1655
1656 if (parent_nritems <= 1)
1657 return 0;
1658
1659 btrfs_set_lock_blocking(parent);
1660
1661 for (i = start_slot; i <= end_slot; i++) {
1662 int close = 1;
1663
1664 btrfs_node_key(parent, &disk_key, i);
1665 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1666 continue;
1667
1668 progress_passed = 1;
1669 blocknr = btrfs_node_blockptr(parent, i);
1670 gen = btrfs_node_ptr_generation(parent, i);
1671 if (last_block == 0)
1672 last_block = blocknr;
1673
1674 if (i > 0) {
1675 other = btrfs_node_blockptr(parent, i - 1);
1676 close = close_blocks(blocknr, other, blocksize);
1677 }
1678 if (!close && i < end_slot) {
1679 other = btrfs_node_blockptr(parent, i + 1);
1680 close = close_blocks(blocknr, other, blocksize);
1681 }
1682 if (close) {
1683 last_block = blocknr;
1684 continue;
1685 }
1686
1687 cur = btrfs_find_tree_block(root->fs_info, blocknr);
1688 if (cur)
1689 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1690 else
1691 uptodate = 0;
1692 if (!cur || !uptodate) {
1693 if (!cur) {
1694 cur = read_tree_block(root, blocknr, gen);
1695 if (IS_ERR(cur)) {
1696 return PTR_ERR(cur);
1697 } else if (!extent_buffer_uptodate(cur)) {
1698 free_extent_buffer(cur);
1699 return -EIO;
1700 }
1701 } else if (!uptodate) {
1702 err = btrfs_read_buffer(cur, gen);
1703 if (err) {
1704 free_extent_buffer(cur);
1705 return err;
1706 }
1707 }
1708 }
1709 if (search_start == 0)
1710 search_start = last_block;
1711
1712 btrfs_tree_lock(cur);
1713 btrfs_set_lock_blocking(cur);
1714 err = __btrfs_cow_block(trans, root, cur, parent, i,
1715 &cur, search_start,
1716 min(16 * blocksize,
1717 (end_slot - i) * blocksize));
1718 if (err) {
1719 btrfs_tree_unlock(cur);
1720 free_extent_buffer(cur);
1721 break;
1722 }
1723 search_start = cur->start;
1724 last_block = cur->start;
1725 *last_ret = search_start;
1726 btrfs_tree_unlock(cur);
1727 free_extent_buffer(cur);
1728 }
1729 return err;
1730 }
1731
1732 /*
1733 * The leaf data grows from end-to-front in the node.
1734 * this returns the address of the start of the last item,
1735 * which is the stop of the leaf data stack
1736 */
1737 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1738 struct extent_buffer *leaf)
1739 {
1740 u32 nr = btrfs_header_nritems(leaf);
1741 if (nr == 0)
1742 return BTRFS_LEAF_DATA_SIZE(root);
1743 return btrfs_item_offset_nr(leaf, nr - 1);
1744 }
1745
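/*
 * For example, in a leaf holding two items whose data take 100 and 50 bytes,
 * item 0's data sits at offset BTRFS_LEAF_DATA_SIZE(root) - 100 and item 1's
 * at BTRFS_LEAF_DATA_SIZE(root) - 150; leaf_data_end() then returns that
 * last offset, the lowest address used by item data so far.
 */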
1746
1747 /*
1748 * search for key in the extent_buffer. The items start at offset p,
1749 * and they are item_size apart. There are 'max' items in p.
1750 *
1751 * the slot in the array is returned via slot, and it points to
1752 * the place where you would insert key if it is not found in
1753 * the array.
1754 *
1755 * slot may point to max if the key is bigger than all of the keys
1756 */
1757 static noinline int generic_bin_search(struct extent_buffer *eb,
1758 unsigned long p,
1759 int item_size, struct btrfs_key *key,
1760 int max, int *slot)
1761 {
1762 int low = 0;
1763 int high = max;
1764 int mid;
1765 int ret;
1766 struct btrfs_disk_key *tmp = NULL;
1767 struct btrfs_disk_key unaligned;
1768 unsigned long offset;
1769 char *kaddr = NULL;
1770 unsigned long map_start = 0;
1771 unsigned long map_len = 0;
1772 int err;
1773
1774 while (low < high) {
1775 mid = (low + high) / 2;
1776 offset = p + mid * item_size;
1777
1778 if (!kaddr || offset < map_start ||
1779 (offset + sizeof(struct btrfs_disk_key)) >
1780 map_start + map_len) {
1781
1782 err = map_private_extent_buffer(eb, offset,
1783 sizeof(struct btrfs_disk_key),
1784 &kaddr, &map_start, &map_len);
1785
1786 if (!err) {
1787 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1788 map_start);
1789 } else if (err == 1) {
1790 read_extent_buffer(eb, &unaligned,
1791 offset, sizeof(unaligned));
1792 tmp = &unaligned;
1793 } else {
1794 return err;
1795 }
1796
1797 } else {
1798 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1799 map_start);
1800 }
1801 ret = comp_keys(tmp, key);
1802
1803 if (ret < 0)
1804 low = mid + 1;
1805 else if (ret > 0)
1806 high = mid;
1807 else {
1808 *slot = mid;
1809 return 0;
1810 }
1811 }
1812 *slot = low;
1813 return 1;
1814 }
1815
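/*
 * A small sketch of how callers interpret the result: 0 means an exact match
 * and *slot is that item; 1 means the key was not found and *slot is where
 * it would be inserted; a negative value is an error from mapping the
 * extent buffer. Interior-node searches typically step back one slot on a
 * miss so they follow the last key that is <= the search key:
 *
 *	ret = btrfs_bin_search(eb, &key, level, &slot);
 *	if (ret < 0)
 *		return ret;
 *	if (ret && level > 0 && slot > 0)
 *		slot--;
 */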
1816 /*
1817 * simple bin_search frontend that does the right thing for
1818 * leaves vs nodes
1819 */
1820 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1821 int level, int *slot)
1822 {
1823 if (level == 0)
1824 return generic_bin_search(eb,
1825 offsetof(struct btrfs_leaf, items),
1826 sizeof(struct btrfs_item),
1827 key, btrfs_header_nritems(eb),
1828 slot);
1829 else
1830 return generic_bin_search(eb,
1831 offsetof(struct btrfs_node, ptrs),
1832 sizeof(struct btrfs_key_ptr),
1833 key, btrfs_header_nritems(eb),
1834 slot);
1835 }
1836
1837 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1838 int level, int *slot)
1839 {
1840 return bin_search(eb, key, level, slot);
1841 }
1842
1843 static void root_add_used(struct btrfs_root *root, u32 size)
1844 {
1845 spin_lock(&root->accounting_lock);
1846 btrfs_set_root_used(&root->root_item,
1847 btrfs_root_used(&root->root_item) + size);
1848 spin_unlock(&root->accounting_lock);
1849 }
1850
1851 static void root_sub_used(struct btrfs_root *root, u32 size)
1852 {
1853 spin_lock(&root->accounting_lock);
1854 btrfs_set_root_used(&root->root_item,
1855 btrfs_root_used(&root->root_item) - size);
1856 spin_unlock(&root->accounting_lock);
1857 }
1858
1859 /* given a node and slot number, this reads the block it points to. The
1860 * extent buffer is returned with a reference taken (but unlocked).
1861 * NULL is returned on error.
1862 */
1863 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1864 struct extent_buffer *parent, int slot)
1865 {
1866 int level = btrfs_header_level(parent);
1867 struct extent_buffer *eb;
1868
1869 if (slot < 0)
1870 return NULL;
1871 if (slot >= btrfs_header_nritems(parent))
1872 return NULL;
1873
1874 BUG_ON(level == 0);
1875
1876 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1877 btrfs_node_ptr_generation(parent, slot));
1878 if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
1879 if (!IS_ERR(eb))
1880 free_extent_buffer(eb);
1881 eb = NULL;
1882 }
1883
1884 return eb;
1885 }
1886
1887 /*
1888 * node level balancing, used to make sure nodes are in proper order for
1889 * item deletion. We balance from the top down, so we have to make sure
1890 * that a deletion won't leave a node completely empty later on.
1891 */
1892 static noinline int balance_level(struct btrfs_trans_handle *trans,
1893 struct btrfs_root *root,
1894 struct btrfs_path *path, int level)
1895 {
1896 struct extent_buffer *right = NULL;
1897 struct extent_buffer *mid;
1898 struct extent_buffer *left = NULL;
1899 struct extent_buffer *parent = NULL;
1900 int ret = 0;
1901 int wret;
1902 int pslot;
1903 int orig_slot = path->slots[level];
1904 u64 orig_ptr;
1905
1906 if (level == 0)
1907 return 0;
1908
1909 mid = path->nodes[level];
1910
1911 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1912 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1913 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1914
1915 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1916
1917 if (level < BTRFS_MAX_LEVEL - 1) {
1918 parent = path->nodes[level + 1];
1919 pslot = path->slots[level + 1];
1920 }
1921
1922 /*
1923 * deal with the case where there is only one pointer in the root
1924 * by promoting the node below to a root
1925 */
1926 if (!parent) {
1927 struct extent_buffer *child;
1928
1929 if (btrfs_header_nritems(mid) != 1)
1930 return 0;
1931
1932 /* promote the child to a root */
1933 child = read_node_slot(root, mid, 0);
1934 if (!child) {
1935 ret = -EROFS;
1936 btrfs_handle_fs_error(root->fs_info, ret, NULL);
1937 goto enospc;
1938 }
1939
1940 btrfs_tree_lock(child);
1941 btrfs_set_lock_blocking(child);
1942 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1943 if (ret) {
1944 btrfs_tree_unlock(child);
1945 free_extent_buffer(child);
1946 goto enospc;
1947 }
1948
1949 tree_mod_log_set_root_pointer(root, child, 1);
1950 rcu_assign_pointer(root->node, child);
1951
1952 add_root_to_dirty_list(root);
1953 btrfs_tree_unlock(child);
1954
1955 path->locks[level] = 0;
1956 path->nodes[level] = NULL;
1957 clean_tree_block(trans, root->fs_info, mid);
1958 btrfs_tree_unlock(mid);
1959 /* once for the path */
1960 free_extent_buffer(mid);
1961
1962 root_sub_used(root, mid->len);
1963 btrfs_free_tree_block(trans, root, mid, 0, 1);
1964 /* once for the root ptr */
1965 free_extent_buffer_stale(mid);
1966 return 0;
1967 }
1968 if (btrfs_header_nritems(mid) >
1969 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1970 return 0;
1971
1972 left = read_node_slot(root, parent, pslot - 1);
1973 if (left) {
1974 btrfs_tree_lock(left);
1975 btrfs_set_lock_blocking(left);
1976 wret = btrfs_cow_block(trans, root, left,
1977 parent, pslot - 1, &left);
1978 if (wret) {
1979 ret = wret;
1980 goto enospc;
1981 }
1982 }
1983 right = read_node_slot(root, parent, pslot + 1);
1984 if (right) {
1985 btrfs_tree_lock(right);
1986 btrfs_set_lock_blocking(right);
1987 wret = btrfs_cow_block(trans, root, right,
1988 parent, pslot + 1, &right);
1989 if (wret) {
1990 ret = wret;
1991 goto enospc;
1992 }
1993 }
1994
1995 /* first, try to make some room in the middle buffer */
1996 if (left) {
1997 orig_slot += btrfs_header_nritems(left);
1998 wret = push_node_left(trans, root, left, mid, 1);
1999 if (wret < 0)
2000 ret = wret;
2001 }
2002
2003 /*
2004 * then try to empty the right most buffer into the middle
2005 */
2006 if (right) {
2007 wret = push_node_left(trans, root, mid, right, 1);
2008 if (wret < 0 && wret != -ENOSPC)
2009 ret = wret;
2010 if (btrfs_header_nritems(right) == 0) {
2011 clean_tree_block(trans, root->fs_info, right);
2012 btrfs_tree_unlock(right);
2013 del_ptr(root, path, level + 1, pslot + 1);
2014 root_sub_used(root, right->len);
2015 btrfs_free_tree_block(trans, root, right, 0, 1);
2016 free_extent_buffer_stale(right);
2017 right = NULL;
2018 } else {
2019 struct btrfs_disk_key right_key;
2020 btrfs_node_key(right, &right_key, 0);
2021 tree_mod_log_set_node_key(root->fs_info, parent,
2022 pslot + 1, 0);
2023 btrfs_set_node_key(parent, &right_key, pslot + 1);
2024 btrfs_mark_buffer_dirty(parent);
2025 }
2026 }
2027 if (btrfs_header_nritems(mid) == 1) {
2028 /*
2029 * we're not allowed to leave a node with one item in the
2030 * tree during a delete. A deletion from lower in the tree
2031 * could try to delete the only pointer in this node.
2032 * So, pull some keys from the left.
2033 * There has to be a left pointer at this point because
2034 * otherwise we would have pulled some pointers from the
2035 * right
2036 */
2037 if (!left) {
2038 ret = -EROFS;
2039 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2040 goto enospc;
2041 }
2042 wret = balance_node_right(trans, root, mid, left);
2043 if (wret < 0) {
2044 ret = wret;
2045 goto enospc;
2046 }
2047 if (wret == 1) {
2048 wret = push_node_left(trans, root, left, mid, 1);
2049 if (wret < 0)
2050 ret = wret;
2051 }
2052 BUG_ON(wret == 1);
2053 }
2054 if (btrfs_header_nritems(mid) == 0) {
2055 clean_tree_block(trans, root->fs_info, mid);
2056 btrfs_tree_unlock(mid);
2057 del_ptr(root, path, level + 1, pslot);
2058 root_sub_used(root, mid->len);
2059 btrfs_free_tree_block(trans, root, mid, 0, 1);
2060 free_extent_buffer_stale(mid);
2061 mid = NULL;
2062 } else {
2063 /* update the parent key to reflect our changes */
2064 struct btrfs_disk_key mid_key;
2065 btrfs_node_key(mid, &mid_key, 0);
2066 tree_mod_log_set_node_key(root->fs_info, parent,
2067 pslot, 0);
2068 btrfs_set_node_key(parent, &mid_key, pslot);
2069 btrfs_mark_buffer_dirty(parent);
2070 }
2071
2072 /* update the path */
2073 if (left) {
2074 if (btrfs_header_nritems(left) > orig_slot) {
2075 extent_buffer_get(left);
2076 /* left was locked after cow */
2077 path->nodes[level] = left;
2078 path->slots[level + 1] -= 1;
2079 path->slots[level] = orig_slot;
2080 if (mid) {
2081 btrfs_tree_unlock(mid);
2082 free_extent_buffer(mid);
2083 }
2084 } else {
2085 orig_slot -= btrfs_header_nritems(left);
2086 path->slots[level] = orig_slot;
2087 }
2088 }
2089 /* double check we haven't messed things up */
2090 if (orig_ptr !=
2091 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2092 BUG();
2093 enospc:
2094 if (right) {
2095 btrfs_tree_unlock(right);
2096 free_extent_buffer(right);
2097 }
2098 if (left) {
2099 if (path->nodes[level] != left)
2100 btrfs_tree_unlock(left);
2101 free_extent_buffer(left);
2102 }
2103 return ret;
2104 }
2105
2106 /* Node balancing for insertion. Here we only split or push nodes around
2107 * when they are completely full. This is also done top down, so we
2108 * have to be pessimistic.
2109 */
2110 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2111 struct btrfs_root *root,
2112 struct btrfs_path *path, int level)
2113 {
2114 struct extent_buffer *right = NULL;
2115 struct extent_buffer *mid;
2116 struct extent_buffer *left = NULL;
2117 struct extent_buffer *parent = NULL;
2118 int ret = 0;
2119 int wret;
2120 int pslot;
2121 int orig_slot = path->slots[level];
2122
2123 if (level == 0)
2124 return 1;
2125
2126 mid = path->nodes[level];
2127 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2128
2129 if (level < BTRFS_MAX_LEVEL - 1) {
2130 parent = path->nodes[level + 1];
2131 pslot = path->slots[level + 1];
2132 }
2133
2134 if (!parent)
2135 return 1;
2136
2137 left = read_node_slot(root, parent, pslot - 1);
2138
2139 /* first, try to make some room in the middle buffer */
2140 if (left) {
2141 u32 left_nr;
2142
2143 btrfs_tree_lock(left);
2144 btrfs_set_lock_blocking(left);
2145
2146 left_nr = btrfs_header_nritems(left);
2147 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2148 wret = 1;
2149 } else {
2150 ret = btrfs_cow_block(trans, root, left, parent,
2151 pslot - 1, &left);
2152 if (ret)
2153 wret = 1;
2154 else {
2155 wret = push_node_left(trans, root,
2156 left, mid, 0);
2157 }
2158 }
2159 if (wret < 0)
2160 ret = wret;
2161 if (wret == 0) {
2162 struct btrfs_disk_key disk_key;
2163 orig_slot += left_nr;
2164 btrfs_node_key(mid, &disk_key, 0);
2165 tree_mod_log_set_node_key(root->fs_info, parent,
2166 pslot, 0);
2167 btrfs_set_node_key(parent, &disk_key, pslot);
2168 btrfs_mark_buffer_dirty(parent);
2169 if (btrfs_header_nritems(left) > orig_slot) {
2170 path->nodes[level] = left;
2171 path->slots[level + 1] -= 1;
2172 path->slots[level] = orig_slot;
2173 btrfs_tree_unlock(mid);
2174 free_extent_buffer(mid);
2175 } else {
2176 orig_slot -=
2177 btrfs_header_nritems(left);
2178 path->slots[level] = orig_slot;
2179 btrfs_tree_unlock(left);
2180 free_extent_buffer(left);
2181 }
2182 return 0;
2183 }
2184 btrfs_tree_unlock(left);
2185 free_extent_buffer(left);
2186 }
2187 right = read_node_slot(root, parent, pslot + 1);
2188
2189 /*
2190 * then try to empty the right most buffer into the middle
2191 */
2192 if (right) {
2193 u32 right_nr;
2194
2195 btrfs_tree_lock(right);
2196 btrfs_set_lock_blocking(right);
2197
2198 right_nr = btrfs_header_nritems(right);
2199 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2200 wret = 1;
2201 } else {
2202 ret = btrfs_cow_block(trans, root, right,
2203 parent, pslot + 1,
2204 &right);
2205 if (ret)
2206 wret = 1;
2207 else {
2208 wret = balance_node_right(trans, root,
2209 right, mid);
2210 }
2211 }
2212 if (wret < 0)
2213 ret = wret;
2214 if (wret == 0) {
2215 struct btrfs_disk_key disk_key;
2216
2217 btrfs_node_key(right, &disk_key, 0);
2218 tree_mod_log_set_node_key(root->fs_info, parent,
2219 pslot + 1, 0);
2220 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2221 btrfs_mark_buffer_dirty(parent);
2222
2223 if (btrfs_header_nritems(mid) <= orig_slot) {
2224 path->nodes[level] = right;
2225 path->slots[level + 1] += 1;
2226 path->slots[level] = orig_slot -
2227 btrfs_header_nritems(mid);
2228 btrfs_tree_unlock(mid);
2229 free_extent_buffer(mid);
2230 } else {
2231 btrfs_tree_unlock(right);
2232 free_extent_buffer(right);
2233 }
2234 return 0;
2235 }
2236 btrfs_tree_unlock(right);
2237 free_extent_buffer(right);
2238 }
2239 return 1;
2240 }
2241
2242 /*
2243 * readahead one full node of leaves, finding things that are close
2244 * to the block in 'slot', and triggering ra on them.
2245 */
2246 static void reada_for_search(struct btrfs_root *root,
2247 struct btrfs_path *path,
2248 int level, int slot, u64 objectid)
2249 {
2250 struct extent_buffer *node;
2251 struct btrfs_disk_key disk_key;
2252 u32 nritems;
2253 u64 search;
2254 u64 target;
2255 u64 nread = 0;
2256 u64 gen;
2257 struct extent_buffer *eb;
2258 u32 nr;
2259 u32 blocksize;
2260 u32 nscan = 0;
2261
2262 if (level != 1)
2263 return;
2264
2265 if (!path->nodes[level])
2266 return;
2267
2268 node = path->nodes[level];
2269
2270 search = btrfs_node_blockptr(node, slot);
2271 blocksize = root->nodesize;
2272 eb = btrfs_find_tree_block(root->fs_info, search);
2273 if (eb) {
2274 free_extent_buffer(eb);
2275 return;
2276 }
2277
2278 target = search;
2279
2280 nritems = btrfs_header_nritems(node);
2281 nr = slot;
2282
2283 while (1) {
2284 if (path->reada == READA_BACK) {
2285 if (nr == 0)
2286 break;
2287 nr--;
2288 } else if (path->reada == READA_FORWARD) {
2289 nr++;
2290 if (nr >= nritems)
2291 break;
2292 }
2293 if (path->reada == READA_BACK && objectid) {
2294 btrfs_node_key(node, &disk_key, nr);
2295 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2296 break;
2297 }
2298 search = btrfs_node_blockptr(node, nr);
2299 if ((search <= target && target - search <= 65536) ||
2300 (search > target && search - target <= 65536)) {
2301 gen = btrfs_node_ptr_generation(node, nr);
2302 readahead_tree_block(root, search);
2303 nread += blocksize;
2304 }
2305 nscan++;
2306 if ((nread > 65536 || nscan > 32))
2307 break;
2308 }
2309 }
2310
2311 static noinline void reada_for_balance(struct btrfs_root *root,
2312 struct btrfs_path *path, int level)
2313 {
2314 int slot;
2315 int nritems;
2316 struct extent_buffer *parent;
2317 struct extent_buffer *eb;
2318 u64 gen;
2319 u64 block1 = 0;
2320 u64 block2 = 0;
2321
2322 parent = path->nodes[level + 1];
2323 if (!parent)
2324 return;
2325
2326 nritems = btrfs_header_nritems(parent);
2327 slot = path->slots[level + 1];
2328
2329 if (slot > 0) {
2330 block1 = btrfs_node_blockptr(parent, slot - 1);
2331 gen = btrfs_node_ptr_generation(parent, slot - 1);
2332 eb = btrfs_find_tree_block(root->fs_info, block1);
2333 /*
2334 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2335 * don't want to return EAGAIN here. That would loop
2336 * forever
2337 */
2338 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2339 block1 = 0;
2340 free_extent_buffer(eb);
2341 }
2342 if (slot + 1 < nritems) {
2343 block2 = btrfs_node_blockptr(parent, slot + 1);
2344 gen = btrfs_node_ptr_generation(parent, slot + 1);
2345 eb = btrfs_find_tree_block(root->fs_info, block2);
2346 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2347 block2 = 0;
2348 free_extent_buffer(eb);
2349 }
2350
2351 if (block1)
2352 readahead_tree_block(root, block1);
2353 if (block2)
2354 readahead_tree_block(root, block2);
2355 }
2356
2357
2358 /*
2359 * when we walk down the tree, it is usually safe to unlock the higher layers
2360 * in the tree. The exceptions are when our path goes through slot 0, because
2361 * operations on the tree might require changing key pointers higher up in the
2362 * tree.
2363 *
2364 * callers might also have set path->keep_locks, which tells this code to keep
2365 * the lock if the path points to the last slot in the block. This is part of
2366 * walking through the tree, and selecting the next slot in the higher block.
2367 *
2368 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2369 * if lowest_unlock is 1, level 0 won't be unlocked
2370 */
2371 static noinline void unlock_up(struct btrfs_path *path, int level,
2372 int lowest_unlock, int min_write_lock_level,
2373 int *write_lock_level)
2374 {
2375 int i;
2376 int skip_level = level;
2377 int no_skips = 0;
2378 struct extent_buffer *t;
2379
2380 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2381 if (!path->nodes[i])
2382 break;
2383 if (!path->locks[i])
2384 break;
2385 if (!no_skips && path->slots[i] == 0) {
2386 skip_level = i + 1;
2387 continue;
2388 }
2389 if (!no_skips && path->keep_locks) {
2390 u32 nritems;
2391 t = path->nodes[i];
2392 nritems = btrfs_header_nritems(t);
2393 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2394 skip_level = i + 1;
2395 continue;
2396 }
2397 }
2398 if (skip_level < i && i >= lowest_unlock)
2399 no_skips = 1;
2400
2401 t = path->nodes[i];
2402 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2403 btrfs_tree_unlock_rw(t, path->locks[i]);
2404 path->locks[i] = 0;
2405 if (write_lock_level &&
2406 i > min_write_lock_level &&
2407 i <= *write_lock_level) {
2408 *write_lock_level = i - 1;
2409 }
2410 }
2411 }
2412 }
2413
2414 /*
2415 * This releases any locks held in the path starting at level and
2416 * going all the way up to the root.
2417 *
2418 * btrfs_search_slot will keep the lock held on higher nodes in a few
2419 * corner cases, such as COW of the block at slot zero in the node. This
2420 * ignores those rules, and it should only be called when there are no
2421 * more updates to be done higher up in the tree.
2422 */
2423 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2424 {
2425 int i;
2426
2427 if (path->keep_locks)
2428 return;
2429
2430 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2431 if (!path->nodes[i])
2432 continue;
2433 if (!path->locks[i])
2434 continue;
2435 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2436 path->locks[i] = 0;
2437 }
2438 }
2439
2440 /*
2441 * helper function for btrfs_search_slot. The goal is to find a block
2442 * in cache without setting the path to blocking. If we find the block
2443 * we return zero and the path is unchanged.
2444 *
2445 * If we can't find the block, we set the path blocking and do some
2446 * reada. -EAGAIN is returned and the search must be repeated.
2447 */
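/*
 * Editor's note (not part of the original source): callers are expected to
 * restart the whole search when this helper returns -EAGAIN, because the path
 * has been released. A minimal sketch of the pattern used by the search
 * functions below (labels and variables are illustrative):
 *
 *	err = read_block_for_search(trans, root, p, &b, level, slot, key, 0);
 *	if (err == -EAGAIN)
 *		goto again;	// path was dropped, retry from the root
 *	if (err) {
 *		ret = err;
 *		goto done;
 *	}
 */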
2448 static int
2449 read_block_for_search(struct btrfs_trans_handle *trans,
2450 struct btrfs_root *root, struct btrfs_path *p,
2451 struct extent_buffer **eb_ret, int level, int slot,
2452 struct btrfs_key *key, u64 time_seq)
2453 {
2454 u64 blocknr;
2455 u64 gen;
2456 struct extent_buffer *b = *eb_ret;
2457 struct extent_buffer *tmp;
2458 int ret;
2459
2460 blocknr = btrfs_node_blockptr(b, slot);
2461 gen = btrfs_node_ptr_generation(b, slot);
2462
2463 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2464 if (tmp) {
2465 /* first we do an atomic uptodate check */
2466 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2467 *eb_ret = tmp;
2468 return 0;
2469 }
2470
2471 /* the pages were up to date, but we failed
2472 * the generation number check. Do a full
2473 * read for the generation number that is correct.
2474 * We must do this without dropping locks so
2475 * we can trust our generation number
2476 */
2477 btrfs_set_path_blocking(p);
2478
2479 /* now we're allowed to do a blocking uptodate check */
2480 ret = btrfs_read_buffer(tmp, gen);
2481 if (!ret) {
2482 *eb_ret = tmp;
2483 return 0;
2484 }
2485 free_extent_buffer(tmp);
2486 btrfs_release_path(p);
2487 return -EIO;
2488 }
2489
2490 /*
2491 * reduce lock contention at high levels
2492 * of the btree by dropping locks before
2493 * we read. Don't release the lock on the current
2494 * level because we need to walk this node to figure
2495 * out which blocks to read.
2496 */
2497 btrfs_unlock_up_safe(p, level + 1);
2498 btrfs_set_path_blocking(p);
2499
2500 free_extent_buffer(tmp);
2501 if (p->reada != READA_NONE)
2502 reada_for_search(root, p, level, slot, key->objectid);
2503
2504 btrfs_release_path(p);
2505
2506 ret = -EAGAIN;
2507 tmp = read_tree_block(root, blocknr, 0);
2508 if (!IS_ERR(tmp)) {
2509 /*
2510 * If the read above didn't mark this buffer up to date,
2511 * it will never end up being up to date. Set ret to EIO now
2512 * and give up so that our caller doesn't loop forever
2513 * on our EAGAINs.
2514 */
2515 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2516 ret = -EIO;
2517 free_extent_buffer(tmp);
2518 } else {
2519 ret = PTR_ERR(tmp);
2520 }
2521 return ret;
2522 }
2523
2524 /*
2525 * helper function for btrfs_search_slot. This does all of the checks
2526 * for node-level blocks and does any balancing required based on
2527 * the ins_len.
2528 *
2529 * If no extra work was required, zero is returned. If we had to
2530 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2531 * start over
2532 */
2533 static int
2534 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2535 struct btrfs_root *root, struct btrfs_path *p,
2536 struct extent_buffer *b, int level, int ins_len,
2537 int *write_lock_level)
2538 {
2539 int ret;
2540 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2541 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2542 int sret;
2543
2544 if (*write_lock_level < level + 1) {
2545 *write_lock_level = level + 1;
2546 btrfs_release_path(p);
2547 goto again;
2548 }
2549
2550 btrfs_set_path_blocking(p);
2551 reada_for_balance(root, p, level);
2552 sret = split_node(trans, root, p, level);
2553 btrfs_clear_path_blocking(p, NULL, 0);
2554
2555 BUG_ON(sret > 0);
2556 if (sret) {
2557 ret = sret;
2558 goto done;
2559 }
2560 b = p->nodes[level];
2561 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2562 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2563 int sret;
2564
2565 if (*write_lock_level < level + 1) {
2566 *write_lock_level = level + 1;
2567 btrfs_release_path(p);
2568 goto again;
2569 }
2570
2571 btrfs_set_path_blocking(p);
2572 reada_for_balance(root, p, level);
2573 sret = balance_level(trans, root, p, level);
2574 btrfs_clear_path_blocking(p, NULL, 0);
2575
2576 if (sret) {
2577 ret = sret;
2578 goto done;
2579 }
2580 b = p->nodes[level];
2581 if (!b) {
2582 btrfs_release_path(p);
2583 goto again;
2584 }
2585 BUG_ON(btrfs_header_nritems(b) == 1);
2586 }
2587 return 0;
2588
2589 again:
2590 ret = -EAGAIN;
2591 done:
2592 return ret;
2593 }
2594
2595 static void key_search_validate(struct extent_buffer *b,
2596 struct btrfs_key *key,
2597 int level)
2598 {
2599 #ifdef CONFIG_BTRFS_ASSERT
2600 struct btrfs_disk_key disk_key;
2601
2602 btrfs_cpu_key_to_disk(&disk_key, key);
2603
2604 if (level == 0)
2605 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2606 offsetof(struct btrfs_leaf, items[0].key),
2607 sizeof(disk_key)));
2608 else
2609 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2610 offsetof(struct btrfs_node, ptrs[0].key),
2611 sizeof(disk_key)));
2612 #endif
2613 }
2614
2615 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2616 int level, int *prev_cmp, int *slot)
2617 {
2618 if (*prev_cmp != 0) {
2619 *prev_cmp = bin_search(b, key, level, slot);
2620 return *prev_cmp;
2621 }
2622
2623 key_search_validate(b, key, level);
2624 *slot = 0;
2625
2626 return 0;
2627 }
2628
2629 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2630 u64 iobjectid, u64 ioff, u8 key_type,
2631 struct btrfs_key *found_key)
2632 {
2633 int ret;
2634 struct btrfs_key key;
2635 struct extent_buffer *eb;
2636
2637 ASSERT(path);
2638 ASSERT(found_key);
2639
2640 key.type = key_type;
2641 key.objectid = iobjectid;
2642 key.offset = ioff;
2643
2644 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2645 if (ret < 0)
2646 return ret;
2647
2648 eb = path->nodes[0];
2649 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2650 ret = btrfs_next_leaf(fs_root, path);
2651 if (ret)
2652 return ret;
2653 eb = path->nodes[0];
2654 }
2655
2656 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2657 if (found_key->type != key.type ||
2658 found_key->objectid != key.objectid)
2659 return 1;
2660
2661 return 0;
2662 }
2663
2664 /*
2665 * look for key in the tree. path is filled in with nodes along the way.
2666 * if key is found, we return zero and you can find the item in the leaf
2667 * level of the path (level 0)
2668 *
2669 * If the key isn't found, the path points to the slot where it should
2670 * be inserted, and 1 is returned. If there are other errors during the
2671 * search a negative error number is returned.
2672 *
2673 * if ins_len > 0, nodes and leaves will be split as we walk down the
2674 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2675 * possible)
2676 */
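/*
 * Editor's example (not part of the original source): a minimal read-only
 * lookup with btrfs_search_slot. The key values are hypothetical and error
 * handling is trimmed for brevity.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		;	// exact match at path->nodes[0], path->slots[0]
 *	else if (ret == 1)
 *		;	// not found; slot points at the insertion position
 *	btrfs_free_path(path);
 */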
2677 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2678 *root, struct btrfs_key *key, struct btrfs_path *p, int
2679 ins_len, int cow)
2680 {
2681 struct extent_buffer *b;
2682 int slot;
2683 int ret;
2684 int err;
2685 int level;
2686 int lowest_unlock = 1;
2687 int root_lock;
2688 /* everything at write_lock_level or lower must be write locked */
2689 int write_lock_level = 0;
2690 u8 lowest_level = 0;
2691 int min_write_lock_level;
2692 int prev_cmp;
2693
2694 lowest_level = p->lowest_level;
2695 WARN_ON(lowest_level && ins_len > 0);
2696 WARN_ON(p->nodes[0] != NULL);
2697 BUG_ON(!cow && ins_len);
2698
2699 if (ins_len < 0) {
2700 lowest_unlock = 2;
2701
2702 /* when we are removing items, we might have to go up to level
2703 * two as we update tree pointers. Make sure we keep write
2704 * locks on those levels as well
2705 */
2706 write_lock_level = 2;
2707 } else if (ins_len > 0) {
2708 /*
2709 * for inserting items, make sure we have a write lock on
2710 * level 1 so we can update keys
2711 */
2712 write_lock_level = 1;
2713 }
2714
2715 if (!cow)
2716 write_lock_level = -1;
2717
2718 if (cow && (p->keep_locks || p->lowest_level))
2719 write_lock_level = BTRFS_MAX_LEVEL;
2720
2721 min_write_lock_level = write_lock_level;
2722
2723 again:
2724 prev_cmp = -1;
2725 /*
2726 * we try very hard to do read locks on the root
2727 */
2728 root_lock = BTRFS_READ_LOCK;
2729 level = 0;
2730 if (p->search_commit_root) {
2731 /*
2732 * the commit roots are read only
2733 * so we always do read locks
2734 */
2735 if (p->need_commit_sem)
2736 down_read(&root->fs_info->commit_root_sem);
2737 b = root->commit_root;
2738 extent_buffer_get(b);
2739 level = btrfs_header_level(b);
2740 if (p->need_commit_sem)
2741 up_read(&root->fs_info->commit_root_sem);
2742 if (!p->skip_locking)
2743 btrfs_tree_read_lock(b);
2744 } else {
2745 if (p->skip_locking) {
2746 b = btrfs_root_node(root);
2747 level = btrfs_header_level(b);
2748 } else {
2749 /* we don't know the level of the root node
2750 * until we actually have it read locked
2751 */
2752 b = btrfs_read_lock_root_node(root);
2753 level = btrfs_header_level(b);
2754 if (level <= write_lock_level) {
2755 /* whoops, must trade for write lock */
2756 btrfs_tree_read_unlock(b);
2757 free_extent_buffer(b);
2758 b = btrfs_lock_root_node(root);
2759 root_lock = BTRFS_WRITE_LOCK;
2760
2761 /* the level might have changed, check again */
2762 level = btrfs_header_level(b);
2763 }
2764 }
2765 }
2766 p->nodes[level] = b;
2767 if (!p->skip_locking)
2768 p->locks[level] = root_lock;
2769
2770 while (b) {
2771 level = btrfs_header_level(b);
2772
2773 /*
2774 * setup the path here so we can release it under lock
2775 * contention with the cow code
2776 */
2777 if (cow) {
2778 /*
2779 * if we don't really need to cow this block
2780 * then we don't want to set the path blocking,
2781 * so we test it here
2782 */
2783 if (!should_cow_block(trans, root, b)) {
2784 trans->dirty = true;
2785 goto cow_done;
2786 }
2787
2788 /*
2789 * must have write locks on this node and the
2790 * parent
2791 */
2792 if (level > write_lock_level ||
2793 (level + 1 > write_lock_level &&
2794 level + 1 < BTRFS_MAX_LEVEL &&
2795 p->nodes[level + 1])) {
2796 write_lock_level = level + 1;
2797 btrfs_release_path(p);
2798 goto again;
2799 }
2800
2801 btrfs_set_path_blocking(p);
2802 err = btrfs_cow_block(trans, root, b,
2803 p->nodes[level + 1],
2804 p->slots[level + 1], &b);
2805 if (err) {
2806 ret = err;
2807 goto done;
2808 }
2809 }
2810 cow_done:
2811 p->nodes[level] = b;
2812 btrfs_clear_path_blocking(p, NULL, 0);
2813
2814 /*
2815 * we have a lock on b and as long as we aren't changing
2816 * the tree, there is no way for the items in b to change.
2817 * It is safe to drop the lock on our parent before we
2818 * go through the expensive btree search on b.
2819 *
2820 * If we're inserting or deleting (ins_len != 0), then we might
2821 * be changing slot zero, which may require changing the parent.
2822 * So, we can't drop the lock until after we know which slot
2823 * we're operating on.
2824 */
2825 if (!ins_len && !p->keep_locks) {
2826 int u = level + 1;
2827
2828 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2829 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2830 p->locks[u] = 0;
2831 }
2832 }
2833
2834 ret = key_search(b, key, level, &prev_cmp, &slot);
2835 if (ret < 0)
2836 goto done;
2837
2838 if (level != 0) {
2839 int dec = 0;
2840 if (ret && slot > 0) {
2841 dec = 1;
2842 slot -= 1;
2843 }
2844 p->slots[level] = slot;
2845 err = setup_nodes_for_search(trans, root, p, b, level,
2846 ins_len, &write_lock_level);
2847 if (err == -EAGAIN)
2848 goto again;
2849 if (err) {
2850 ret = err;
2851 goto done;
2852 }
2853 b = p->nodes[level];
2854 slot = p->slots[level];
2855
2856 /*
2857 * slot 0 is special; if we change the key
2858 * we have to update the parent pointer
2859 * which means we must have a write lock
2860 * on the parent
2861 */
2862 if (slot == 0 && ins_len &&
2863 write_lock_level < level + 1) {
2864 write_lock_level = level + 1;
2865 btrfs_release_path(p);
2866 goto again;
2867 }
2868
2869 unlock_up(p, level, lowest_unlock,
2870 min_write_lock_level, &write_lock_level);
2871
2872 if (level == lowest_level) {
2873 if (dec)
2874 p->slots[level]++;
2875 goto done;
2876 }
2877
2878 err = read_block_for_search(trans, root, p,
2879 &b, level, slot, key, 0);
2880 if (err == -EAGAIN)
2881 goto again;
2882 if (err) {
2883 ret = err;
2884 goto done;
2885 }
2886
2887 if (!p->skip_locking) {
2888 level = btrfs_header_level(b);
2889 if (level <= write_lock_level) {
2890 err = btrfs_try_tree_write_lock(b);
2891 if (!err) {
2892 btrfs_set_path_blocking(p);
2893 btrfs_tree_lock(b);
2894 btrfs_clear_path_blocking(p, b,
2895 BTRFS_WRITE_LOCK);
2896 }
2897 p->locks[level] = BTRFS_WRITE_LOCK;
2898 } else {
2899 err = btrfs_tree_read_lock_atomic(b);
2900 if (!err) {
2901 btrfs_set_path_blocking(p);
2902 btrfs_tree_read_lock(b);
2903 btrfs_clear_path_blocking(p, b,
2904 BTRFS_READ_LOCK);
2905 }
2906 p->locks[level] = BTRFS_READ_LOCK;
2907 }
2908 p->nodes[level] = b;
2909 }
2910 } else {
2911 p->slots[level] = slot;
2912 if (ins_len > 0 &&
2913 btrfs_leaf_free_space(root, b) < ins_len) {
2914 if (write_lock_level < 1) {
2915 write_lock_level = 1;
2916 btrfs_release_path(p);
2917 goto again;
2918 }
2919
2920 btrfs_set_path_blocking(p);
2921 err = split_leaf(trans, root, key,
2922 p, ins_len, ret == 0);
2923 btrfs_clear_path_blocking(p, NULL, 0);
2924
2925 BUG_ON(err > 0);
2926 if (err) {
2927 ret = err;
2928 goto done;
2929 }
2930 }
2931 if (!p->search_for_split)
2932 unlock_up(p, level, lowest_unlock,
2933 min_write_lock_level, &write_lock_level);
2934 goto done;
2935 }
2936 }
2937 ret = 1;
2938 done:
2939 /*
2940 * we don't really know what they plan on doing with the path
2941 * from here on, so for now just mark it as blocking
2942 */
2943 if (!p->leave_spinning)
2944 btrfs_set_path_blocking(p);
2945 if (ret < 0 && !p->skip_release_on_error)
2946 btrfs_release_path(p);
2947 return ret;
2948 }
2949
2950 /*
2951 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2952 * current state of the tree together with the operations recorded in the tree
2953 * modification log to search for the key in a previous version of this tree, as
2954 * denoted by the time_seq parameter.
2955 *
2956 * Naturally, there is no support for insert, delete or cow operations.
2957 *
2958 * The resulting path and return value will be set up as if we called
2959 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2960 */
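/*
 * Editor's note (not in the original source): a hedged usage sketch. The
 * time_seq value must come from the tree modification log; everything else
 * here is illustrative and mirrors the btrfs_search_slot example above.
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret == 0)
 *		;	// item found in the tree as it looked at time_seq
 *	btrfs_release_path(path);
 */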
2961 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2962 struct btrfs_path *p, u64 time_seq)
2963 {
2964 struct extent_buffer *b;
2965 int slot;
2966 int ret;
2967 int err;
2968 int level;
2969 int lowest_unlock = 1;
2970 u8 lowest_level = 0;
2971 int prev_cmp = -1;
2972
2973 lowest_level = p->lowest_level;
2974 WARN_ON(p->nodes[0] != NULL);
2975
2976 if (p->search_commit_root) {
2977 BUG_ON(time_seq);
2978 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2979 }
2980
2981 again:
2982 b = get_old_root(root, time_seq);
2983 level = btrfs_header_level(b);
2984 p->locks[level] = BTRFS_READ_LOCK;
2985
2986 while (b) {
2987 level = btrfs_header_level(b);
2988 p->nodes[level] = b;
2989 btrfs_clear_path_blocking(p, NULL, 0);
2990
2991 /*
2992 * we have a lock on b and as long as we aren't changing
2993 * the tree, there is no way for the items in b to change.
2994 * It is safe to drop the lock on our parent before we
2995 * go through the expensive btree search on b.
2996 */
2997 btrfs_unlock_up_safe(p, level + 1);
2998
2999 /*
3000 * Since we can unwind ebs we want to do a real search every
3001 * time.
3002 */
3003 prev_cmp = -1;
3004 ret = key_search(b, key, level, &prev_cmp, &slot);
3005
3006 if (level != 0) {
3007 int dec = 0;
3008 if (ret && slot > 0) {
3009 dec = 1;
3010 slot -= 1;
3011 }
3012 p->slots[level] = slot;
3013 unlock_up(p, level, lowest_unlock, 0, NULL);
3014
3015 if (level == lowest_level) {
3016 if (dec)
3017 p->slots[level]++;
3018 goto done;
3019 }
3020
3021 err = read_block_for_search(NULL, root, p, &b, level,
3022 slot, key, time_seq);
3023 if (err == -EAGAIN)
3024 goto again;
3025 if (err) {
3026 ret = err;
3027 goto done;
3028 }
3029
3030 level = btrfs_header_level(b);
3031 err = btrfs_tree_read_lock_atomic(b);
3032 if (!err) {
3033 btrfs_set_path_blocking(p);
3034 btrfs_tree_read_lock(b);
3035 btrfs_clear_path_blocking(p, b,
3036 BTRFS_READ_LOCK);
3037 }
3038 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3039 if (!b) {
3040 ret = -ENOMEM;
3041 goto done;
3042 }
3043 p->locks[level] = BTRFS_READ_LOCK;
3044 p->nodes[level] = b;
3045 } else {
3046 p->slots[level] = slot;
3047 unlock_up(p, level, lowest_unlock, 0, NULL);
3048 goto done;
3049 }
3050 }
3051 ret = 1;
3052 done:
3053 if (!p->leave_spinning)
3054 btrfs_set_path_blocking(p);
3055 if (ret < 0)
3056 btrfs_release_path(p);
3057
3058 return ret;
3059 }
3060
3061 /*
3062 * helper to use instead of search slot if no exact match is needed but
3063 * instead the next or previous item should be returned.
3064 * When find_higher is true, the next higher item is returned, the next lower
3065 * otherwise.
3066 * When return_any and find_higher are both true, and no higher item is found,
3067 * return the next lower instead.
3068 * When return_any is true and find_higher is false, and no lower item is found,
3069 * return the next higher instead.
3070 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3071 * < 0 on error
3072 */
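/*
 * Editor's example (not part of the original source): find the first item at
 * or after a given key, falling back to the last lower item if nothing higher
 * exists. Names are hypothetical.
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0) {
 *		struct btrfs_key found;
 *
 *		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
 *		// 'found' is the nearest item; it may sort below 'key'
 *		// because return_any was set
 *	}
 */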
3073 int btrfs_search_slot_for_read(struct btrfs_root *root,
3074 struct btrfs_key *key, struct btrfs_path *p,
3075 int find_higher, int return_any)
3076 {
3077 int ret;
3078 struct extent_buffer *leaf;
3079
3080 again:
3081 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3082 if (ret <= 0)
3083 return ret;
3084 /*
3085 * a return value of 1 means the path is at the position where the
3086 * item should be inserted. Normally this is the next bigger item,
3087 * but in case the previous item is the last in a leaf, path points
3088 * to the first free slot in the previous leaf, i.e. at an invalid
3089 * item.
3090 */
3091 leaf = p->nodes[0];
3092
3093 if (find_higher) {
3094 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3095 ret = btrfs_next_leaf(root, p);
3096 if (ret <= 0)
3097 return ret;
3098 if (!return_any)
3099 return 1;
3100 /*
3101 * no higher item found, return the next
3102 * lower instead
3103 */
3104 return_any = 0;
3105 find_higher = 0;
3106 btrfs_release_path(p);
3107 goto again;
3108 }
3109 } else {
3110 if (p->slots[0] == 0) {
3111 ret = btrfs_prev_leaf(root, p);
3112 if (ret < 0)
3113 return ret;
3114 if (!ret) {
3115 leaf = p->nodes[0];
3116 if (p->slots[0] == btrfs_header_nritems(leaf))
3117 p->slots[0]--;
3118 return 0;
3119 }
3120 if (!return_any)
3121 return 1;
3122 /*
3123 * no lower item found, return the next
3124 * higher instead
3125 */
3126 return_any = 0;
3127 find_higher = 1;
3128 btrfs_release_path(p);
3129 goto again;
3130 } else {
3131 --p->slots[0];
3132 }
3133 }
3134 return 0;
3135 }
3136
3137 /*
3138 * adjust the pointers going up the tree, starting at level
3139 * making sure the right key of each node points to 'key'.
3140 * This is used after shifting pointers to the left, so it stops
3141 * fixing up pointers when a given leaf/node is not in slot 0 of the
3142 * higher levels
3143 *
3144 */
3145 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3146 struct btrfs_path *path,
3147 struct btrfs_disk_key *key, int level)
3148 {
3149 int i;
3150 struct extent_buffer *t;
3151
3152 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3153 int tslot = path->slots[i];
3154 if (!path->nodes[i])
3155 break;
3156 t = path->nodes[i];
3157 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3158 btrfs_set_node_key(t, key, tslot);
3159 btrfs_mark_buffer_dirty(path->nodes[i]);
3160 if (tslot != 0)
3161 break;
3162 }
3163 }
3164
3165 /*
3166 * update item key.
3167 *
3168 * This function isn't completely safe. It's the caller's responsibility
3169 * to make sure the new key won't break the order
3170 */
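/*
 * Editor's note (not in the original source): typical use is nudging an
 * existing item's key in place, e.g. moving its offset forward while keeping
 * it between its neighbours. Sketch with hypothetical values:
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset += bytes_trimmed;	// must still sort correctly
 *	btrfs_set_item_key_safe(root->fs_info, path, &new_key);
 */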
3171 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3172 struct btrfs_path *path,
3173 struct btrfs_key *new_key)
3174 {
3175 struct btrfs_disk_key disk_key;
3176 struct extent_buffer *eb;
3177 int slot;
3178
3179 eb = path->nodes[0];
3180 slot = path->slots[0];
3181 if (slot > 0) {
3182 btrfs_item_key(eb, &disk_key, slot - 1);
3183 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3184 }
3185 if (slot < btrfs_header_nritems(eb) - 1) {
3186 btrfs_item_key(eb, &disk_key, slot + 1);
3187 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3188 }
3189
3190 btrfs_cpu_key_to_disk(&disk_key, new_key);
3191 btrfs_set_item_key(eb, &disk_key, slot);
3192 btrfs_mark_buffer_dirty(eb);
3193 if (slot == 0)
3194 fixup_low_keys(fs_info, path, &disk_key, 1);
3195 }
3196
3197 /*
3198 * try to push data from one node into the next node left in the
3199 * tree.
3200 *
3201 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3202 * error, and > 0 if there was no room in the left hand block.
3203 */
3204 static int push_node_left(struct btrfs_trans_handle *trans,
3205 struct btrfs_root *root, struct extent_buffer *dst,
3206 struct extent_buffer *src, int empty)
3207 {
3208 int push_items = 0;
3209 int src_nritems;
3210 int dst_nritems;
3211 int ret = 0;
3212
3213 src_nritems = btrfs_header_nritems(src);
3214 dst_nritems = btrfs_header_nritems(dst);
3215 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3216 WARN_ON(btrfs_header_generation(src) != trans->transid);
3217 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3218
3219 if (!empty && src_nritems <= 8)
3220 return 1;
3221
3222 if (push_items <= 0)
3223 return 1;
3224
3225 if (empty) {
3226 push_items = min(src_nritems, push_items);
3227 if (push_items < src_nritems) {
3228 /* leave at least 8 pointers in the node if
3229 * we aren't going to empty it
3230 */
3231 if (src_nritems - push_items < 8) {
3232 if (push_items <= 8)
3233 return 1;
3234 push_items -= 8;
3235 }
3236 }
3237 } else
3238 push_items = min(src_nritems - 8, push_items);
3239
3240 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3241 push_items);
3242 if (ret) {
3243 btrfs_abort_transaction(trans, root, ret);
3244 return ret;
3245 }
3246 copy_extent_buffer(dst, src,
3247 btrfs_node_key_ptr_offset(dst_nritems),
3248 btrfs_node_key_ptr_offset(0),
3249 push_items * sizeof(struct btrfs_key_ptr));
3250
3251 if (push_items < src_nritems) {
3252 /*
3253 * don't call tree_mod_log_eb_move here, key removal was already
3254 * fully logged by tree_mod_log_eb_copy above.
3255 */
3256 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3257 btrfs_node_key_ptr_offset(push_items),
3258 (src_nritems - push_items) *
3259 sizeof(struct btrfs_key_ptr));
3260 }
3261 btrfs_set_header_nritems(src, src_nritems - push_items);
3262 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3263 btrfs_mark_buffer_dirty(src);
3264 btrfs_mark_buffer_dirty(dst);
3265
3266 return ret;
3267 }
3268
3269 /*
3270 * try to push data from one node into the next node right in the
3271 * tree.
3272 *
3273 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3274 * error, and > 0 if there was no room in the right hand block.
3275 *
3276 * this will only push up to 1/2 the contents of the left node over
3277 */
3278 static int balance_node_right(struct btrfs_trans_handle *trans,
3279 struct btrfs_root *root,
3280 struct extent_buffer *dst,
3281 struct extent_buffer *src)
3282 {
3283 int push_items = 0;
3284 int max_push;
3285 int src_nritems;
3286 int dst_nritems;
3287 int ret = 0;
3288
3289 WARN_ON(btrfs_header_generation(src) != trans->transid);
3290 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3291
3292 src_nritems = btrfs_header_nritems(src);
3293 dst_nritems = btrfs_header_nritems(dst);
3294 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3295 if (push_items <= 0)
3296 return 1;
3297
3298 if (src_nritems < 4)
3299 return 1;
3300
3301 max_push = src_nritems / 2 + 1;
3302 /* don't try to empty the node */
3303 if (max_push >= src_nritems)
3304 return 1;
3305
3306 if (max_push < push_items)
3307 push_items = max_push;
3308
3309 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3310 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3311 btrfs_node_key_ptr_offset(0),
3312 (dst_nritems) *
3313 sizeof(struct btrfs_key_ptr));
3314
3315 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3316 src_nritems - push_items, push_items);
3317 if (ret) {
3318 btrfs_abort_transaction(trans, root, ret);
3319 return ret;
3320 }
3321 copy_extent_buffer(dst, src,
3322 btrfs_node_key_ptr_offset(0),
3323 btrfs_node_key_ptr_offset(src_nritems - push_items),
3324 push_items * sizeof(struct btrfs_key_ptr));
3325
3326 btrfs_set_header_nritems(src, src_nritems - push_items);
3327 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3328
3329 btrfs_mark_buffer_dirty(src);
3330 btrfs_mark_buffer_dirty(dst);
3331
3332 return ret;
3333 }
3334
3335 /*
3336 * helper function to insert a new root level in the tree.
3337 * A new node is allocated, and a single item is inserted to
3338 * point to the existing root
3339 *
3340 * returns zero on success or < 0 on failure.
3341 */
3342 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3343 struct btrfs_root *root,
3344 struct btrfs_path *path, int level)
3345 {
3346 u64 lower_gen;
3347 struct extent_buffer *lower;
3348 struct extent_buffer *c;
3349 struct extent_buffer *old;
3350 struct btrfs_disk_key lower_key;
3351
3352 BUG_ON(path->nodes[level]);
3353 BUG_ON(path->nodes[level-1] != root->node);
3354
3355 lower = path->nodes[level-1];
3356 if (level == 1)
3357 btrfs_item_key(lower, &lower_key, 0);
3358 else
3359 btrfs_node_key(lower, &lower_key, 0);
3360
3361 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3362 &lower_key, level, root->node->start, 0);
3363 if (IS_ERR(c))
3364 return PTR_ERR(c);
3365
3366 root_add_used(root, root->nodesize);
3367
3368 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3369 btrfs_set_header_nritems(c, 1);
3370 btrfs_set_header_level(c, level);
3371 btrfs_set_header_bytenr(c, c->start);
3372 btrfs_set_header_generation(c, trans->transid);
3373 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3374 btrfs_set_header_owner(c, root->root_key.objectid);
3375
3376 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3377 BTRFS_FSID_SIZE);
3378
3379 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3380 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3381
3382 btrfs_set_node_key(c, &lower_key, 0);
3383 btrfs_set_node_blockptr(c, 0, lower->start);
3384 lower_gen = btrfs_header_generation(lower);
3385 WARN_ON(lower_gen != trans->transid);
3386
3387 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3388
3389 btrfs_mark_buffer_dirty(c);
3390
3391 old = root->node;
3392 tree_mod_log_set_root_pointer(root, c, 0);
3393 rcu_assign_pointer(root->node, c);
3394
3395 /* the super has an extra ref to root->node */
3396 free_extent_buffer(old);
3397
3398 add_root_to_dirty_list(root);
3399 extent_buffer_get(c);
3400 path->nodes[level] = c;
3401 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3402 path->slots[level] = 0;
3403 return 0;
3404 }
3405
3406 /*
3407 * worker function to insert a single pointer in a node.
3408 * the node should have enough room for the pointer already
3409 *
3410 * slot and level indicate where you want the key to go, and
3411 * blocknr is the block the key points to.
3412 */
3413 static void insert_ptr(struct btrfs_trans_handle *trans,
3414 struct btrfs_root *root, struct btrfs_path *path,
3415 struct btrfs_disk_key *key, u64 bytenr,
3416 int slot, int level)
3417 {
3418 struct extent_buffer *lower;
3419 int nritems;
3420 int ret;
3421
3422 BUG_ON(!path->nodes[level]);
3423 btrfs_assert_tree_locked(path->nodes[level]);
3424 lower = path->nodes[level];
3425 nritems = btrfs_header_nritems(lower);
3426 BUG_ON(slot > nritems);
3427 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3428 if (slot != nritems) {
3429 if (level)
3430 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3431 slot, nritems - slot);
3432 memmove_extent_buffer(lower,
3433 btrfs_node_key_ptr_offset(slot + 1),
3434 btrfs_node_key_ptr_offset(slot),
3435 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3436 }
3437 if (level) {
3438 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3439 MOD_LOG_KEY_ADD, GFP_NOFS);
3440 BUG_ON(ret < 0);
3441 }
3442 btrfs_set_node_key(lower, key, slot);
3443 btrfs_set_node_blockptr(lower, slot, bytenr);
3444 WARN_ON(trans->transid == 0);
3445 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3446 btrfs_set_header_nritems(lower, nritems + 1);
3447 btrfs_mark_buffer_dirty(lower);
3448 }
3449
3450 /*
3451 * split the node at the specified level in path in two.
3452 * The path is corrected to point to the appropriate node after the split
3453 *
3454 * Before splitting this tries to make some room in the node by pushing
3455 * left and right; if either one works, it returns right away.
3456 *
3457 * returns 0 on success and < 0 on failure
3458 */
3459 static noinline int split_node(struct btrfs_trans_handle *trans,
3460 struct btrfs_root *root,
3461 struct btrfs_path *path, int level)
3462 {
3463 struct extent_buffer *c;
3464 struct extent_buffer *split;
3465 struct btrfs_disk_key disk_key;
3466 int mid;
3467 int ret;
3468 u32 c_nritems;
3469
3470 c = path->nodes[level];
3471 WARN_ON(btrfs_header_generation(c) != trans->transid);
3472 if (c == root->node) {
3473 /*
3474 * trying to split the root, let's make a new one
3475 *
3476 * tree mod log: We don't log removal of the old root in
3477 * insert_new_root, because that root buffer will be kept as a
3478 * normal node. We are going to log removal of half of the
3479 * elements below with tree_mod_log_eb_copy. We're holding a
3480 * tree lock on the buffer, which is why we cannot race with
3481 * other tree_mod_log users.
3482 */
3483 ret = insert_new_root(trans, root, path, level + 1);
3484 if (ret)
3485 return ret;
3486 } else {
3487 ret = push_nodes_for_insert(trans, root, path, level);
3488 c = path->nodes[level];
3489 if (!ret && btrfs_header_nritems(c) <
3490 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3491 return 0;
3492 if (ret < 0)
3493 return ret;
3494 }
3495
3496 c_nritems = btrfs_header_nritems(c);
3497 mid = (c_nritems + 1) / 2;
3498 btrfs_node_key(c, &disk_key, mid);
3499
3500 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3501 &disk_key, level, c->start, 0);
3502 if (IS_ERR(split))
3503 return PTR_ERR(split);
3504
3505 root_add_used(root, root->nodesize);
3506
3507 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3508 btrfs_set_header_level(split, btrfs_header_level(c));
3509 btrfs_set_header_bytenr(split, split->start);
3510 btrfs_set_header_generation(split, trans->transid);
3511 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3512 btrfs_set_header_owner(split, root->root_key.objectid);
3513 write_extent_buffer(split, root->fs_info->fsid,
3514 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3515 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3516 btrfs_header_chunk_tree_uuid(split),
3517 BTRFS_UUID_SIZE);
3518
3519 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3520 mid, c_nritems - mid);
3521 if (ret) {
3522 btrfs_abort_transaction(trans, root, ret);
3523 return ret;
3524 }
3525 copy_extent_buffer(split, c,
3526 btrfs_node_key_ptr_offset(0),
3527 btrfs_node_key_ptr_offset(mid),
3528 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3529 btrfs_set_header_nritems(split, c_nritems - mid);
3530 btrfs_set_header_nritems(c, mid);
3531 ret = 0;
3532
3533 btrfs_mark_buffer_dirty(c);
3534 btrfs_mark_buffer_dirty(split);
3535
3536 insert_ptr(trans, root, path, &disk_key, split->start,
3537 path->slots[level + 1] + 1, level + 1);
3538
3539 if (path->slots[level] >= mid) {
3540 path->slots[level] -= mid;
3541 btrfs_tree_unlock(c);
3542 free_extent_buffer(c);
3543 path->nodes[level] = split;
3544 path->slots[level + 1] += 1;
3545 } else {
3546 btrfs_tree_unlock(split);
3547 free_extent_buffer(split);
3548 }
3549 return ret;
3550 }
3551
3552 /*
3553 * how many bytes are required to store the items in a leaf. start
3554 * and nr indicate which items in the leaf to check. This totals up the
3555 * space used both by the item structs and the item data
3556 */
3557 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3558 {
3559 struct btrfs_item *start_item;
3560 struct btrfs_item *end_item;
3561 struct btrfs_map_token token;
3562 int data_len;
3563 int nritems = btrfs_header_nritems(l);
3564 int end = min(nritems, start + nr) - 1;
3565
3566 if (!nr)
3567 return 0;
3568 btrfs_init_map_token(&token);
3569 start_item = btrfs_item_nr(start);
3570 end_item = btrfs_item_nr(end);
3571 data_len = btrfs_token_item_offset(l, start_item, &token) +
3572 btrfs_token_item_size(l, start_item, &token);
3573 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3574 data_len += sizeof(struct btrfs_item) * nr;
3575 WARN_ON(data_len < 0);
3576 return data_len;
3577 }
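/*
 * Editor's illustration (not in the original source): a worked example of the
 * accounting above, with made-up offsets. Say nr = 3, the first item's data
 * starts at offset 3000 with size 100, and the last of the three items' data
 * starts at offset 2500. Item data is packed from the end of the leaf toward
 * the front, so the data consumed is (3000 + 100) - 2500 = 600 bytes, plus
 * 3 * sizeof(struct btrfs_item) (25 bytes each on disk) = 75 bytes of item
 * headers, for 675 bytes total.
 */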
3578
3579 /*
3580 * The space between the end of the leaf items and
3581 * the start of the leaf data. IOW, how much room
3582 * the leaf has left for both items and data
3583 */
3584 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3585 struct extent_buffer *leaf)
3586 {
3587 int nritems = btrfs_header_nritems(leaf);
3588 int ret;
3589 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3590 if (ret < 0) {
3591 btrfs_crit(root->fs_info,
3592 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3593 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3594 leaf_space_used(leaf, 0, nritems), nritems);
3595 }
3596 return ret;
3597 }
3598
3599 /*
3600 * min slot controls the lowest index we're willing to push to the
3601 * right. We'll push up to and including min_slot, but no lower
3602 */
3603 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3604 struct btrfs_root *root,
3605 struct btrfs_path *path,
3606 int data_size, int empty,
3607 struct extent_buffer *right,
3608 int free_space, u32 left_nritems,
3609 u32 min_slot)
3610 {
3611 struct extent_buffer *left = path->nodes[0];
3612 struct extent_buffer *upper = path->nodes[1];
3613 struct btrfs_map_token token;
3614 struct btrfs_disk_key disk_key;
3615 int slot;
3616 u32 i;
3617 int push_space = 0;
3618 int push_items = 0;
3619 struct btrfs_item *item;
3620 u32 nr;
3621 u32 right_nritems;
3622 u32 data_end;
3623 u32 this_item_size;
3624
3625 btrfs_init_map_token(&token);
3626
3627 if (empty)
3628 nr = 0;
3629 else
3630 nr = max_t(u32, 1, min_slot);
3631
3632 if (path->slots[0] >= left_nritems)
3633 push_space += data_size;
3634
3635 slot = path->slots[1];
3636 i = left_nritems - 1;
3637 while (i >= nr) {
3638 item = btrfs_item_nr(i);
3639
3640 if (!empty && push_items > 0) {
3641 if (path->slots[0] > i)
3642 break;
3643 if (path->slots[0] == i) {
3644 int space = btrfs_leaf_free_space(root, left);
3645 if (space + push_space * 2 > free_space)
3646 break;
3647 }
3648 }
3649
3650 if (path->slots[0] == i)
3651 push_space += data_size;
3652
3653 this_item_size = btrfs_item_size(left, item);
3654 if (this_item_size + sizeof(*item) + push_space > free_space)
3655 break;
3656
3657 push_items++;
3658 push_space += this_item_size + sizeof(*item);
3659 if (i == 0)
3660 break;
3661 i--;
3662 }
3663
3664 if (push_items == 0)
3665 goto out_unlock;
3666
3667 WARN_ON(!empty && push_items == left_nritems);
3668
3669 /* push left to right */
3670 right_nritems = btrfs_header_nritems(right);
3671
3672 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3673 push_space -= leaf_data_end(root, left);
3674
3675 /* make room in the right data area */
3676 data_end = leaf_data_end(root, right);
3677 memmove_extent_buffer(right,
3678 btrfs_leaf_data(right) + data_end - push_space,
3679 btrfs_leaf_data(right) + data_end,
3680 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3681
3682 /* copy from the left data area */
3683 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3684 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3685 btrfs_leaf_data(left) + leaf_data_end(root, left),
3686 push_space);
3687
3688 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3689 btrfs_item_nr_offset(0),
3690 right_nritems * sizeof(struct btrfs_item));
3691
3692 /* copy the items from left to right */
3693 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3694 btrfs_item_nr_offset(left_nritems - push_items),
3695 push_items * sizeof(struct btrfs_item));
3696
3697 /* update the item pointers */
3698 right_nritems += push_items;
3699 btrfs_set_header_nritems(right, right_nritems);
3700 push_space = BTRFS_LEAF_DATA_SIZE(root);
3701 for (i = 0; i < right_nritems; i++) {
3702 item = btrfs_item_nr(i);
3703 push_space -= btrfs_token_item_size(right, item, &token);
3704 btrfs_set_token_item_offset(right, item, push_space, &token);
3705 }
3706
3707 left_nritems -= push_items;
3708 btrfs_set_header_nritems(left, left_nritems);
3709
3710 if (left_nritems)
3711 btrfs_mark_buffer_dirty(left);
3712 else
3713 clean_tree_block(trans, root->fs_info, left);
3714
3715 btrfs_mark_buffer_dirty(right);
3716
3717 btrfs_item_key(right, &disk_key, 0);
3718 btrfs_set_node_key(upper, &disk_key, slot + 1);
3719 btrfs_mark_buffer_dirty(upper);
3720
3721 /* then fixup the leaf pointer in the path */
3722 if (path->slots[0] >= left_nritems) {
3723 path->slots[0] -= left_nritems;
3724 if (btrfs_header_nritems(path->nodes[0]) == 0)
3725 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3726 btrfs_tree_unlock(path->nodes[0]);
3727 free_extent_buffer(path->nodes[0]);
3728 path->nodes[0] = right;
3729 path->slots[1] += 1;
3730 } else {
3731 btrfs_tree_unlock(right);
3732 free_extent_buffer(right);
3733 }
3734 return 0;
3735
3736 out_unlock:
3737 btrfs_tree_unlock(right);
3738 free_extent_buffer(right);
3739 return 1;
3740 }
3741
3742 /*
3743 * push some data in the path leaf to the right, trying to free up at
3744 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3745 *
3746 * returns 1 if the push failed because the other node didn't have enough
3747 * room, 0 if everything worked out and < 0 if there were major errors.
3748 *
3749 * this will push starting from min_slot to the end of the leaf. It won't
3750 * push any slot lower than min_slot
3751 */
3752 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3753 *root, struct btrfs_path *path,
3754 int min_data_size, int data_size,
3755 int empty, u32 min_slot)
3756 {
3757 struct extent_buffer *left = path->nodes[0];
3758 struct extent_buffer *right;
3759 struct extent_buffer *upper;
3760 int slot;
3761 int free_space;
3762 u32 left_nritems;
3763 int ret;
3764
3765 if (!path->nodes[1])
3766 return 1;
3767
3768 slot = path->slots[1];
3769 upper = path->nodes[1];
3770 if (slot >= btrfs_header_nritems(upper) - 1)
3771 return 1;
3772
3773 btrfs_assert_tree_locked(path->nodes[1]);
3774
3775 right = read_node_slot(root, upper, slot + 1);
3776 if (right == NULL)
3777 return 1;
3778
3779 btrfs_tree_lock(right);
3780 btrfs_set_lock_blocking(right);
3781
3782 free_space = btrfs_leaf_free_space(root, right);
3783 if (free_space < data_size)
3784 goto out_unlock;
3785
3786 /* cow and double check */
3787 ret = btrfs_cow_block(trans, root, right, upper,
3788 slot + 1, &right);
3789 if (ret)
3790 goto out_unlock;
3791
3792 free_space = btrfs_leaf_free_space(root, right);
3793 if (free_space < data_size)
3794 goto out_unlock;
3795
3796 left_nritems = btrfs_header_nritems(left);
3797 if (left_nritems == 0)
3798 goto out_unlock;
3799
3800 if (path->slots[0] == left_nritems && !empty) {
3801 /* Key greater than all keys in the leaf, right neighbor has
3802 * enough room for it and we're not emptying our leaf to delete
3803 * it, therefore use right neighbor to insert the new item and
3804 * no need to touch/dirty our left leaf. */
3805 btrfs_tree_unlock(left);
3806 free_extent_buffer(left);
3807 path->nodes[0] = right;
3808 path->slots[0] = 0;
3809 path->slots[1]++;
3810 return 0;
3811 }
3812
3813 return __push_leaf_right(trans, root, path, min_data_size, empty,
3814 right, free_space, left_nritems, min_slot);
3815 out_unlock:
3816 btrfs_tree_unlock(right);
3817 free_extent_buffer(right);
3818 return 1;
3819 }
3820
3821 /*
3822 * push some data in the path leaf to the left, trying to free up at
3823 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3824 *
3825 * max_slot can put a limit on how far into the leaf we'll push items. The
3826 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3827 * items
3828 */
3829 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3830 struct btrfs_root *root,
3831 struct btrfs_path *path, int data_size,
3832 int empty, struct extent_buffer *left,
3833 int free_space, u32 right_nritems,
3834 u32 max_slot)
3835 {
3836 struct btrfs_disk_key disk_key;
3837 struct extent_buffer *right = path->nodes[0];
3838 int i;
3839 int push_space = 0;
3840 int push_items = 0;
3841 struct btrfs_item *item;
3842 u32 old_left_nritems;
3843 u32 nr;
3844 int ret = 0;
3845 u32 this_item_size;
3846 u32 old_left_item_size;
3847 struct btrfs_map_token token;
3848
3849 btrfs_init_map_token(&token);
3850
3851 if (empty)
3852 nr = min(right_nritems, max_slot);
3853 else
3854 nr = min(right_nritems - 1, max_slot);
3855
3856 for (i = 0; i < nr; i++) {
3857 item = btrfs_item_nr(i);
3858
3859 if (!empty && push_items > 0) {
3860 if (path->slots[0] < i)
3861 break;
3862 if (path->slots[0] == i) {
3863 int space = btrfs_leaf_free_space(root, right);
3864 if (space + push_space * 2 > free_space)
3865 break;
3866 }
3867 }
3868
3869 if (path->slots[0] == i)
3870 push_space += data_size;
3871
3872 this_item_size = btrfs_item_size(right, item);
3873 if (this_item_size + sizeof(*item) + push_space > free_space)
3874 break;
3875
3876 push_items++;
3877 push_space += this_item_size + sizeof(*item);
3878 }
3879
3880 if (push_items == 0) {
3881 ret = 1;
3882 goto out;
3883 }
3884 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3885
3886 /* push data from right to left */
3887 copy_extent_buffer(left, right,
3888 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3889 btrfs_item_nr_offset(0),
3890 push_items * sizeof(struct btrfs_item));
3891
3892 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3893 btrfs_item_offset_nr(right, push_items - 1);
3894
3895 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3896 leaf_data_end(root, left) - push_space,
3897 btrfs_leaf_data(right) +
3898 btrfs_item_offset_nr(right, push_items - 1),
3899 push_space);
3900 old_left_nritems = btrfs_header_nritems(left);
3901 BUG_ON(old_left_nritems <= 0);
3902
3903 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3904 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3905 u32 ioff;
3906
3907 item = btrfs_item_nr(i);
3908
3909 ioff = btrfs_token_item_offset(left, item, &token);
3910 btrfs_set_token_item_offset(left, item,
3911 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3912 &token);
3913 }
3914 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3915
3916 /* fixup right node */
3917 if (push_items > right_nritems)
3918 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3919 right_nritems);
3920
3921 if (push_items < right_nritems) {
3922 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3923 leaf_data_end(root, right);
3924 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3925 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3926 btrfs_leaf_data(right) +
3927 leaf_data_end(root, right), push_space);
3928
3929 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3930 btrfs_item_nr_offset(push_items),
3931 (btrfs_header_nritems(right) - push_items) *
3932 sizeof(struct btrfs_item));
3933 }
3934 right_nritems -= push_items;
3935 btrfs_set_header_nritems(right, right_nritems);
3936 push_space = BTRFS_LEAF_DATA_SIZE(root);
3937 for (i = 0; i < right_nritems; i++) {
3938 item = btrfs_item_nr(i);
3939
3940 push_space = push_space - btrfs_token_item_size(right,
3941 item, &token);
3942 btrfs_set_token_item_offset(right, item, push_space, &token);
3943 }
3944
3945 btrfs_mark_buffer_dirty(left);
3946 if (right_nritems)
3947 btrfs_mark_buffer_dirty(right);
3948 else
3949 clean_tree_block(trans, root->fs_info, right);
3950
3951 btrfs_item_key(right, &disk_key, 0);
3952 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3953
3954 /* then fixup the leaf pointer in the path */
3955 if (path->slots[0] < push_items) {
3956 path->slots[0] += old_left_nritems;
3957 btrfs_tree_unlock(path->nodes[0]);
3958 free_extent_buffer(path->nodes[0]);
3959 path->nodes[0] = left;
3960 path->slots[1] -= 1;
3961 } else {
3962 btrfs_tree_unlock(left);
3963 free_extent_buffer(left);
3964 path->slots[0] -= push_items;
3965 }
3966 BUG_ON(path->slots[0] < 0);
3967 return ret;
3968 out:
3969 btrfs_tree_unlock(left);
3970 free_extent_buffer(left);
3971 return ret;
3972 }
3973
3974 /*
3975 * push some data in the path leaf to the left, trying to free up at
3976 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3977 *
3978 * max_slot can put a limit on how far into the leaf we'll push items. The
3979 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3980 * items
3981 */
3982 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3983 *root, struct btrfs_path *path, int min_data_size,
3984 int data_size, int empty, u32 max_slot)
3985 {
3986 struct extent_buffer *right = path->nodes[0];
3987 struct extent_buffer *left;
3988 int slot;
3989 int free_space;
3990 u32 right_nritems;
3991 int ret = 0;
3992
3993 slot = path->slots[1];
3994 if (slot == 0)
3995 return 1;
3996 if (!path->nodes[1])
3997 return 1;
3998
3999 right_nritems = btrfs_header_nritems(right);
4000 if (right_nritems == 0)
4001 return 1;
4002
4003 btrfs_assert_tree_locked(path->nodes[1]);
4004
4005 left = read_node_slot(root, path->nodes[1], slot - 1);
4006 if (left == NULL)
4007 return 1;
4008
4009 btrfs_tree_lock(left);
4010 btrfs_set_lock_blocking(left);
4011
4012 free_space = btrfs_leaf_free_space(root, left);
4013 if (free_space < data_size) {
4014 ret = 1;
4015 goto out;
4016 }
4017
4018 /* cow and double check */
4019 ret = btrfs_cow_block(trans, root, left,
4020 path->nodes[1], slot - 1, &left);
4021 if (ret) {
4022 /* we hit -ENOSPC, but it isn't fatal here */
4023 if (ret == -ENOSPC)
4024 ret = 1;
4025 goto out;
4026 }
4027
4028 free_space = btrfs_leaf_free_space(root, left);
4029 if (free_space < data_size) {
4030 ret = 1;
4031 goto out;
4032 }
4033
4034 return __push_leaf_left(trans, root, path, min_data_size,
4035 empty, left, free_space, right_nritems,
4036 max_slot);
4037 out:
4038 btrfs_tree_unlock(left);
4039 free_extent_buffer(left);
4040 return ret;
4041 }
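
/*
 * Minimal usage sketch (illustrative only, nothing in this file calls it):
 * this mirrors how btrfs_del_items() further below uses push_leaf_left()
 * to try to drain a sparsely used leaf into its left neighbour.  Passing
 * 1 for both min_data_size and data_size makes any freed space count as
 * success, empty == 1 allows the leaf to be emptied completely, and
 * (u32)-1 places no limit on how many items may be moved.
 */
static int __maybe_unused example_drain_leaf_left(struct btrfs_trans_handle *trans,
						  struct btrfs_root *root,
						  struct btrfs_path *path)
{
	/* returns 0 if items were pushed, 1 if nothing could be moved, < 0 on error */
	return push_leaf_left(trans, root, path, 1, 1, 1, (u32)-1);
}
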
4042
4043 /*
4044 * split the path's leaf in two, making sure there is at least data_size
4045 * available for the resulting leaf level of the path.
4046 */
4047 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4048 struct btrfs_root *root,
4049 struct btrfs_path *path,
4050 struct extent_buffer *l,
4051 struct extent_buffer *right,
4052 int slot, int mid, int nritems)
4053 {
4054 int data_copy_size;
4055 int rt_data_off;
4056 int i;
4057 struct btrfs_disk_key disk_key;
4058 struct btrfs_map_token token;
4059
4060 btrfs_init_map_token(&token);
4061
4062 nritems = nritems - mid;
4063 btrfs_set_header_nritems(right, nritems);
4064 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4065
4066 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4067 btrfs_item_nr_offset(mid),
4068 nritems * sizeof(struct btrfs_item));
4069
4070 copy_extent_buffer(right, l,
4071 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4072 data_copy_size, btrfs_leaf_data(l) +
4073 leaf_data_end(root, l), data_copy_size);
4074
4075 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4076 btrfs_item_end_nr(l, mid);
4077
4078 for (i = 0; i < nritems; i++) {
4079 struct btrfs_item *item = btrfs_item_nr(i);
4080 u32 ioff;
4081
4082 ioff = btrfs_token_item_offset(right, item, &token);
4083 btrfs_set_token_item_offset(right, item,
4084 ioff + rt_data_off, &token);
4085 }
4086
4087 btrfs_set_header_nritems(l, mid);
4088 btrfs_item_key(right, &disk_key, 0);
4089 insert_ptr(trans, root, path, &disk_key, right->start,
4090 path->slots[1] + 1, 1);
4091
4092 btrfs_mark_buffer_dirty(right);
4093 btrfs_mark_buffer_dirty(l);
4094 BUG_ON(path->slots[0] != slot);
4095
4096 if (mid <= slot) {
4097 btrfs_tree_unlock(path->nodes[0]);
4098 free_extent_buffer(path->nodes[0]);
4099 path->nodes[0] = right;
4100 path->slots[0] -= mid;
4101 path->slots[1] += 1;
4102 } else {
4103 btrfs_tree_unlock(right);
4104 free_extent_buffer(right);
4105 }
4106
4107 BUG_ON(path->slots[0] < 0);
4108 }
4109
4110 /*
4111 * double splits happen when we need to insert a big item in the middle
4112 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4113 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4114 * A B C
4115 *
4116 * We avoid this by trying to push the items on either side of our target
4117 * into the adjacent leaves. If all goes well we can avoid the double split
4118 * completely.
4119 */
4120 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4121 struct btrfs_root *root,
4122 struct btrfs_path *path,
4123 int data_size)
4124 {
4125 int ret;
4126 int progress = 0;
4127 int slot;
4128 u32 nritems;
4129 int space_needed = data_size;
4130
4131 slot = path->slots[0];
4132 if (slot < btrfs_header_nritems(path->nodes[0]))
4133 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4134
4135 /*
4136 * try to push all the items after our slot into the
4137 * right leaf
4138 */
4139 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4140 if (ret < 0)
4141 return ret;
4142
4143 if (ret == 0)
4144 progress++;
4145
4146 nritems = btrfs_header_nritems(path->nodes[0]);
4147 /*
4148 * our goal is to get our slot at the start or end of a leaf. If
4149 * we've done so we're done
4150 */
4151 if (path->slots[0] == 0 || path->slots[0] == nritems)
4152 return 0;
4153
4154 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4155 return 0;
4156
4157 	/* try to push all the items before our slot into the left leaf */
4158 slot = path->slots[0];
4159 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4160 if (ret < 0)
4161 return ret;
4162
4163 if (ret == 0)
4164 progress++;
4165
4166 if (progress)
4167 return 0;
4168 return 1;
4169 }
4170
4171 /*
4172 * split the path's leaf in two, making sure there is at least data_size
4173 * available for the resulting leaf level of the path.
4174 *
4175 * returns 0 if all went well and < 0 on failure.
4176 */
4177 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4178 struct btrfs_root *root,
4179 struct btrfs_key *ins_key,
4180 struct btrfs_path *path, int data_size,
4181 int extend)
4182 {
4183 struct btrfs_disk_key disk_key;
4184 struct extent_buffer *l;
4185 u32 nritems;
4186 int mid;
4187 int slot;
4188 struct extent_buffer *right;
4189 struct btrfs_fs_info *fs_info = root->fs_info;
4190 int ret = 0;
4191 int wret;
4192 int split;
4193 int num_doubles = 0;
4194 int tried_avoid_double = 0;
4195
4196 l = path->nodes[0];
4197 slot = path->slots[0];
4198 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4199 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4200 return -EOVERFLOW;
4201
4202 /* first try to make some room by pushing left and right */
4203 if (data_size && path->nodes[1]) {
4204 int space_needed = data_size;
4205
4206 if (slot < btrfs_header_nritems(l))
4207 space_needed -= btrfs_leaf_free_space(root, l);
4208
4209 wret = push_leaf_right(trans, root, path, space_needed,
4210 space_needed, 0, 0);
4211 if (wret < 0)
4212 return wret;
4213 if (wret) {
4214 wret = push_leaf_left(trans, root, path, space_needed,
4215 space_needed, 0, (u32)-1);
4216 if (wret < 0)
4217 return wret;
4218 }
4219 l = path->nodes[0];
4220
4221 /* did the pushes work? */
4222 if (btrfs_leaf_free_space(root, l) >= data_size)
4223 return 0;
4224 }
4225
4226 if (!path->nodes[1]) {
4227 ret = insert_new_root(trans, root, path, 1);
4228 if (ret)
4229 return ret;
4230 }
4231 again:
4232 split = 1;
4233 l = path->nodes[0];
4234 slot = path->slots[0];
4235 nritems = btrfs_header_nritems(l);
4236 mid = (nritems + 1) / 2;
4237
4238 if (mid <= slot) {
4239 if (nritems == 1 ||
4240 leaf_space_used(l, mid, nritems - mid) + data_size >
4241 BTRFS_LEAF_DATA_SIZE(root)) {
4242 if (slot >= nritems) {
4243 split = 0;
4244 } else {
4245 mid = slot;
4246 if (mid != nritems &&
4247 leaf_space_used(l, mid, nritems - mid) +
4248 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4249 if (data_size && !tried_avoid_double)
4250 goto push_for_double;
4251 split = 2;
4252 }
4253 }
4254 }
4255 } else {
4256 if (leaf_space_used(l, 0, mid) + data_size >
4257 BTRFS_LEAF_DATA_SIZE(root)) {
4258 if (!extend && data_size && slot == 0) {
4259 split = 0;
4260 } else if ((extend || !data_size) && slot == 0) {
4261 mid = 1;
4262 } else {
4263 mid = slot;
4264 if (mid != nritems &&
4265 leaf_space_used(l, mid, nritems - mid) +
4266 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4267 if (data_size && !tried_avoid_double)
4268 goto push_for_double;
4269 split = 2;
4270 }
4271 }
4272 }
4273 }
4274
4275 if (split == 0)
4276 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4277 else
4278 btrfs_item_key(l, &disk_key, mid);
4279
4280 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4281 &disk_key, 0, l->start, 0);
4282 if (IS_ERR(right))
4283 return PTR_ERR(right);
4284
4285 root_add_used(root, root->nodesize);
4286
4287 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4288 btrfs_set_header_bytenr(right, right->start);
4289 btrfs_set_header_generation(right, trans->transid);
4290 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4291 btrfs_set_header_owner(right, root->root_key.objectid);
4292 btrfs_set_header_level(right, 0);
4293 write_extent_buffer(right, fs_info->fsid,
4294 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4295
4296 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4297 btrfs_header_chunk_tree_uuid(right),
4298 BTRFS_UUID_SIZE);
4299
4300 if (split == 0) {
4301 if (mid <= slot) {
4302 btrfs_set_header_nritems(right, 0);
4303 insert_ptr(trans, root, path, &disk_key, right->start,
4304 path->slots[1] + 1, 1);
4305 btrfs_tree_unlock(path->nodes[0]);
4306 free_extent_buffer(path->nodes[0]);
4307 path->nodes[0] = right;
4308 path->slots[0] = 0;
4309 path->slots[1] += 1;
4310 } else {
4311 btrfs_set_header_nritems(right, 0);
4312 insert_ptr(trans, root, path, &disk_key, right->start,
4313 path->slots[1], 1);
4314 btrfs_tree_unlock(path->nodes[0]);
4315 free_extent_buffer(path->nodes[0]);
4316 path->nodes[0] = right;
4317 path->slots[0] = 0;
4318 if (path->slots[1] == 0)
4319 fixup_low_keys(fs_info, path, &disk_key, 1);
4320 }
4321 btrfs_mark_buffer_dirty(right);
4322 return ret;
4323 }
4324
4325 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4326
4327 if (split == 2) {
4328 BUG_ON(num_doubles != 0);
4329 num_doubles++;
4330 goto again;
4331 }
4332
4333 return 0;
4334
4335 push_for_double:
4336 push_for_double_split(trans, root, path, data_size);
4337 tried_avoid_double = 1;
4338 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4339 return 0;
4340 goto again;
4341 }
4342
4343 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4344 struct btrfs_root *root,
4345 struct btrfs_path *path, int ins_len)
4346 {
4347 struct btrfs_key key;
4348 struct extent_buffer *leaf;
4349 struct btrfs_file_extent_item *fi;
4350 u64 extent_len = 0;
4351 u32 item_size;
4352 int ret;
4353
4354 leaf = path->nodes[0];
4355 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4356
4357 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4358 key.type != BTRFS_EXTENT_CSUM_KEY);
4359
4360 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4361 return 0;
4362
4363 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4364 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4365 fi = btrfs_item_ptr(leaf, path->slots[0],
4366 struct btrfs_file_extent_item);
4367 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4368 }
4369 btrfs_release_path(path);
4370
4371 path->keep_locks = 1;
4372 path->search_for_split = 1;
4373 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4374 path->search_for_split = 0;
4375 if (ret > 0)
4376 ret = -EAGAIN;
4377 if (ret < 0)
4378 goto err;
4379
4380 ret = -EAGAIN;
4381 leaf = path->nodes[0];
4382 /* if our item isn't there, return now */
4383 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4384 goto err;
4385
4386 /* the leaf has changed, it now has room. return now */
4387 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4388 goto err;
4389
4390 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4391 fi = btrfs_item_ptr(leaf, path->slots[0],
4392 struct btrfs_file_extent_item);
4393 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4394 goto err;
4395 }
4396
4397 btrfs_set_path_blocking(path);
4398 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4399 if (ret)
4400 goto err;
4401
4402 path->keep_locks = 0;
4403 btrfs_unlock_up_safe(path, 1);
4404 return 0;
4405 err:
4406 path->keep_locks = 0;
4407 return ret;
4408 }
4409
4410 static noinline int split_item(struct btrfs_trans_handle *trans,
4411 struct btrfs_root *root,
4412 struct btrfs_path *path,
4413 struct btrfs_key *new_key,
4414 unsigned long split_offset)
4415 {
4416 struct extent_buffer *leaf;
4417 struct btrfs_item *item;
4418 struct btrfs_item *new_item;
4419 int slot;
4420 char *buf;
4421 u32 nritems;
4422 u32 item_size;
4423 u32 orig_offset;
4424 struct btrfs_disk_key disk_key;
4425
4426 leaf = path->nodes[0];
4427 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4428
4429 btrfs_set_path_blocking(path);
4430
4431 item = btrfs_item_nr(path->slots[0]);
4432 orig_offset = btrfs_item_offset(leaf, item);
4433 item_size = btrfs_item_size(leaf, item);
4434
4435 buf = kmalloc(item_size, GFP_NOFS);
4436 if (!buf)
4437 return -ENOMEM;
4438
4439 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4440 path->slots[0]), item_size);
4441
4442 slot = path->slots[0] + 1;
4443 nritems = btrfs_header_nritems(leaf);
4444 if (slot != nritems) {
4445 /* shift the items */
4446 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4447 btrfs_item_nr_offset(slot),
4448 (nritems - slot) * sizeof(struct btrfs_item));
4449 }
4450
4451 btrfs_cpu_key_to_disk(&disk_key, new_key);
4452 btrfs_set_item_key(leaf, &disk_key, slot);
4453
4454 new_item = btrfs_item_nr(slot);
4455
4456 btrfs_set_item_offset(leaf, new_item, orig_offset);
4457 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4458
4459 btrfs_set_item_offset(leaf, item,
4460 orig_offset + item_size - split_offset);
4461 btrfs_set_item_size(leaf, item, split_offset);
4462
4463 btrfs_set_header_nritems(leaf, nritems + 1);
4464
4465 /* write the data for the start of the original item */
4466 write_extent_buffer(leaf, buf,
4467 btrfs_item_ptr_offset(leaf, path->slots[0]),
4468 split_offset);
4469
4470 /* write the data for the new item */
4471 write_extent_buffer(leaf, buf + split_offset,
4472 btrfs_item_ptr_offset(leaf, slot),
4473 item_size - split_offset);
4474 btrfs_mark_buffer_dirty(leaf);
4475
4476 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4477 kfree(buf);
4478 return 0;
4479 }
4480
4481 /*
4482 * This function splits a single item into two items,
4483 * giving 'new_key' to the new item and splitting the
4484 * old one at split_offset (from the start of the item).
4485 *
4486 * The path may be released by this operation. After
4487 * the split, the path is pointing to the old item. The
4488 * new item is going to be in the same node as the old one.
4489 *
4490 * Note, the item being split must be small enough to live alone in
4491 * a tree block with room for one extra struct btrfs_item.
4492 *
4493 * This allows us to split the item in place, keeping a lock on the
4494 * leaf the entire time.
4495 */
4496 int btrfs_split_item(struct btrfs_trans_handle *trans,
4497 struct btrfs_root *root,
4498 struct btrfs_path *path,
4499 struct btrfs_key *new_key,
4500 unsigned long split_offset)
4501 {
4502 int ret;
4503 ret = setup_leaf_for_split(trans, root, path,
4504 sizeof(struct btrfs_item));
4505 if (ret)
4506 return ret;
4507
4508 ret = split_item(trans, root, path, new_key, split_offset);
4509 return ret;
4510 }
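
/*
 * Minimal usage sketch for btrfs_split_item() (illustrative only; the key
 * choice below is hypothetical).  setup_leaf_for_split() above only
 * accepts EXTENT_DATA and EXTENT_CSUM items, so the path must point at
 * one of those.  After the call the path still points at the first half,
 * which keeps bytes [0, split_offset), and the following slot holds the
 * remaining bytes under new_key.
 */
static int __maybe_unused example_split_item_in_half(struct btrfs_trans_handle *trans,
						     struct btrfs_root *root,
						     struct btrfs_path *path)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_key new_key;
	u32 item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	btrfs_item_key_to_cpu(leaf, &new_key, path->slots[0]);
	/* hypothetical: give the second half a key offset past the first */
	new_key.offset += item_size / 2;

	return btrfs_split_item(trans, root, path, &new_key, item_size / 2);
}
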
4511
4512 /*
4513 * This function duplicates an item, giving 'new_key' to the new item.
4514 * It guarantees both items live in the same tree leaf and the new item
4515 * is contiguous with the original item.
4516 *
4517 * This allows us to split a file extent in place, keeping a lock on the
4518 * leaf the entire time.
4519 */
4520 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4521 struct btrfs_root *root,
4522 struct btrfs_path *path,
4523 struct btrfs_key *new_key)
4524 {
4525 struct extent_buffer *leaf;
4526 int ret;
4527 u32 item_size;
4528
4529 leaf = path->nodes[0];
4530 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4531 ret = setup_leaf_for_split(trans, root, path,
4532 item_size + sizeof(struct btrfs_item));
4533 if (ret)
4534 return ret;
4535
4536 path->slots[0]++;
4537 setup_items_for_insert(root, path, new_key, &item_size,
4538 item_size, item_size +
4539 sizeof(struct btrfs_item), 1);
4540 leaf = path->nodes[0];
4541 memcpy_extent_buffer(leaf,
4542 btrfs_item_ptr_offset(leaf, path->slots[0]),
4543 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4544 item_size);
4545 return 0;
4546 }
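
/*
 * Minimal usage sketch for btrfs_duplicate_item() (illustrative only).
 * Splitting a file extent in place is the typical use: the EXTENT_DATA
 * item the path points at is cloned under a key whose offset is the file
 * position of the split, and the caller then trims both copies so each
 * one describes its half of the range.  The trimming step is omitted
 * here.  On success path->slots[0] points at the new copy.
 */
static int __maybe_unused example_dup_file_extent(struct btrfs_trans_handle *trans,
						  struct btrfs_root *root,
						  struct btrfs_path *path,
						  u64 split_pos)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = split_pos;	/* file offset of the second half */

	return btrfs_duplicate_item(trans, root, path, &new_key);
}
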
4547
4548 /*
4549 * make the item pointed to by the path smaller. new_size indicates
4550 * how small to make it, and from_end tells us if we just chop bytes
4551 * off the end of the item or if we shift the item to chop bytes off
4552 * the front.
4553 */
4554 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4555 u32 new_size, int from_end)
4556 {
4557 int slot;
4558 struct extent_buffer *leaf;
4559 struct btrfs_item *item;
4560 u32 nritems;
4561 unsigned int data_end;
4562 unsigned int old_data_start;
4563 unsigned int old_size;
4564 unsigned int size_diff;
4565 int i;
4566 struct btrfs_map_token token;
4567
4568 btrfs_init_map_token(&token);
4569
4570 leaf = path->nodes[0];
4571 slot = path->slots[0];
4572
4573 old_size = btrfs_item_size_nr(leaf, slot);
4574 if (old_size == new_size)
4575 return;
4576
4577 nritems = btrfs_header_nritems(leaf);
4578 data_end = leaf_data_end(root, leaf);
4579
4580 old_data_start = btrfs_item_offset_nr(leaf, slot);
4581
4582 size_diff = old_size - new_size;
4583
4584 BUG_ON(slot < 0);
4585 BUG_ON(slot >= nritems);
4586
4587 /*
4588 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4589 */
4590 /* first correct the data pointers */
4591 for (i = slot; i < nritems; i++) {
4592 u32 ioff;
4593 item = btrfs_item_nr(i);
4594
4595 ioff = btrfs_token_item_offset(leaf, item, &token);
4596 btrfs_set_token_item_offset(leaf, item,
4597 ioff + size_diff, &token);
4598 }
4599
4600 /* shift the data */
4601 if (from_end) {
4602 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4603 data_end + size_diff, btrfs_leaf_data(leaf) +
4604 data_end, old_data_start + new_size - data_end);
4605 } else {
4606 struct btrfs_disk_key disk_key;
4607 u64 offset;
4608
4609 btrfs_item_key(leaf, &disk_key, slot);
4610
4611 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4612 unsigned long ptr;
4613 struct btrfs_file_extent_item *fi;
4614
4615 fi = btrfs_item_ptr(leaf, slot,
4616 struct btrfs_file_extent_item);
4617 fi = (struct btrfs_file_extent_item *)(
4618 (unsigned long)fi - size_diff);
4619
4620 if (btrfs_file_extent_type(leaf, fi) ==
4621 BTRFS_FILE_EXTENT_INLINE) {
4622 ptr = btrfs_item_ptr_offset(leaf, slot);
4623 memmove_extent_buffer(leaf, ptr,
4624 (unsigned long)fi,
4625 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4626 }
4627 }
4628
4629 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4630 data_end + size_diff, btrfs_leaf_data(leaf) +
4631 data_end, old_data_start - data_end);
4632
4633 offset = btrfs_disk_key_offset(&disk_key);
4634 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4635 btrfs_set_item_key(leaf, &disk_key, slot);
4636 if (slot == 0)
4637 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4638 }
4639
4640 item = btrfs_item_nr(slot);
4641 btrfs_set_item_size(leaf, item, new_size);
4642 btrfs_mark_buffer_dirty(leaf);
4643
4644 if (btrfs_leaf_free_space(root, leaf) < 0) {
4645 btrfs_print_leaf(root, leaf);
4646 BUG();
4647 }
4648 }
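
/*
 * Minimal usage sketch for btrfs_truncate_item() (illustrative only):
 * drop 'bytes' of payload from the end of the item the path points at.
 * Passing from_end == 0 instead would discard bytes from the front and
 * shift the remaining data, adjusting the key offset for EXTENT_DATA
 * items as the function above does.
 */
static void __maybe_unused example_shrink_item_tail(struct btrfs_root *root,
						    struct btrfs_path *path,
						    u32 bytes)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	if (bytes >= old_size)
		return;		/* this sketch only handles partial shrinks */
	btrfs_truncate_item(root, path, old_size - bytes, 1);
}
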
4649
4650 /*
4651 * make the item pointed to by the path bigger, data_size is the added size.
4652 */
4653 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4654 u32 data_size)
4655 {
4656 int slot;
4657 struct extent_buffer *leaf;
4658 struct btrfs_item *item;
4659 u32 nritems;
4660 unsigned int data_end;
4661 unsigned int old_data;
4662 unsigned int old_size;
4663 int i;
4664 struct btrfs_map_token token;
4665
4666 btrfs_init_map_token(&token);
4667
4668 leaf = path->nodes[0];
4669
4670 nritems = btrfs_header_nritems(leaf);
4671 data_end = leaf_data_end(root, leaf);
4672
4673 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4674 btrfs_print_leaf(root, leaf);
4675 BUG();
4676 }
4677 slot = path->slots[0];
4678 old_data = btrfs_item_end_nr(leaf, slot);
4679
4680 BUG_ON(slot < 0);
4681 if (slot >= nritems) {
4682 btrfs_print_leaf(root, leaf);
4683 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4684 slot, nritems);
4685 BUG_ON(1);
4686 }
4687
4688 /*
4689 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4690 */
4691 /* first correct the data pointers */
4692 for (i = slot; i < nritems; i++) {
4693 u32 ioff;
4694 item = btrfs_item_nr(i);
4695
4696 ioff = btrfs_token_item_offset(leaf, item, &token);
4697 btrfs_set_token_item_offset(leaf, item,
4698 ioff - data_size, &token);
4699 }
4700
4701 /* shift the data */
4702 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4703 data_end - data_size, btrfs_leaf_data(leaf) +
4704 data_end, old_data - data_end);
4705
4706 data_end = old_data;
4707 old_size = btrfs_item_size_nr(leaf, slot);
4708 item = btrfs_item_nr(slot);
4709 btrfs_set_item_size(leaf, item, old_size + data_size);
4710 btrfs_mark_buffer_dirty(leaf);
4711
4712 if (btrfs_leaf_free_space(root, leaf) < 0) {
4713 btrfs_print_leaf(root, leaf);
4714 BUG();
4715 }
4716 }
4717
4718 /*
4719 * this is a helper for btrfs_insert_empty_items; the main goal here is
4720 * to save stack depth by doing the bulk of the work in a function
4721 * that doesn't call btrfs_search_slot
4722 */
4723 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4724 struct btrfs_key *cpu_key, u32 *data_size,
4725 u32 total_data, u32 total_size, int nr)
4726 {
4727 struct btrfs_item *item;
4728 int i;
4729 u32 nritems;
4730 unsigned int data_end;
4731 struct btrfs_disk_key disk_key;
4732 struct extent_buffer *leaf;
4733 int slot;
4734 struct btrfs_map_token token;
4735
4736 if (path->slots[0] == 0) {
4737 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4738 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4739 }
4740 btrfs_unlock_up_safe(path, 1);
4741
4742 btrfs_init_map_token(&token);
4743
4744 leaf = path->nodes[0];
4745 slot = path->slots[0];
4746
4747 nritems = btrfs_header_nritems(leaf);
4748 data_end = leaf_data_end(root, leaf);
4749
4750 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4751 btrfs_print_leaf(root, leaf);
4752 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4753 total_size, btrfs_leaf_free_space(root, leaf));
4754 BUG();
4755 }
4756
4757 if (slot != nritems) {
4758 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4759
4760 if (old_data < data_end) {
4761 btrfs_print_leaf(root, leaf);
4762 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4763 slot, old_data, data_end);
4764 BUG_ON(1);
4765 }
4766 /*
4767 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4768 */
4769 /* first correct the data pointers */
4770 for (i = slot; i < nritems; i++) {
4771 u32 ioff;
4772
4773 			item = btrfs_item_nr(i);
4774 ioff = btrfs_token_item_offset(leaf, item, &token);
4775 btrfs_set_token_item_offset(leaf, item,
4776 ioff - total_data, &token);
4777 }
4778 /* shift the items */
4779 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4780 btrfs_item_nr_offset(slot),
4781 (nritems - slot) * sizeof(struct btrfs_item));
4782
4783 /* shift the data */
4784 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4785 data_end - total_data, btrfs_leaf_data(leaf) +
4786 data_end, old_data - data_end);
4787 data_end = old_data;
4788 }
4789
4790 /* setup the item for the new data */
4791 for (i = 0; i < nr; i++) {
4792 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4793 btrfs_set_item_key(leaf, &disk_key, slot + i);
4794 item = btrfs_item_nr(slot + i);
4795 btrfs_set_token_item_offset(leaf, item,
4796 data_end - data_size[i], &token);
4797 data_end -= data_size[i];
4798 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4799 }
4800
4801 btrfs_set_header_nritems(leaf, nritems + nr);
4802 btrfs_mark_buffer_dirty(leaf);
4803
4804 if (btrfs_leaf_free_space(root, leaf) < 0) {
4805 btrfs_print_leaf(root, leaf);
4806 BUG();
4807 }
4808 }
4809
4810 /*
4811 * Given a key and some data, insert items into the tree.
4812 * This does all the path init required, making room in the tree if needed.
4813 */
4814 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4815 struct btrfs_root *root,
4816 struct btrfs_path *path,
4817 struct btrfs_key *cpu_key, u32 *data_size,
4818 int nr)
4819 {
4820 int ret = 0;
4821 int slot;
4822 int i;
4823 u32 total_size = 0;
4824 u32 total_data = 0;
4825
4826 for (i = 0; i < nr; i++)
4827 total_data += data_size[i];
4828
4829 total_size = total_data + (nr * sizeof(struct btrfs_item));
4830 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4831 if (ret == 0)
4832 return -EEXIST;
4833 if (ret < 0)
4834 return ret;
4835
4836 slot = path->slots[0];
4837 BUG_ON(slot < 0);
4838
4839 setup_items_for_insert(root, path, cpu_key, data_size,
4840 total_data, total_size, nr);
4841 return 0;
4842 }
4843
4844 /*
4845 * Given a key and some data, insert an item into the tree.
4846 * This does all the path init required, making room in the tree if needed.
4847 */
4848 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4849 *root, struct btrfs_key *cpu_key, void *data, u32
4850 data_size)
4851 {
4852 int ret = 0;
4853 struct btrfs_path *path;
4854 struct extent_buffer *leaf;
4855 unsigned long ptr;
4856
4857 path = btrfs_alloc_path();
4858 if (!path)
4859 return -ENOMEM;
4860 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4861 if (!ret) {
4862 leaf = path->nodes[0];
4863 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4864 write_extent_buffer(leaf, data, ptr, data_size);
4865 btrfs_mark_buffer_dirty(leaf);
4866 }
4867 btrfs_free_path(path);
4868 return ret;
4869 }
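
/*
 * Minimal sketch of a batched insert (illustrative only, keys and payloads
 * are hypothetical): btrfs_insert_empty_items() reserves room for all the
 * items with a single tree search, and the caller then writes each payload
 * exactly as btrfs_insert_item() above does for a single item.  The keys
 * must be sorted and must not already exist.
 */
static int __maybe_unused example_insert_two_items(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_key *keys,
						   void *data[2], u32 sizes[2])
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (!ret) {
		leaf = path->nodes[0];
		for (i = 0; i < 2; i++) {
			ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
			write_extent_buffer(leaf, data[i], ptr, sizes[i]);
		}
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
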
4870
4871 /*
4872 * delete the pointer from a given node.
4873 *
4874 * the tree should have been previously balanced so the deletion does not
4875 * empty a node.
4876 */
4877 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4878 int level, int slot)
4879 {
4880 struct extent_buffer *parent = path->nodes[level];
4881 u32 nritems;
4882 int ret;
4883
4884 nritems = btrfs_header_nritems(parent);
4885 if (slot != nritems - 1) {
4886 if (level)
4887 tree_mod_log_eb_move(root->fs_info, parent, slot,
4888 slot + 1, nritems - slot - 1);
4889 memmove_extent_buffer(parent,
4890 btrfs_node_key_ptr_offset(slot),
4891 btrfs_node_key_ptr_offset(slot + 1),
4892 sizeof(struct btrfs_key_ptr) *
4893 (nritems - slot - 1));
4894 } else if (level) {
4895 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4896 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4897 BUG_ON(ret < 0);
4898 }
4899
4900 nritems--;
4901 btrfs_set_header_nritems(parent, nritems);
4902 if (nritems == 0 && parent == root->node) {
4903 BUG_ON(btrfs_header_level(root->node) != 1);
4904 /* just turn the root into a leaf and break */
4905 btrfs_set_header_level(root->node, 0);
4906 } else if (slot == 0) {
4907 struct btrfs_disk_key disk_key;
4908
4909 btrfs_node_key(parent, &disk_key, 0);
4910 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4911 }
4912 btrfs_mark_buffer_dirty(parent);
4913 }
4914
4915 /*
4916 * a helper function to delete the leaf pointed to by path->slots[1] and
4917 * path->nodes[1].
4918 *
4919 * This deletes the pointer in path->nodes[1] and frees the leaf
4920 * block extent. zero is returned if it all worked out, < 0 otherwise.
4921 *
4922 * The path must have already been set up for deleting the leaf, including
4923 * all the proper balancing. path->nodes[1] must be locked.
4924 */
4925 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4926 struct btrfs_root *root,
4927 struct btrfs_path *path,
4928 struct extent_buffer *leaf)
4929 {
4930 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4931 del_ptr(root, path, 1, path->slots[1]);
4932
4933 /*
4934 * btrfs_free_extent is expensive, so we want to make sure we
4935 * aren't holding any locks when we call it
4936 */
4937 btrfs_unlock_up_safe(path, 0);
4938
4939 root_sub_used(root, leaf->len);
4940
4941 extent_buffer_get(leaf);
4942 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4943 free_extent_buffer_stale(leaf);
4944 }
4945 /*
4946 * delete the item at the leaf level in path. If that empties
4947 * the leaf, remove it from the tree
4948 */
4949 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4950 struct btrfs_path *path, int slot, int nr)
4951 {
4952 struct extent_buffer *leaf;
4953 struct btrfs_item *item;
4954 u32 last_off;
4955 u32 dsize = 0;
4956 int ret = 0;
4957 int wret;
4958 int i;
4959 u32 nritems;
4960 struct btrfs_map_token token;
4961
4962 btrfs_init_map_token(&token);
4963
4964 leaf = path->nodes[0];
4965 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4966
4967 for (i = 0; i < nr; i++)
4968 dsize += btrfs_item_size_nr(leaf, slot + i);
4969
4970 nritems = btrfs_header_nritems(leaf);
4971
4972 if (slot + nr != nritems) {
4973 int data_end = leaf_data_end(root, leaf);
4974
4975 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4976 data_end + dsize,
4977 btrfs_leaf_data(leaf) + data_end,
4978 last_off - data_end);
4979
4980 for (i = slot + nr; i < nritems; i++) {
4981 u32 ioff;
4982
4983 item = btrfs_item_nr(i);
4984 ioff = btrfs_token_item_offset(leaf, item, &token);
4985 btrfs_set_token_item_offset(leaf, item,
4986 ioff + dsize, &token);
4987 }
4988
4989 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4990 btrfs_item_nr_offset(slot + nr),
4991 sizeof(struct btrfs_item) *
4992 (nritems - slot - nr));
4993 }
4994 btrfs_set_header_nritems(leaf, nritems - nr);
4995 nritems -= nr;
4996
4997 /* delete the leaf if we've emptied it */
4998 if (nritems == 0) {
4999 if (leaf == root->node) {
5000 btrfs_set_header_level(leaf, 0);
5001 } else {
5002 btrfs_set_path_blocking(path);
5003 clean_tree_block(trans, root->fs_info, leaf);
5004 btrfs_del_leaf(trans, root, path, leaf);
5005 }
5006 } else {
5007 int used = leaf_space_used(leaf, 0, nritems);
5008 if (slot == 0) {
5009 struct btrfs_disk_key disk_key;
5010
5011 btrfs_item_key(leaf, &disk_key, 0);
5012 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5013 }
5014
5015 /* delete the leaf if it is mostly empty */
5016 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5017 /* push_leaf_left fixes the path.
5018 * make sure the path still points to our leaf
5019 * for possible call to del_ptr below
5020 */
5021 slot = path->slots[1];
5022 extent_buffer_get(leaf);
5023
5024 btrfs_set_path_blocking(path);
5025 wret = push_leaf_left(trans, root, path, 1, 1,
5026 1, (u32)-1);
5027 if (wret < 0 && wret != -ENOSPC)
5028 ret = wret;
5029
5030 if (path->nodes[0] == leaf &&
5031 btrfs_header_nritems(leaf)) {
5032 wret = push_leaf_right(trans, root, path, 1,
5033 1, 1, 0);
5034 if (wret < 0 && wret != -ENOSPC)
5035 ret = wret;
5036 }
5037
5038 if (btrfs_header_nritems(leaf) == 0) {
5039 path->slots[1] = slot;
5040 btrfs_del_leaf(trans, root, path, leaf);
5041 free_extent_buffer(leaf);
5042 ret = 0;
5043 } else {
5044 /* if we're still in the path, make sure
5045 * we're dirty. Otherwise, one of the
5046 * push_leaf functions must have already
5047 * dirtied this buffer
5048 */
5049 if (path->nodes[0] == leaf)
5050 btrfs_mark_buffer_dirty(leaf);
5051 free_extent_buffer(leaf);
5052 }
5053 } else {
5054 btrfs_mark_buffer_dirty(leaf);
5055 }
5056 }
5057 return ret;
5058 }
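
/*
 * Minimal deletion sketch (illustrative only): look the key up with
 * ins_len == -1 so btrfs_search_slot() pre-balances the path for a
 * removal, then hand the slot to btrfs_del_items().  Removing several
 * adjacent items only changes the last argument.
 */
static int __maybe_unused example_delete_one_item(struct btrfs_trans_handle *trans,
						  struct btrfs_root *root,
						  struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not found */
	btrfs_free_path(path);
	return ret;
}
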
5059
5060 /*
5061 * search the tree again to find a leaf with lesser keys
5062 * returns 0 if it found something or 1 if there are no lesser leaves.
5063 * returns < 0 on io errors.
5064 *
5065 * This may release the path, and so you may lose any locks held at the
5066 * time you call it.
5067 */
5068 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5069 {
5070 struct btrfs_key key;
5071 struct btrfs_disk_key found_key;
5072 int ret;
5073
5074 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5075
5076 if (key.offset > 0) {
5077 key.offset--;
5078 } else if (key.type > 0) {
5079 key.type--;
5080 key.offset = (u64)-1;
5081 } else if (key.objectid > 0) {
5082 key.objectid--;
5083 key.type = (u8)-1;
5084 key.offset = (u64)-1;
5085 } else {
5086 return 1;
5087 }
5088
5089 btrfs_release_path(path);
5090 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5091 if (ret < 0)
5092 return ret;
5093 btrfs_item_key(path->nodes[0], &found_key, 0);
5094 ret = comp_keys(&found_key, &key);
5095 /*
5096 * We might have had an item with the previous key in the tree right
5097 * before we released our path. And after we released our path, that
5098 * item might have been pushed to the first slot (0) of the leaf we
5099 * were holding due to a tree balance. Alternatively, an item with the
5100 * previous key can exist as the only element of a leaf (big fat item).
5101 * Therefore account for these 2 cases, so that our callers (like
5102 * btrfs_previous_item) don't miss an existing item with a key matching
5103 * the previous key we computed above.
5104 */
5105 if (ret <= 0)
5106 return 0;
5107 return 1;
5108 }
5109
5110 /*
5111 * A helper function to walk down the tree starting at min_key, looking
5112 * for nodes or leaves that have a minimum transaction id.
5113 * This is used by the btree defrag code and by tree logging
5114 *
5115 * This does not cow, but it does stuff the starting key it finds back
5116 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5117 * key and get a writable path.
5118 *
5119 * This does lock as it descends, and path->keep_locks should be set
5120 * to 1 by the caller.
5121 *
5122 * This honors path->lowest_level to prevent descent past a given level
5123 * of the tree.
5124 *
5125 * min_trans indicates the oldest transaction that you are interested
5126 * in walking through. Any nodes or leaves older than min_trans are
5127 * skipped over (without reading them).
5128 *
5129 * returns zero if something useful was found, < 0 on error and 1 if there
5130 * was nothing in the tree that matched the search criteria.
5131 */
5132 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5133 struct btrfs_path *path,
5134 u64 min_trans)
5135 {
5136 struct extent_buffer *cur;
5137 struct btrfs_key found_key;
5138 int slot;
5139 int sret;
5140 u32 nritems;
5141 int level;
5142 int ret = 1;
5143 int keep_locks = path->keep_locks;
5144
5145 path->keep_locks = 1;
5146 again:
5147 cur = btrfs_read_lock_root_node(root);
5148 level = btrfs_header_level(cur);
5149 WARN_ON(path->nodes[level]);
5150 path->nodes[level] = cur;
5151 path->locks[level] = BTRFS_READ_LOCK;
5152
5153 if (btrfs_header_generation(cur) < min_trans) {
5154 ret = 1;
5155 goto out;
5156 }
5157 while (1) {
5158 nritems = btrfs_header_nritems(cur);
5159 level = btrfs_header_level(cur);
5160 sret = bin_search(cur, min_key, level, &slot);
5161
5162 /* at the lowest level, we're done, setup the path and exit */
5163 if (level == path->lowest_level) {
5164 if (slot >= nritems)
5165 goto find_next_key;
5166 ret = 0;
5167 path->slots[level] = slot;
5168 btrfs_item_key_to_cpu(cur, &found_key, slot);
5169 goto out;
5170 }
5171 if (sret && slot > 0)
5172 slot--;
5173 /*
5174 		 * check this node pointer against the min_trans parameter.
5175 		 * If it is too old, skip to the next one.
5176 */
5177 while (slot < nritems) {
5178 u64 gen;
5179
5180 gen = btrfs_node_ptr_generation(cur, slot);
5181 if (gen < min_trans) {
5182 slot++;
5183 continue;
5184 }
5185 break;
5186 }
5187 find_next_key:
5188 /*
5189 * we didn't find a candidate key in this node, walk forward
5190 * and find another one
5191 */
5192 if (slot >= nritems) {
5193 path->slots[level] = slot;
5194 btrfs_set_path_blocking(path);
5195 sret = btrfs_find_next_key(root, path, min_key, level,
5196 min_trans);
5197 if (sret == 0) {
5198 btrfs_release_path(path);
5199 goto again;
5200 } else {
5201 goto out;
5202 }
5203 }
5204 /* save our key for returning back */
5205 btrfs_node_key_to_cpu(cur, &found_key, slot);
5206 path->slots[level] = slot;
5207 if (level == path->lowest_level) {
5208 ret = 0;
5209 goto out;
5210 }
5211 btrfs_set_path_blocking(path);
5212 cur = read_node_slot(root, cur, slot);
5213 BUG_ON(!cur); /* -ENOMEM */
5214
5215 btrfs_tree_read_lock(cur);
5216
5217 path->locks[level - 1] = BTRFS_READ_LOCK;
5218 path->nodes[level - 1] = cur;
5219 unlock_up(path, level, 1, 0, NULL);
5220 btrfs_clear_path_blocking(path, NULL, 0);
5221 }
5222 out:
5223 path->keep_locks = keep_locks;
5224 if (ret == 0) {
5225 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5226 btrfs_set_path_blocking(path);
5227 memcpy(min_key, &found_key, sizeof(found_key));
5228 }
5229 return ret;
5230 }
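
/*
 * Minimal usage sketch for btrfs_search_forward() (illustrative only).
 * Starting from the smallest possible key, repeatedly ask for the next
 * item that lives in a block with generation >= min_trans, process it,
 * then continue just past the key that was handed back.  This is the
 * shape of the scans done by the defrag and tree-log callers mentioned
 * above.
 */
static int __maybe_unused example_scan_newer_than(struct btrfs_root *root,
						  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key key = {0};
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		ret = btrfs_search_forward(root, &key, path, min_trans);
		if (ret) {
			if (ret == 1)
				ret = 0;	/* nothing newer is left */
			break;
		}
		/* 'key' and the path now describe the found item */
		btrfs_release_path(path);

		/* step to the next possible key */
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < (u8)-1) {
			key.offset = 0;
			key.type++;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = 0;
			key.objectid++;
		} else {
			break;		/* walked past the last possible key */
		}
	}
	btrfs_free_path(path);
	return ret;
}
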
5231
5232 static void tree_move_down(struct btrfs_root *root,
5233 struct btrfs_path *path,
5234 int *level, int root_level)
5235 {
5236 BUG_ON(*level == 0);
5237 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5238 path->slots[*level]);
5239 path->slots[*level - 1] = 0;
5240 (*level)--;
5241 }
5242
5243 static int tree_move_next_or_upnext(struct btrfs_root *root,
5244 struct btrfs_path *path,
5245 int *level, int root_level)
5246 {
5247 int ret = 0;
5248 int nritems;
5249 nritems = btrfs_header_nritems(path->nodes[*level]);
5250
5251 path->slots[*level]++;
5252
5253 while (path->slots[*level] >= nritems) {
5254 if (*level == root_level)
5255 return -1;
5256
5257 /* move upnext */
5258 path->slots[*level] = 0;
5259 free_extent_buffer(path->nodes[*level]);
5260 path->nodes[*level] = NULL;
5261 (*level)++;
5262 path->slots[*level]++;
5263
5264 nritems = btrfs_header_nritems(path->nodes[*level]);
5265 ret = 1;
5266 }
5267 return ret;
5268 }
5269
5270 /*
5271 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5272 * or down.
5273 */
5274 static int tree_advance(struct btrfs_root *root,
5275 struct btrfs_path *path,
5276 int *level, int root_level,
5277 int allow_down,
5278 struct btrfs_key *key)
5279 {
5280 int ret;
5281
5282 if (*level == 0 || !allow_down) {
5283 ret = tree_move_next_or_upnext(root, path, level, root_level);
5284 } else {
5285 tree_move_down(root, path, level, root_level);
5286 ret = 0;
5287 }
5288 if (ret >= 0) {
5289 if (*level == 0)
5290 btrfs_item_key_to_cpu(path->nodes[*level], key,
5291 path->slots[*level]);
5292 else
5293 btrfs_node_key_to_cpu(path->nodes[*level], key,
5294 path->slots[*level]);
5295 }
5296 return ret;
5297 }
5298
5299 static int tree_compare_item(struct btrfs_root *left_root,
5300 struct btrfs_path *left_path,
5301 struct btrfs_path *right_path,
5302 char *tmp_buf)
5303 {
5304 int cmp;
5305 int len1, len2;
5306 unsigned long off1, off2;
5307
5308 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5309 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5310 if (len1 != len2)
5311 return 1;
5312
5313 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5314 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5315 right_path->slots[0]);
5316
5317 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5318
5319 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5320 if (cmp)
5321 return 1;
5322 return 0;
5323 }
5324
5325 #define ADVANCE 1
5326 #define ADVANCE_ONLY_NEXT -1
5327
5328 /*
5329 * This function compares two trees and calls the provided callback for
5330 * every changed/new/deleted item it finds.
5331 * If shared tree blocks are encountered, whole subtrees are skipped, making
5332 * the compare pretty fast on snapshotted subvolumes.
5333 *
5334 * This currently works on commit roots only. As commit roots are read only,
5335 * we don't do any locking. The commit roots are protected with transactions.
5336 * Transactions are ended and rejoined when a commit is tried in between.
5337 *
5338 * This function checks for modifications done to the trees while comparing.
5339 * If it detects a change, it aborts immediately.
5340 */
5341 int btrfs_compare_trees(struct btrfs_root *left_root,
5342 struct btrfs_root *right_root,
5343 btrfs_changed_cb_t changed_cb, void *ctx)
5344 {
5345 int ret;
5346 int cmp;
5347 struct btrfs_path *left_path = NULL;
5348 struct btrfs_path *right_path = NULL;
5349 struct btrfs_key left_key;
5350 struct btrfs_key right_key;
5351 char *tmp_buf = NULL;
5352 int left_root_level;
5353 int right_root_level;
5354 int left_level;
5355 int right_level;
5356 int left_end_reached;
5357 int right_end_reached;
5358 int advance_left;
5359 int advance_right;
5360 u64 left_blockptr;
5361 u64 right_blockptr;
5362 u64 left_gen;
5363 u64 right_gen;
5364
5365 left_path = btrfs_alloc_path();
5366 if (!left_path) {
5367 ret = -ENOMEM;
5368 goto out;
5369 }
5370 right_path = btrfs_alloc_path();
5371 if (!right_path) {
5372 ret = -ENOMEM;
5373 goto out;
5374 }
5375
5376 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5377 if (!tmp_buf) {
5378 tmp_buf = vmalloc(left_root->nodesize);
5379 if (!tmp_buf) {
5380 ret = -ENOMEM;
5381 goto out;
5382 }
5383 }
5384
5385 left_path->search_commit_root = 1;
5386 left_path->skip_locking = 1;
5387 right_path->search_commit_root = 1;
5388 right_path->skip_locking = 1;
5389
5390 /*
5391 * Strategy: Go to the first items of both trees. Then do
5392 *
5393 * If both trees are at level 0
5394 * Compare keys of current items
5395 * If left < right treat left item as new, advance left tree
5396 * and repeat
5397 * If left > right treat right item as deleted, advance right tree
5398 * and repeat
5399 * If left == right do deep compare of items, treat as changed if
5400 * needed, advance both trees and repeat
5401 * If both trees are at the same level but not at level 0
5402 	 *   Compare keys of current nodes/leaves
5403 * If left < right advance left tree and repeat
5404 * If left > right advance right tree and repeat
5405 	 *   If left == right compare blockptrs of the next nodes/leaves
5406 * If they match advance both trees but stay at the same level
5407 * and repeat
5408 * If they don't match advance both trees while allowing to go
5409 * deeper and repeat
5410 * If tree levels are different
5411 * Advance the tree that needs it and repeat
5412 *
5413 * Advancing a tree means:
5414 * If we are at level 0, try to go to the next slot. If that's not
5415 	 * possible, go one level up and repeat. Stop when we find a level
5416 	 * where we can go to the next slot. We may at this point be on a
5417 * node or a leaf.
5418 *
5419 * If we are not at level 0 and not on shared tree blocks, go one
5420 * level deeper.
5421 *
5422 * If we are not at level 0 and on shared tree blocks, go one slot to
5423 * the right if possible or go up and right.
5424 */
5425
5426 down_read(&left_root->fs_info->commit_root_sem);
5427 left_level = btrfs_header_level(left_root->commit_root);
5428 left_root_level = left_level;
5429 left_path->nodes[left_level] = left_root->commit_root;
5430 extent_buffer_get(left_path->nodes[left_level]);
5431
5432 right_level = btrfs_header_level(right_root->commit_root);
5433 right_root_level = right_level;
5434 right_path->nodes[right_level] = right_root->commit_root;
5435 extent_buffer_get(right_path->nodes[right_level]);
5436 up_read(&left_root->fs_info->commit_root_sem);
5437
5438 if (left_level == 0)
5439 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5440 &left_key, left_path->slots[left_level]);
5441 else
5442 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5443 &left_key, left_path->slots[left_level]);
5444 if (right_level == 0)
5445 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5446 &right_key, right_path->slots[right_level]);
5447 else
5448 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5449 &right_key, right_path->slots[right_level]);
5450
5451 left_end_reached = right_end_reached = 0;
5452 advance_left = advance_right = 0;
5453
5454 while (1) {
5455 if (advance_left && !left_end_reached) {
5456 ret = tree_advance(left_root, left_path, &left_level,
5457 left_root_level,
5458 advance_left != ADVANCE_ONLY_NEXT,
5459 &left_key);
5460 if (ret < 0)
5461 left_end_reached = ADVANCE;
5462 advance_left = 0;
5463 }
5464 if (advance_right && !right_end_reached) {
5465 ret = tree_advance(right_root, right_path, &right_level,
5466 right_root_level,
5467 advance_right != ADVANCE_ONLY_NEXT,
5468 &right_key);
5469 if (ret < 0)
5470 right_end_reached = ADVANCE;
5471 advance_right = 0;
5472 }
5473
5474 if (left_end_reached && right_end_reached) {
5475 ret = 0;
5476 goto out;
5477 } else if (left_end_reached) {
5478 if (right_level == 0) {
5479 ret = changed_cb(left_root, right_root,
5480 left_path, right_path,
5481 &right_key,
5482 BTRFS_COMPARE_TREE_DELETED,
5483 ctx);
5484 if (ret < 0)
5485 goto out;
5486 }
5487 advance_right = ADVANCE;
5488 continue;
5489 } else if (right_end_reached) {
5490 if (left_level == 0) {
5491 ret = changed_cb(left_root, right_root,
5492 left_path, right_path,
5493 &left_key,
5494 BTRFS_COMPARE_TREE_NEW,
5495 ctx);
5496 if (ret < 0)
5497 goto out;
5498 }
5499 advance_left = ADVANCE;
5500 continue;
5501 }
5502
5503 if (left_level == 0 && right_level == 0) {
5504 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5505 if (cmp < 0) {
5506 ret = changed_cb(left_root, right_root,
5507 left_path, right_path,
5508 &left_key,
5509 BTRFS_COMPARE_TREE_NEW,
5510 ctx);
5511 if (ret < 0)
5512 goto out;
5513 advance_left = ADVANCE;
5514 } else if (cmp > 0) {
5515 ret = changed_cb(left_root, right_root,
5516 left_path, right_path,
5517 &right_key,
5518 BTRFS_COMPARE_TREE_DELETED,
5519 ctx);
5520 if (ret < 0)
5521 goto out;
5522 advance_right = ADVANCE;
5523 } else {
5524 enum btrfs_compare_tree_result result;
5525
5526 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5527 ret = tree_compare_item(left_root, left_path,
5528 right_path, tmp_buf);
5529 if (ret)
5530 result = BTRFS_COMPARE_TREE_CHANGED;
5531 else
5532 result = BTRFS_COMPARE_TREE_SAME;
5533 ret = changed_cb(left_root, right_root,
5534 left_path, right_path,
5535 &left_key, result, ctx);
5536 if (ret < 0)
5537 goto out;
5538 advance_left = ADVANCE;
5539 advance_right = ADVANCE;
5540 }
5541 } else if (left_level == right_level) {
5542 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5543 if (cmp < 0) {
5544 advance_left = ADVANCE;
5545 } else if (cmp > 0) {
5546 advance_right = ADVANCE;
5547 } else {
5548 left_blockptr = btrfs_node_blockptr(
5549 left_path->nodes[left_level],
5550 left_path->slots[left_level]);
5551 right_blockptr = btrfs_node_blockptr(
5552 right_path->nodes[right_level],
5553 right_path->slots[right_level]);
5554 left_gen = btrfs_node_ptr_generation(
5555 left_path->nodes[left_level],
5556 left_path->slots[left_level]);
5557 right_gen = btrfs_node_ptr_generation(
5558 right_path->nodes[right_level],
5559 right_path->slots[right_level]);
5560 if (left_blockptr == right_blockptr &&
5561 left_gen == right_gen) {
5562 /*
5563 * As we're on a shared block, don't
5564 					 * allow going any deeper.
5565 */
5566 advance_left = ADVANCE_ONLY_NEXT;
5567 advance_right = ADVANCE_ONLY_NEXT;
5568 } else {
5569 advance_left = ADVANCE;
5570 advance_right = ADVANCE;
5571 }
5572 }
5573 } else if (left_level < right_level) {
5574 advance_right = ADVANCE;
5575 } else {
5576 advance_left = ADVANCE;
5577 }
5578 }
5579
5580 out:
5581 btrfs_free_path(left_path);
5582 btrfs_free_path(right_path);
5583 kvfree(tmp_buf);
5584 return ret;
5585 }
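
/*
 * Minimal sketch of driving btrfs_compare_trees() (illustrative only; the
 * callback just counts differences).  The callback signature matches the
 * changed_cb() calls made above: both paths, the key of the item in
 * question and one of the BTRFS_COMPARE_TREE_* results.  Items present
 * only in the left tree are reported as NEW and items present only in the
 * right tree as DELETED, so the left tree is the "newer" of the two.
 */
static int __maybe_unused example_count_changes_cb(struct btrfs_root *left_root,
						   struct btrfs_root *right_root,
						   struct btrfs_path *left_path,
						   struct btrfs_path *right_path,
						   struct btrfs_key *key,
						   enum btrfs_compare_tree_result result,
						   void *ctx)
{
	u64 *counter = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		(*counter)++;
	return 0;	/* returning < 0 would abort the compare */
}

static int __maybe_unused example_count_changes(struct btrfs_root *new_root,
						struct btrfs_root *old_root,
						u64 *counter)
{
	*counter = 0;
	return btrfs_compare_trees(new_root, old_root,
				   example_count_changes_cb, counter);
}
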
5586
5587 /*
5588 * this is similar to btrfs_next_leaf, but does not try to preserve
5589 * and fixup the path. It looks for and returns the next key in the
5590 * tree based on the current path and the min_trans parameters.
5591 *
5592 * 0 is returned if another key is found, < 0 if there are any errors
5593 * and 1 is returned if there are no higher keys in the tree
5594 *
5595 * path->keep_locks should be set to 1 on the search made before
5596 * calling this function.
5597 */
5598 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5599 struct btrfs_key *key, int level, u64 min_trans)
5600 {
5601 int slot;
5602 struct extent_buffer *c;
5603
5604 WARN_ON(!path->keep_locks);
5605 while (level < BTRFS_MAX_LEVEL) {
5606 if (!path->nodes[level])
5607 return 1;
5608
5609 slot = path->slots[level] + 1;
5610 c = path->nodes[level];
5611 next:
5612 if (slot >= btrfs_header_nritems(c)) {
5613 int ret;
5614 int orig_lowest;
5615 struct btrfs_key cur_key;
5616 if (level + 1 >= BTRFS_MAX_LEVEL ||
5617 !path->nodes[level + 1])
5618 return 1;
5619
5620 if (path->locks[level + 1]) {
5621 level++;
5622 continue;
5623 }
5624
5625 slot = btrfs_header_nritems(c) - 1;
5626 if (level == 0)
5627 btrfs_item_key_to_cpu(c, &cur_key, slot);
5628 else
5629 btrfs_node_key_to_cpu(c, &cur_key, slot);
5630
5631 orig_lowest = path->lowest_level;
5632 btrfs_release_path(path);
5633 path->lowest_level = level;
5634 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5635 0, 0);
5636 path->lowest_level = orig_lowest;
5637 if (ret < 0)
5638 return ret;
5639
5640 c = path->nodes[level];
5641 slot = path->slots[level];
5642 if (ret == 0)
5643 slot++;
5644 goto next;
5645 }
5646
5647 if (level == 0)
5648 btrfs_item_key_to_cpu(c, key, slot);
5649 else {
5650 u64 gen = btrfs_node_ptr_generation(c, slot);
5651
5652 if (gen < min_trans) {
5653 slot++;
5654 goto next;
5655 }
5656 btrfs_node_key_to_cpu(c, key, slot);
5657 }
5658 return 0;
5659 }
5660 return 1;
5661 }
5662
5663 /*
5664 * search the tree again to find a leaf with greater keys
5665 * returns 0 if it found something or 1 if there are no greater leaves.
5666 * returns < 0 on io errors.
5667 */
5668 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5669 {
5670 return btrfs_next_old_leaf(root, path, 0);
5671 }
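
/*
 * Minimal iteration sketch (illustrative only): the usual pattern for
 * walking every item from a starting key is to search once and then let
 * btrfs_next_leaf() pull in the following leaf whenever the slot runs off
 * the end of the current one.
 */
static int __maybe_unused example_walk_items(struct btrfs_root *root,
					     struct btrfs_key *start)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1 means there is no next leaf */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* process the item described by 'found' here */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
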
5672
5673 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5674 u64 time_seq)
5675 {
5676 int slot;
5677 int level;
5678 struct extent_buffer *c;
5679 struct extent_buffer *next;
5680 struct btrfs_key key;
5681 u32 nritems;
5682 int ret;
5683 int old_spinning = path->leave_spinning;
5684 int next_rw_lock = 0;
5685
5686 nritems = btrfs_header_nritems(path->nodes[0]);
5687 if (nritems == 0)
5688 return 1;
5689
5690 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5691 again:
5692 level = 1;
5693 next = NULL;
5694 next_rw_lock = 0;
5695 btrfs_release_path(path);
5696
5697 path->keep_locks = 1;
5698 path->leave_spinning = 1;
5699
5700 if (time_seq)
5701 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5702 else
5703 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5704 path->keep_locks = 0;
5705
5706 if (ret < 0)
5707 return ret;
5708
5709 nritems = btrfs_header_nritems(path->nodes[0]);
5710 /*
5711 * by releasing the path above we dropped all our locks. A balance
5712 * could have added more items next to the key that used to be
5713 * at the very end of the block. So, check again here and
5714 * advance the path if there are now more items available.
5715 */
5716 if (nritems > 0 && path->slots[0] < nritems - 1) {
5717 if (ret == 0)
5718 path->slots[0]++;
5719 ret = 0;
5720 goto done;
5721 }
5722 /*
5723 * So the above check misses one case:
5724 * - after releasing the path above, someone has removed the item that
5725 	 *   used to be at the very end of the block, and balance between leaves
5726 	 *   gets another one with a bigger key.offset to replace it.
5727 *
5728 * This one should be returned as well, or we can get leaf corruption
5729 	 * later (esp. in __btrfs_drop_extents()).
5730 *
5731 	 * A bit more explanation about this check:
5732 * with ret > 0, the key isn't found, the path points to the slot
5733 * where it should be inserted, so the path->slots[0] item must be the
5734 * bigger one.
5735 */
5736 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5737 ret = 0;
5738 goto done;
5739 }
5740
5741 while (level < BTRFS_MAX_LEVEL) {
5742 if (!path->nodes[level]) {
5743 ret = 1;
5744 goto done;
5745 }
5746
5747 slot = path->slots[level] + 1;
5748 c = path->nodes[level];
5749 if (slot >= btrfs_header_nritems(c)) {
5750 level++;
5751 if (level == BTRFS_MAX_LEVEL) {
5752 ret = 1;
5753 goto done;
5754 }
5755 continue;
5756 }
5757
5758 if (next) {
5759 btrfs_tree_unlock_rw(next, next_rw_lock);
5760 free_extent_buffer(next);
5761 }
5762
5763 next = c;
5764 next_rw_lock = path->locks[level];
5765 ret = read_block_for_search(NULL, root, path, &next, level,
5766 slot, &key, 0);
5767 if (ret == -EAGAIN)
5768 goto again;
5769
5770 if (ret < 0) {
5771 btrfs_release_path(path);
5772 goto done;
5773 }
5774
5775 if (!path->skip_locking) {
5776 ret = btrfs_try_tree_read_lock(next);
5777 if (!ret && time_seq) {
5778 /*
5779 				 * If we don't get the lock, we may be racing
5780 				 * with push_leaf_left, which holds that lock
5781 				 * while waiting for the leaf we currently have
5782 				 * locked. To resolve this, we give up our lock
5783 				 * and cycle.
5784 */
5785 free_extent_buffer(next);
5786 btrfs_release_path(path);
5787 cond_resched();
5788 goto again;
5789 }
5790 if (!ret) {
5791 btrfs_set_path_blocking(path);
5792 btrfs_tree_read_lock(next);
5793 btrfs_clear_path_blocking(path, next,
5794 BTRFS_READ_LOCK);
5795 }
5796 next_rw_lock = BTRFS_READ_LOCK;
5797 }
5798 break;
5799 }
5800 path->slots[level] = slot;
5801 while (1) {
5802 level--;
5803 c = path->nodes[level];
5804 if (path->locks[level])
5805 btrfs_tree_unlock_rw(c, path->locks[level]);
5806
5807 free_extent_buffer(c);
5808 path->nodes[level] = next;
5809 path->slots[level] = 0;
5810 if (!path->skip_locking)
5811 path->locks[level] = next_rw_lock;
5812 if (!level)
5813 break;
5814
5815 ret = read_block_for_search(NULL, root, path, &next, level,
5816 0, &key, 0);
5817 if (ret == -EAGAIN)
5818 goto again;
5819
5820 if (ret < 0) {
5821 btrfs_release_path(path);
5822 goto done;
5823 }
5824
5825 if (!path->skip_locking) {
5826 ret = btrfs_try_tree_read_lock(next);
5827 if (!ret) {
5828 btrfs_set_path_blocking(path);
5829 btrfs_tree_read_lock(next);
5830 btrfs_clear_path_blocking(path, next,
5831 BTRFS_READ_LOCK);
5832 }
5833 next_rw_lock = BTRFS_READ_LOCK;
5834 }
5835 }
5836 ret = 0;
5837 done:
5838 unlock_up(path, 0, 1, 0, NULL);
5839 path->leave_spinning = old_spinning;
5840 if (!old_spinning)
5841 btrfs_set_path_blocking(path);
5842
5843 return ret;
5844 }
5845
5846 /*
5847 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5848 * searching until it gets past min_objectid or finds an item of 'type'
5849 *
5850 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5851 */
5852 int btrfs_previous_item(struct btrfs_root *root,
5853 struct btrfs_path *path, u64 min_objectid,
5854 int type)
5855 {
5856 struct btrfs_key found_key;
5857 struct extent_buffer *leaf;
5858 u32 nritems;
5859 int ret;
5860
5861 while (1) {
5862 if (path->slots[0] == 0) {
5863 btrfs_set_path_blocking(path);
5864 ret = btrfs_prev_leaf(root, path);
5865 if (ret != 0)
5866 return ret;
5867 } else {
5868 path->slots[0]--;
5869 }
5870 leaf = path->nodes[0];
5871 nritems = btrfs_header_nritems(leaf);
5872 if (nritems == 0)
5873 return 1;
5874 if (path->slots[0] == nritems)
5875 path->slots[0]--;
5876
5877 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5878 if (found_key.objectid < min_objectid)
5879 break;
5880 if (found_key.type == type)
5881 return 0;
5882 if (found_key.objectid == min_objectid &&
5883 found_key.type < type)
5884 break;
5885 }
5886 return 1;
5887 }
5888
5889 /*
5890 * search the extent tree to find a previous Metadata/Data extent item with
5891 * a minimum objectid.
5892 *
5893 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5894 */
5895 int btrfs_previous_extent_item(struct btrfs_root *root,
5896 struct btrfs_path *path, u64 min_objectid)
5897 {
5898 struct btrfs_key found_key;
5899 struct extent_buffer *leaf;
5900 u32 nritems;
5901 int ret;
5902
5903 while (1) {
5904 if (path->slots[0] == 0) {
5905 btrfs_set_path_blocking(path);
5906 ret = btrfs_prev_leaf(root, path);
5907 if (ret != 0)
5908 return ret;
5909 } else {
5910 path->slots[0]--;
5911 }
5912 leaf = path->nodes[0];
5913 nritems = btrfs_header_nritems(leaf);
5914 if (nritems == 0)
5915 return 1;
5916 if (path->slots[0] == nritems)
5917 path->slots[0]--;
5918
5919 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5920 if (found_key.objectid < min_objectid)
5921 break;
5922 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5923 found_key.type == BTRFS_METADATA_ITEM_KEY)
5924 return 0;
5925 if (found_key.objectid == min_objectid &&
5926 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5927 break;
5928 }
5929 return 1;
5930 }