1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include <linux/mm.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "print-tree.h"
27 #include "locking.h"
28
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 const struct btrfs_key *ins_key, struct btrfs_path *path,
33 int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 struct btrfs_fs_info *fs_info,
36 struct extent_buffer *dst,
37 struct extent_buffer *src, int empty);
38 static int balance_node_right(struct btrfs_trans_handle *trans,
39 struct btrfs_fs_info *fs_info,
40 struct extent_buffer *dst_buf,
41 struct extent_buffer *src_buf);
42 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
43 int level, int slot);
44 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
45 struct extent_buffer *eb);
46
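/* allocate a zeroed btrfs_path from the dedicated slab cache */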
47 struct btrfs_path *btrfs_alloc_path(void)
48 {
49 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 }
51
52 /*
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
55 */
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
57 {
58 int i;
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
61 continue;
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
67 }
68 }
69
70 /*
71  * reset all the locked nodes in the path to spinning locks.
72 *
73  * held is used to keep lockdep happy: when lockdep is enabled,
74  * we set held to a blocking lock before we go around and
75  * retake all the spinlocks in the path. You can safely use NULL
76  * for held.
77 */
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
80 {
81 int i;
82
83 if (held) {
84 btrfs_set_lock_blocking_rw(held, held_rw);
85 if (held_rw == BTRFS_WRITE_LOCK)
86 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
87 else if (held_rw == BTRFS_READ_LOCK)
88 held_rw = BTRFS_READ_LOCK_BLOCKING;
89 }
90 btrfs_set_path_blocking(p);
91
92 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
93 if (p->nodes[i] && p->locks[i]) {
94 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
95 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
96 p->locks[i] = BTRFS_WRITE_LOCK;
97 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
98 p->locks[i] = BTRFS_READ_LOCK;
99 }
100 }
101
102 if (held)
103 btrfs_clear_lock_blocking_rw(held, held_rw);
104 }
105
106 /* this also releases the path */
107 void btrfs_free_path(struct btrfs_path *p)
108 {
109 if (!p)
110 return;
111 btrfs_release_path(p);
112 kmem_cache_free(btrfs_path_cachep, p);
113 }
114
115 /*
116 * path release drops references on the extent buffers in the path
117 * and it drops any locks held by this path
118 *
119  * It is safe to call this on paths that hold no locks or extent buffers.
120 */
121 noinline void btrfs_release_path(struct btrfs_path *p)
122 {
123 int i;
124
125 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
126 p->slots[i] = 0;
127 if (!p->nodes[i])
128 continue;
129 if (p->locks[i]) {
130 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
131 p->locks[i] = 0;
132 }
133 free_extent_buffer(p->nodes[i]);
134 p->nodes[i] = NULL;
135 }
136 }
137
138 /*
139 * safely gets a reference on the root node of a tree. A lock
140 * is not taken, so a concurrent writer may put a different node
141 * at the root of the tree. See btrfs_lock_root_node for the
142 * looping required.
143 *
144 * The extent buffer returned by this has a reference taken, so
145 * it won't disappear. It may stop being the root of the tree
146 * at any time because there are no locks held.
147 */
148 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
149 {
150 struct extent_buffer *eb;
151
152 while (1) {
153 rcu_read_lock();
154 eb = rcu_dereference(root->node);
155
156 /*
157                  * RCU really hurts here: we could free up the root node because
158                  * it was COWed, but we may not get the new root node yet, so do
159                  * the inc_not_zero dance and, if it doesn't work,
160                  * synchronize_rcu and try again.
161 */
162 if (atomic_inc_not_zero(&eb->refs)) {
163 rcu_read_unlock();
164 break;
165 }
166 rcu_read_unlock();
167 synchronize_rcu();
168 }
169 return eb;
170 }
171
172 /* loop around taking references on and locking the root node of the
173 * tree until you end up with a lock on the root. A locked buffer
174 * is returned, with a reference held.
175 */
176 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
177 {
178 struct extent_buffer *eb;
179
180 while (1) {
181 eb = btrfs_root_node(root);
182 btrfs_tree_lock(eb);
183 if (eb == root->node)
184 break;
185 btrfs_tree_unlock(eb);
186 free_extent_buffer(eb);
187 }
188 return eb;
189 }
190
191 /* loop around taking references on and read locking the root node of
192  * the tree until you end up with a read lock on the root. A read-locked
193  * buffer is returned, with a reference held.
194 */
195 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
196 {
197 struct extent_buffer *eb;
198
199 while (1) {
200 eb = btrfs_root_node(root);
201 btrfs_tree_read_lock(eb);
202 if (eb == root->node)
203 break;
204 btrfs_tree_read_unlock(eb);
205 free_extent_buffer(eb);
206 }
207 return eb;
208 }
209
210 /* cowonly roots (everything not a reference counted cow subvolume) just get
211  * put onto a simple dirty list. transaction.c walks this list to make sure
212  * they get properly updated on disk.
213 */
214 static void add_root_to_dirty_list(struct btrfs_root *root)
215 {
216 struct btrfs_fs_info *fs_info = root->fs_info;
217
218 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
219 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
220 return;
221
222 spin_lock(&fs_info->trans_lock);
223 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
224 /* Want the extent tree to be the last on the list */
225 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
226 list_move_tail(&root->dirty_list,
227 &fs_info->dirty_cowonly_roots);
228 else
229 list_move(&root->dirty_list,
230 &fs_info->dirty_cowonly_roots);
231 }
232 spin_unlock(&fs_info->trans_lock);
233 }
234
235 /*
236 * used by snapshot creation to make a copy of a root for a tree with
237 * a given objectid. The buffer with the new root node is returned in
238 * cow_ret, and this func returns zero on success or a negative error code.
239 */
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 struct btrfs_root *root,
242 struct extent_buffer *buf,
243 struct extent_buffer **cow_ret, u64 new_root_objectid)
244 {
245 struct btrfs_fs_info *fs_info = root->fs_info;
246 struct extent_buffer *cow;
247 int ret = 0;
248 int level;
249 struct btrfs_disk_key disk_key;
250
251 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
252 trans->transid != fs_info->running_transaction->transid);
253 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
254 trans->transid != root->last_trans);
255
256 level = btrfs_header_level(buf);
257 if (level == 0)
258 btrfs_item_key(buf, &disk_key, 0);
259 else
260 btrfs_node_key(buf, &disk_key, 0);
261
262 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
263 &disk_key, level, buf->start, 0);
264 if (IS_ERR(cow))
265 return PTR_ERR(cow);
266
267 copy_extent_buffer_full(cow, buf);
268 btrfs_set_header_bytenr(cow, cow->start);
269 btrfs_set_header_generation(cow, trans->transid);
270 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
271 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
272 BTRFS_HEADER_FLAG_RELOC);
273 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
274 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
275 else
276 btrfs_set_header_owner(cow, new_root_objectid);
277
278 write_extent_buffer_fsid(cow, fs_info->fsid);
279
280 WARN_ON(btrfs_header_generation(buf) > trans->transid);
281 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
282 ret = btrfs_inc_ref(trans, root, cow, 1);
283 else
284 ret = btrfs_inc_ref(trans, root, cow, 0);
285
286 if (ret)
287 return ret;
288
289 btrfs_mark_buffer_dirty(cow);
290 *cow_ret = cow;
291 return 0;
292 }
293
294 enum mod_log_op {
295 MOD_LOG_KEY_REPLACE,
296 MOD_LOG_KEY_ADD,
297 MOD_LOG_KEY_REMOVE,
298 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
299 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
300 MOD_LOG_MOVE_KEYS,
301 MOD_LOG_ROOT_REPLACE,
302 };
303
304 struct tree_mod_move {
305 int dst_slot;
306 int nr_items;
307 };
308
309 struct tree_mod_root {
310 u64 logical;
311 u8 level;
312 };
313
314 struct tree_mod_elem {
315 struct rb_node node;
316 u64 logical;
317 u64 seq;
318 enum mod_log_op op;
319
320 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 int slot;
322
323 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_ROOT_REPLACE */
324 u64 generation;
325
326 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
327 struct btrfs_disk_key key;
328 u64 blockptr;
329
330 /* this is used for op == MOD_LOG_MOVE_KEYS */
331 struct tree_mod_move move;
332
333 /* this is used for op == MOD_LOG_ROOT_REPLACE */
334 struct tree_mod_root old_root;
335 };
336
337 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
338 {
339 read_lock(&fs_info->tree_mod_log_lock);
340 }
341
342 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
343 {
344 read_unlock(&fs_info->tree_mod_log_lock);
345 }
346
347 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
348 {
349 write_lock(&fs_info->tree_mod_log_lock);
350 }
351
352 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
353 {
354 write_unlock(&fs_info->tree_mod_log_lock);
355 }
356
357 /*
358 * Pull a new tree mod seq number for our operation.
359 */
360 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
361 {
362 return atomic64_inc_return(&fs_info->tree_mod_seq);
363 }
364
365 /*
366 * This adds a new blocker to the tree mod log's blocker list if the @elem
367  * passed does not already have a sequence number set. So when a caller expects
368  * to record tree modifications, it should make sure elem->seq is set to zero
369  * before calling btrfs_get_tree_mod_seq.
370 * Returns a fresh, unused tree log modification sequence number, even if no new
371 * blocker was added.
372 */
373 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
374 struct seq_list *elem)
375 {
376 tree_mod_log_write_lock(fs_info);
377 spin_lock(&fs_info->tree_mod_seq_lock);
378 if (!elem->seq) {
379 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
380 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
381 }
382 spin_unlock(&fs_info->tree_mod_seq_lock);
383 tree_mod_log_write_unlock(fs_info);
384
385 return elem->seq;
386 }
387
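/*
 * Drop the blocker @elem from the tree mod seq list. If it held the lowest
 * sequence number, all log entries with a sequence number not greater than
 * the lowest remaining blocker are pruned; if no blockers remain, the whole
 * log is emptied.
 */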
388 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
389 struct seq_list *elem)
390 {
391 struct rb_root *tm_root;
392 struct rb_node *node;
393 struct rb_node *next;
394 struct seq_list *cur_elem;
395 struct tree_mod_elem *tm;
396 u64 min_seq = (u64)-1;
397 u64 seq_putting = elem->seq;
398
399 if (!seq_putting)
400 return;
401
402 spin_lock(&fs_info->tree_mod_seq_lock);
403 list_del(&elem->list);
404 elem->seq = 0;
405
406 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
407 if (cur_elem->seq < min_seq) {
408 if (seq_putting > cur_elem->seq) {
409 /*
410 			 * a blocker with a lower sequence number exists; we
411 			 * cannot remove anything from the log
412 */
413 spin_unlock(&fs_info->tree_mod_seq_lock);
414 return;
415 }
416 min_seq = cur_elem->seq;
417 }
418 }
419 spin_unlock(&fs_info->tree_mod_seq_lock);
420
421 /*
422 * anything that's lower than the lowest existing (read: blocked)
423 * sequence number can be removed from the tree.
424 */
425 tree_mod_log_write_lock(fs_info);
426 tm_root = &fs_info->tree_mod_log;
427 for (node = rb_first(tm_root); node; node = next) {
428 next = rb_next(node);
429 tm = rb_entry(node, struct tree_mod_elem, node);
430 if (tm->seq > min_seq)
431 continue;
432 rb_erase(node, tm_root);
433 kfree(tm);
434 }
435 tree_mod_log_write_unlock(fs_info);
436 }
437
438 /*
439 * key order of the log:
440 * node/leaf start address -> sequence
441 *
442 * The 'start address' is the logical address of the *new* root node
443 * for root replace operations, or the logical address of the affected
444 * block for all other operations.
445 *
446 * Note: must be called with write lock (tree_mod_log_write_lock).
447 */
448 static noinline int
449 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
450 {
451 struct rb_root *tm_root;
452 struct rb_node **new;
453 struct rb_node *parent = NULL;
454 struct tree_mod_elem *cur;
455
456 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
457
458 tm_root = &fs_info->tree_mod_log;
459 new = &tm_root->rb_node;
460 while (*new) {
461 cur = rb_entry(*new, struct tree_mod_elem, node);
462 parent = *new;
463 if (cur->logical < tm->logical)
464 new = &((*new)->rb_left);
465 else if (cur->logical > tm->logical)
466 new = &((*new)->rb_right);
467 else if (cur->seq < tm->seq)
468 new = &((*new)->rb_left);
469 else if (cur->seq > tm->seq)
470 new = &((*new)->rb_right);
471 else
472 return -EEXIST;
473 }
474
475 rb_link_node(&tm->node, parent, new);
476 rb_insert_color(&tm->node, tm_root);
477 return 0;
478 }
479
480 /*
481 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
482 * returns zero with the tree_mod_log_lock acquired. The caller must hold
483  * the lock until all tree mod log insertions are recorded in the rb tree and then
484 * call tree_mod_log_write_unlock() to release.
485 */
486 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
487 struct extent_buffer *eb) {
488 smp_mb();
489 if (list_empty(&(fs_info)->tree_mod_seq_list))
490 return 1;
491 if (eb && btrfs_header_level(eb) == 0)
492 return 1;
493
494 tree_mod_log_write_lock(fs_info);
495 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
496 tree_mod_log_write_unlock(fs_info);
497 return 1;
498 }
499
500 return 0;
501 }
502
503 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
504 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
505 struct extent_buffer *eb)
506 {
507 smp_mb();
508 if (list_empty(&(fs_info)->tree_mod_seq_list))
509 return 0;
510 if (eb && btrfs_header_level(eb) == 0)
511 return 0;
512
513 return 1;
514 }
515
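/*
 * Allocate and fill a log element for a key operation on @slot of @eb.
 * Except for a plain key add, the key and block pointer currently in the
 * slot are recorded so that the operation can later be rewound.
 */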
516 static struct tree_mod_elem *
517 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
518 enum mod_log_op op, gfp_t flags)
519 {
520 struct tree_mod_elem *tm;
521
522 tm = kzalloc(sizeof(*tm), flags);
523 if (!tm)
524 return NULL;
525
526 tm->logical = eb->start;
527 if (op != MOD_LOG_KEY_ADD) {
528 btrfs_node_key(eb, &tm->key, slot);
529 tm->blockptr = btrfs_node_blockptr(eb, slot);
530 }
531 tm->op = op;
532 tm->slot = slot;
533 tm->generation = btrfs_node_ptr_generation(eb, slot);
534 RB_CLEAR_NODE(&tm->node);
535
536 return tm;
537 }
538
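/*
 * Log a single key operation (add, remove or replace) on @slot of @eb.
 * Returns 0 without recording anything when no tree mod seq blockers are
 * active or when @eb is a leaf.
 */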
539 static noinline int
540 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
541 struct extent_buffer *eb, int slot,
542 enum mod_log_op op, gfp_t flags)
543 {
544 struct tree_mod_elem *tm;
545 int ret;
546
547 if (!tree_mod_need_log(fs_info, eb))
548 return 0;
549
550 tm = alloc_tree_mod_elem(eb, slot, op, flags);
551 if (!tm)
552 return -ENOMEM;
553
554 if (tree_mod_dont_log(fs_info, eb)) {
555 kfree(tm);
556 return 0;
557 }
558
559 ret = __tree_mod_log_insert(fs_info, tm);
560 tree_mod_log_write_unlock(fs_info);
561 if (ret)
562 kfree(tm);
563
564 return ret;
565 }
566
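/*
 * Log a move of @nr_items key pointers inside @eb from @src_slot to
 * @dst_slot. When the move goes towards the start of the buffer
 * (dst_slot < src_slot), the slots that get overwritten are logged as
 * removals before the move itself is recorded.
 */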
567 static noinline int
568 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
569 struct extent_buffer *eb, int dst_slot, int src_slot,
570 int nr_items)
571 {
572 struct tree_mod_elem *tm = NULL;
573 struct tree_mod_elem **tm_list = NULL;
574 int ret = 0;
575 int i;
576 int locked = 0;
577
578 if (!tree_mod_need_log(fs_info, eb))
579 return 0;
580
581 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
582 if (!tm_list)
583 return -ENOMEM;
584
585 tm = kzalloc(sizeof(*tm), GFP_NOFS);
586 if (!tm) {
587 ret = -ENOMEM;
588 goto free_tms;
589 }
590
591 tm->logical = eb->start;
592 tm->slot = src_slot;
593 tm->move.dst_slot = dst_slot;
594 tm->move.nr_items = nr_items;
595 tm->op = MOD_LOG_MOVE_KEYS;
596
597 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
598 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
599 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
600 if (!tm_list[i]) {
601 ret = -ENOMEM;
602 goto free_tms;
603 }
604 }
605
606 if (tree_mod_dont_log(fs_info, eb))
607 goto free_tms;
608 locked = 1;
609
610 /*
611 	 * When we overwrite something during the move, we log these removals.
612 * This can only happen when we move towards the beginning of the
613 * buffer, i.e. dst_slot < src_slot.
614 */
615 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
616 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
617 if (ret)
618 goto free_tms;
619 }
620
621 ret = __tree_mod_log_insert(fs_info, tm);
622 if (ret)
623 goto free_tms;
624 tree_mod_log_write_unlock(fs_info);
625 kfree(tm_list);
626
627 return 0;
628 free_tms:
629 for (i = 0; i < nr_items; i++) {
630 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
631 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
632 kfree(tm_list[i]);
633 }
634 if (locked)
635 tree_mod_log_write_unlock(fs_info);
636 kfree(tm_list);
637 kfree(tm);
638
639 return ret;
640 }
641
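/*
 * Insert the pre-allocated removal records in @tm_list into the log,
 * starting from the highest slot. If one insertion fails, the records
 * already inserted are unlinked again so the caller can free the list.
 */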
642 static inline int
643 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
644 struct tree_mod_elem **tm_list,
645 int nritems)
646 {
647 int i, j;
648 int ret;
649
650 for (i = nritems - 1; i >= 0; i--) {
651 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
652 if (ret) {
653 for (j = nritems - 1; j > i; j--)
654 rb_erase(&tm_list[j]->node,
655 &fs_info->tree_mod_log);
656 return ret;
657 }
658 }
659
660 return 0;
661 }
662
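/*
 * Log the replacement of the root node @old_root by @new_root. If
 * @log_removal is set and the old root is not a leaf, a removal record is
 * also logged for every key pointer it contains.
 */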
663 static noinline int
664 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
665 struct extent_buffer *old_root,
666 struct extent_buffer *new_root,
667 int log_removal)
668 {
669 struct tree_mod_elem *tm = NULL;
670 struct tree_mod_elem **tm_list = NULL;
671 int nritems = 0;
672 int ret = 0;
673 int i;
674
675 if (!tree_mod_need_log(fs_info, NULL))
676 return 0;
677
678 if (log_removal && btrfs_header_level(old_root) > 0) {
679 nritems = btrfs_header_nritems(old_root);
680 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
681 GFP_NOFS);
682 if (!tm_list) {
683 ret = -ENOMEM;
684 goto free_tms;
685 }
686 for (i = 0; i < nritems; i++) {
687 tm_list[i] = alloc_tree_mod_elem(old_root, i,
688 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
689 if (!tm_list[i]) {
690 ret = -ENOMEM;
691 goto free_tms;
692 }
693 }
694 }
695
696 tm = kzalloc(sizeof(*tm), GFP_NOFS);
697 if (!tm) {
698 ret = -ENOMEM;
699 goto free_tms;
700 }
701
702 tm->logical = new_root->start;
703 tm->old_root.logical = old_root->start;
704 tm->old_root.level = btrfs_header_level(old_root);
705 tm->generation = btrfs_header_generation(old_root);
706 tm->op = MOD_LOG_ROOT_REPLACE;
707
708 if (tree_mod_dont_log(fs_info, NULL))
709 goto free_tms;
710
711 if (tm_list)
712 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
713 if (!ret)
714 ret = __tree_mod_log_insert(fs_info, tm);
715
716 tree_mod_log_write_unlock(fs_info);
717 if (ret)
718 goto free_tms;
719 kfree(tm_list);
720
721 return ret;
722
723 free_tms:
724 if (tm_list) {
725 for (i = 0; i < nritems; i++)
726 kfree(tm_list[i]);
727 kfree(tm_list);
728 }
729 kfree(tm);
730
731 return ret;
732 }
733
734 static struct tree_mod_elem *
735 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
736 int smallest)
737 {
738 struct rb_root *tm_root;
739 struct rb_node *node;
740 struct tree_mod_elem *cur = NULL;
741 struct tree_mod_elem *found = NULL;
742
743 tree_mod_log_read_lock(fs_info);
744 tm_root = &fs_info->tree_mod_log;
745 node = tm_root->rb_node;
746 while (node) {
747 cur = rb_entry(node, struct tree_mod_elem, node);
748 if (cur->logical < start) {
749 node = node->rb_left;
750 } else if (cur->logical > start) {
751 node = node->rb_right;
752 } else if (cur->seq < min_seq) {
753 node = node->rb_left;
754 } else if (!smallest) {
755 /* we want the node with the highest seq */
756 if (found)
757 BUG_ON(found->seq > cur->seq);
758 found = cur;
759 node = node->rb_left;
760 } else if (cur->seq > min_seq) {
761 /* we want the node with the smallest seq */
762 if (found)
763 BUG_ON(found->seq < cur->seq);
764 found = cur;
765 node = node->rb_right;
766 } else {
767 found = cur;
768 break;
769 }
770 }
771 tree_mod_log_read_unlock(fs_info);
772
773 return found;
774 }
775
776 /*
777 * this returns the element from the log with the smallest time sequence
778 * value that's in the log (the oldest log item). any element with a time
779 * sequence lower than min_seq will be ignored.
780 */
781 static struct tree_mod_elem *
782 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
783 u64 min_seq)
784 {
785 return __tree_mod_log_search(fs_info, start, min_seq, 1);
786 }
787
788 /*
789 * this returns the element from the log with the largest time sequence
790 * value that's in the log (the most recent log item). any element with
791 * a time sequence lower than min_seq will be ignored.
792 */
793 static struct tree_mod_elem *
794 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
795 {
796 return __tree_mod_log_search(fs_info, start, min_seq, 0);
797 }
798
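/*
 * Log a copy of @nr_items key pointers from @src to @dst as pairs of
 * removal records (for the source slots) and add records (for the
 * destination slots). Nothing is logged when both buffers are leaves.
 */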
799 static noinline int
800 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
801 struct extent_buffer *src, unsigned long dst_offset,
802 unsigned long src_offset, int nr_items)
803 {
804 int ret = 0;
805 struct tree_mod_elem **tm_list = NULL;
806 struct tree_mod_elem **tm_list_add, **tm_list_rem;
807 int i;
808 int locked = 0;
809
810 if (!tree_mod_need_log(fs_info, NULL))
811 return 0;
812
813 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
814 return 0;
815
816 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
817 GFP_NOFS);
818 if (!tm_list)
819 return -ENOMEM;
820
821 tm_list_add = tm_list;
822 tm_list_rem = tm_list + nr_items;
823 for (i = 0; i < nr_items; i++) {
824 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
825 MOD_LOG_KEY_REMOVE, GFP_NOFS);
826 if (!tm_list_rem[i]) {
827 ret = -ENOMEM;
828 goto free_tms;
829 }
830
831 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
832 MOD_LOG_KEY_ADD, GFP_NOFS);
833 if (!tm_list_add[i]) {
834 ret = -ENOMEM;
835 goto free_tms;
836 }
837 }
838
839 if (tree_mod_dont_log(fs_info, NULL))
840 goto free_tms;
841 locked = 1;
842
843 for (i = 0; i < nr_items; i++) {
844 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
845 if (ret)
846 goto free_tms;
847 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
848 if (ret)
849 goto free_tms;
850 }
851
852 tree_mod_log_write_unlock(fs_info);
853 kfree(tm_list);
854
855 return 0;
856
857 free_tms:
858 for (i = 0; i < nr_items * 2; i++) {
859 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
860 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
861 kfree(tm_list[i]);
862 }
863 if (locked)
864 tree_mod_log_write_unlock(fs_info);
865 kfree(tm_list);
866
867 return ret;
868 }
869
870 static inline void
871 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
872 int dst_offset, int src_offset, int nr_items)
873 {
874 int ret;
875 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
876 nr_items);
877 BUG_ON(ret < 0);
878 }
879
880 static noinline void
881 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
882 struct extent_buffer *eb, int slot, int atomic)
883 {
884 int ret;
885
886 ret = tree_mod_log_insert_key(fs_info, eb, slot,
887 MOD_LOG_KEY_REPLACE,
888 atomic ? GFP_ATOMIC : GFP_NOFS);
889 BUG_ON(ret < 0);
890 }
891
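/*
 * Log the removal of every key pointer in @eb, which is about to be freed.
 * Leaves are not logged.
 */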
892 static noinline int
893 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
894 {
895 struct tree_mod_elem **tm_list = NULL;
896 int nritems = 0;
897 int i;
898 int ret = 0;
899
900 if (btrfs_header_level(eb) == 0)
901 return 0;
902
903 if (!tree_mod_need_log(fs_info, NULL))
904 return 0;
905
906 nritems = btrfs_header_nritems(eb);
907 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
908 if (!tm_list)
909 return -ENOMEM;
910
911 for (i = 0; i < nritems; i++) {
912 tm_list[i] = alloc_tree_mod_elem(eb, i,
913 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
914 if (!tm_list[i]) {
915 ret = -ENOMEM;
916 goto free_tms;
917 }
918 }
919
920 if (tree_mod_dont_log(fs_info, eb))
921 goto free_tms;
922
923 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
924 tree_mod_log_write_unlock(fs_info);
925 if (ret)
926 goto free_tms;
927 kfree(tm_list);
928
929 return 0;
930
931 free_tms:
932 for (i = 0; i < nritems; i++)
933 kfree(tm_list[i]);
934 kfree(tm_list);
935
936 return ret;
937 }
938
939 static noinline void
940 tree_mod_log_set_root_pointer(struct btrfs_root *root,
941 struct extent_buffer *new_root_node,
942 int log_removal)
943 {
944 int ret;
945 ret = tree_mod_log_insert_root(root->fs_info, root->node,
946 new_root_node, log_removal);
947 BUG_ON(ret < 0);
948 }
949
950 /*
951 * check if the tree block can be shared by multiple trees
952 */
953 int btrfs_block_can_be_shared(struct btrfs_root *root,
954 struct extent_buffer *buf)
955 {
956 /*
957 * Tree blocks not in reference counted trees and tree roots
958 * are never shared. If a block was allocated after the last
959 * snapshot and the block was not allocated by tree relocation,
960 * we know the block is not shared.
961 */
962 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
963 buf != root->node && buf != root->commit_root &&
964 (btrfs_header_generation(buf) <=
965 btrfs_root_last_snapshot(&root->root_item) ||
966 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
967 return 1;
968 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
969 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
970 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
971 return 1;
972 #endif
973 return 0;
974 }
975
976 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
977 struct btrfs_root *root,
978 struct extent_buffer *buf,
979 struct extent_buffer *cow,
980 int *last_ref)
981 {
982 struct btrfs_fs_info *fs_info = root->fs_info;
983 u64 refs;
984 u64 owner;
985 u64 flags;
986 u64 new_flags = 0;
987 int ret;
988
989 /*
990 * Backrefs update rules:
991 *
992 * Always use full backrefs for extent pointers in tree block
993 * allocated by tree relocation.
994 *
995 * If a shared tree block is no longer referenced by its owner
996 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
997 * use full backrefs for extent pointers in tree block.
998 *
999 	 * If a tree block is being relocated
1000 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1001 * use full backrefs for extent pointers in tree block.
1002 	 * The reason for this is that some operations (such as drop tree)
1003 	 * are only allowed for blocks that use full backrefs.
1004 */
1005
1006 if (btrfs_block_can_be_shared(root, buf)) {
1007 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
1008 btrfs_header_level(buf), 1,
1009 &refs, &flags);
1010 if (ret)
1011 return ret;
1012 if (refs == 0) {
1013 ret = -EROFS;
1014 btrfs_handle_fs_error(fs_info, ret, NULL);
1015 return ret;
1016 }
1017 } else {
1018 refs = 1;
1019 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1020 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1021 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1022 else
1023 flags = 0;
1024 }
1025
1026 owner = btrfs_header_owner(buf);
1027 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1028 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1029
1030 if (refs > 1) {
1031 if ((owner == root->root_key.objectid ||
1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1034 ret = btrfs_inc_ref(trans, root, buf, 1);
1035 if (ret)
1036 return ret;
1037
1038 if (root->root_key.objectid ==
1039 BTRFS_TREE_RELOC_OBJECTID) {
1040 ret = btrfs_dec_ref(trans, root, buf, 0);
1041 if (ret)
1042 return ret;
1043 ret = btrfs_inc_ref(trans, root, cow, 1);
1044 if (ret)
1045 return ret;
1046 }
1047 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1048 } else {
1049
1050 if (root->root_key.objectid ==
1051 BTRFS_TREE_RELOC_OBJECTID)
1052 ret = btrfs_inc_ref(trans, root, cow, 1);
1053 else
1054 ret = btrfs_inc_ref(trans, root, cow, 0);
1055 if (ret)
1056 return ret;
1057 }
1058 if (new_flags != 0) {
1059 int level = btrfs_header_level(buf);
1060
1061 ret = btrfs_set_disk_extent_flags(trans, fs_info,
1062 buf->start,
1063 buf->len,
1064 new_flags, level, 0);
1065 if (ret)
1066 return ret;
1067 }
1068 } else {
1069 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1070 if (root->root_key.objectid ==
1071 BTRFS_TREE_RELOC_OBJECTID)
1072 ret = btrfs_inc_ref(trans, root, cow, 1);
1073 else
1074 ret = btrfs_inc_ref(trans, root, cow, 0);
1075 if (ret)
1076 return ret;
1077 ret = btrfs_dec_ref(trans, root, buf, 1);
1078 if (ret)
1079 return ret;
1080 }
1081 clean_tree_block(fs_info, buf);
1082 *last_ref = 1;
1083 }
1084 return 0;
1085 }
1086
1087 /*
1088 * does the dirty work in cow of a single block. The parent block (if
1089 * supplied) is updated to point to the new cow copy. The new buffer is marked
1090 * dirty and returned locked. If you modify the block it needs to be marked
1091 * dirty again.
1092 *
1093 * search_start -- an allocation hint for the new block
1094 *
1095 * empty_size -- a hint that you plan on doing more cow. This is the size in
1096 * bytes the allocator should try to find free next to the block it returns.
1097 * This is just a hint and may be ignored by the allocator.
1098 */
1099 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1100 struct btrfs_root *root,
1101 struct extent_buffer *buf,
1102 struct extent_buffer *parent, int parent_slot,
1103 struct extent_buffer **cow_ret,
1104 u64 search_start, u64 empty_size)
1105 {
1106 struct btrfs_fs_info *fs_info = root->fs_info;
1107 struct btrfs_disk_key disk_key;
1108 struct extent_buffer *cow;
1109 int level, ret;
1110 int last_ref = 0;
1111 int unlock_orig = 0;
1112 u64 parent_start = 0;
1113
1114 if (*cow_ret == buf)
1115 unlock_orig = 1;
1116
1117 btrfs_assert_tree_locked(buf);
1118
1119 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1120 trans->transid != fs_info->running_transaction->transid);
1121 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1122 trans->transid != root->last_trans);
1123
1124 level = btrfs_header_level(buf);
1125
1126 if (level == 0)
1127 btrfs_item_key(buf, &disk_key, 0);
1128 else
1129 btrfs_node_key(buf, &disk_key, 0);
1130
1131 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1132 parent_start = parent->start;
1133
1134 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1135 root->root_key.objectid, &disk_key, level,
1136 search_start, empty_size);
1137 if (IS_ERR(cow))
1138 return PTR_ERR(cow);
1139
1140 /* cow is set to blocking by btrfs_init_new_buffer */
1141
1142 copy_extent_buffer_full(cow, buf);
1143 btrfs_set_header_bytenr(cow, cow->start);
1144 btrfs_set_header_generation(cow, trans->transid);
1145 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1146 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1147 BTRFS_HEADER_FLAG_RELOC);
1148 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1149 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1150 else
1151 btrfs_set_header_owner(cow, root->root_key.objectid);
1152
1153 write_extent_buffer_fsid(cow, fs_info->fsid);
1154
1155 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1156 if (ret) {
1157 btrfs_abort_transaction(trans, ret);
1158 return ret;
1159 }
1160
1161 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1162 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1163 if (ret) {
1164 btrfs_abort_transaction(trans, ret);
1165 return ret;
1166 }
1167 }
1168
1169 if (buf == root->node) {
1170 WARN_ON(parent && parent != buf);
1171 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1172 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1173 parent_start = buf->start;
1174
1175 extent_buffer_get(cow);
1176 tree_mod_log_set_root_pointer(root, cow, 1);
1177 rcu_assign_pointer(root->node, cow);
1178
1179 btrfs_free_tree_block(trans, root, buf, parent_start,
1180 last_ref);
1181 free_extent_buffer(buf);
1182 add_root_to_dirty_list(root);
1183 } else {
1184 WARN_ON(trans->transid != btrfs_header_generation(parent));
1185 tree_mod_log_insert_key(fs_info, parent, parent_slot,
1186 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1187 btrfs_set_node_blockptr(parent, parent_slot,
1188 cow->start);
1189 btrfs_set_node_ptr_generation(parent, parent_slot,
1190 trans->transid);
1191 btrfs_mark_buffer_dirty(parent);
1192 if (last_ref) {
1193 ret = tree_mod_log_free_eb(fs_info, buf);
1194 if (ret) {
1195 btrfs_abort_transaction(trans, ret);
1196 return ret;
1197 }
1198 }
1199 btrfs_free_tree_block(trans, root, buf, parent_start,
1200 last_ref);
1201 }
1202 if (unlock_orig)
1203 btrfs_tree_unlock(buf);
1204 free_extent_buffer_stale(buf);
1205 btrfs_mark_buffer_dirty(cow);
1206 *cow_ret = cow;
1207 return 0;
1208 }
1209
1210 /*
1211 * returns the logical address of the oldest predecessor of the given root.
1212 * entries older than time_seq are ignored.
1213 */
1214 static struct tree_mod_elem *
1215 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1216 struct extent_buffer *eb_root, u64 time_seq)
1217 {
1218 struct tree_mod_elem *tm;
1219 struct tree_mod_elem *found = NULL;
1220 u64 root_logical = eb_root->start;
1221 int looped = 0;
1222
1223 if (!time_seq)
1224 return NULL;
1225
1226 /*
1227 * the very last operation that's logged for a root is the
1228 * replacement operation (if it is replaced at all). this has
1229 * the logical address of the *new* root, making it the very
1230 * first operation that's logged for this root.
1231 */
1232 while (1) {
1233 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1234 time_seq);
1235 if (!looped && !tm)
1236 return NULL;
1237 /*
1238 		 * if there are no tree operations for the oldest root, we simply
1239 * return it. this should only happen if that (old) root is at
1240 * level 0.
1241 */
1242 if (!tm)
1243 break;
1244
1245 /*
1246 * if there's an operation that's not a root replacement, we
1247 * found the oldest version of our root. normally, we'll find a
1248 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1249 */
1250 if (tm->op != MOD_LOG_ROOT_REPLACE)
1251 break;
1252
1253 found = tm;
1254 root_logical = tm->old_root.logical;
1255 looped = 1;
1256 }
1257
1258 /* if there's no old root to return, return what we found instead */
1259 if (!found)
1260 found = tm;
1261
1262 return found;
1263 }
1264
1265 /*
1266 * tm is a pointer to the first operation to rewind within eb. then, all
1267 * previous operations will be rewound (until we reach something older than
1268 * time_seq).
1269 */
1270 static void
1271 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1272 u64 time_seq, struct tree_mod_elem *first_tm)
1273 {
1274 u32 n;
1275 struct rb_node *next;
1276 struct tree_mod_elem *tm = first_tm;
1277 unsigned long o_dst;
1278 unsigned long o_src;
1279 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1280
1281 n = btrfs_header_nritems(eb);
1282 tree_mod_log_read_lock(fs_info);
1283 while (tm && tm->seq >= time_seq) {
1284 /*
1285 * all the operations are recorded with the operator used for
1286 * the modification. as we're going backwards, we do the
1287 * opposite of each operation here.
1288 */
1289 switch (tm->op) {
1290 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1291 BUG_ON(tm->slot < n);
1292 /* Fallthrough */
1293 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1294 case MOD_LOG_KEY_REMOVE:
1295 btrfs_set_node_key(eb, &tm->key, tm->slot);
1296 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1297 btrfs_set_node_ptr_generation(eb, tm->slot,
1298 tm->generation);
1299 n++;
1300 break;
1301 case MOD_LOG_KEY_REPLACE:
1302 BUG_ON(tm->slot >= n);
1303 btrfs_set_node_key(eb, &tm->key, tm->slot);
1304 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1305 btrfs_set_node_ptr_generation(eb, tm->slot,
1306 tm->generation);
1307 break;
1308 case MOD_LOG_KEY_ADD:
1309 /* if a move operation is needed it's in the log */
1310 n--;
1311 break;
1312 case MOD_LOG_MOVE_KEYS:
1313 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1314 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1315 memmove_extent_buffer(eb, o_dst, o_src,
1316 tm->move.nr_items * p_size);
1317 break;
1318 case MOD_LOG_ROOT_REPLACE:
1319 /*
1320 * this operation is special. for roots, this must be
1321 * handled explicitly before rewinding.
1322 * for non-roots, this operation may exist if the node
1323 			 * was a root: root A -> child B; then A becomes empty and
1324 			 * B is promoted to the new root. in the mod log, we'll
1325 			 * have a root-replace operation for B, a tree block
1326 			 * that is not a root. we simply ignore that operation.
1327 */
1328 break;
1329 }
1330 next = rb_next(&tm->node);
1331 if (!next)
1332 break;
1333 tm = rb_entry(next, struct tree_mod_elem, node);
1334 if (tm->logical != first_tm->logical)
1335 break;
1336 }
1337 tree_mod_log_read_unlock(fs_info);
1338 btrfs_set_header_nritems(eb, n);
1339 }
1340
1341 /*
1342 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1343 * is returned. If rewind operations happen, a fresh buffer is returned. The
1344 * returned buffer is always read-locked. If the returned buffer is not the
1345 * input buffer, the lock on the input buffer is released and the input buffer
1346 * is freed (its refcount is decremented).
1347 */
1348 static struct extent_buffer *
1349 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1350 struct extent_buffer *eb, u64 time_seq)
1351 {
1352 struct extent_buffer *eb_rewin;
1353 struct tree_mod_elem *tm;
1354
1355 if (!time_seq)
1356 return eb;
1357
1358 if (btrfs_header_level(eb) == 0)
1359 return eb;
1360
1361 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1362 if (!tm)
1363 return eb;
1364
1365 btrfs_set_path_blocking(path);
1366 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1367
1368 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1369 BUG_ON(tm->slot != 0);
1370 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1371 if (!eb_rewin) {
1372 btrfs_tree_read_unlock_blocking(eb);
1373 free_extent_buffer(eb);
1374 return NULL;
1375 }
1376 btrfs_set_header_bytenr(eb_rewin, eb->start);
1377 btrfs_set_header_backref_rev(eb_rewin,
1378 btrfs_header_backref_rev(eb));
1379 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1380 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1381 } else {
1382 eb_rewin = btrfs_clone_extent_buffer(eb);
1383 if (!eb_rewin) {
1384 btrfs_tree_read_unlock_blocking(eb);
1385 free_extent_buffer(eb);
1386 return NULL;
1387 }
1388 }
1389
1390 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1391 btrfs_tree_read_unlock_blocking(eb);
1392 free_extent_buffer(eb);
1393
1394 extent_buffer_get(eb_rewin);
1395 btrfs_tree_read_lock(eb_rewin);
1396 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1397 WARN_ON(btrfs_header_nritems(eb_rewin) >
1398 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1399
1400 return eb_rewin;
1401 }
1402
1403 /*
1404 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1405  * value. If there are no changes, the current root->node is returned. If
1406 * anything changed in between, there's a fresh buffer allocated on which the
1407 * rewind operations are done. In any case, the returned buffer is read locked.
1408 * Returns NULL on error (with no locks held).
1409 */
1410 static inline struct extent_buffer *
1411 get_old_root(struct btrfs_root *root, u64 time_seq)
1412 {
1413 struct btrfs_fs_info *fs_info = root->fs_info;
1414 struct tree_mod_elem *tm;
1415 struct extent_buffer *eb = NULL;
1416 struct extent_buffer *eb_root;
1417 struct extent_buffer *old;
1418 struct tree_mod_root *old_root = NULL;
1419 u64 old_generation = 0;
1420 u64 logical;
1421
1422 eb_root = btrfs_read_lock_root_node(root);
1423 tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
1424 if (!tm)
1425 return eb_root;
1426
1427 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1428 old_root = &tm->old_root;
1429 old_generation = tm->generation;
1430 logical = old_root->logical;
1431 } else {
1432 logical = eb_root->start;
1433 }
1434
1435 tm = tree_mod_log_search(fs_info, logical, time_seq);
1436 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1437 btrfs_tree_read_unlock(eb_root);
1438 free_extent_buffer(eb_root);
1439 old = read_tree_block(fs_info, logical, 0);
1440 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1441 if (!IS_ERR(old))
1442 free_extent_buffer(old);
1443 btrfs_warn(fs_info,
1444 "failed to read tree block %llu from get_old_root",
1445 logical);
1446 } else {
1447 eb = btrfs_clone_extent_buffer(old);
1448 free_extent_buffer(old);
1449 }
1450 } else if (old_root) {
1451 btrfs_tree_read_unlock(eb_root);
1452 free_extent_buffer(eb_root);
1453 eb = alloc_dummy_extent_buffer(fs_info, logical);
1454 } else {
1455 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1456 eb = btrfs_clone_extent_buffer(eb_root);
1457 btrfs_tree_read_unlock_blocking(eb_root);
1458 free_extent_buffer(eb_root);
1459 }
1460
1461 if (!eb)
1462 return NULL;
1463 extent_buffer_get(eb);
1464 btrfs_tree_read_lock(eb);
1465 if (old_root) {
1466 btrfs_set_header_bytenr(eb, eb->start);
1467 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1468 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1469 btrfs_set_header_level(eb, old_root->level);
1470 btrfs_set_header_generation(eb, old_generation);
1471 }
1472 if (tm)
1473 __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1474 else
1475 WARN_ON(btrfs_header_level(eb) != 0);
1476 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1477
1478 return eb;
1479 }
1480
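/*
 * Return the level @root's root node had at @time_seq. If the root has been
 * replaced since then, the level is taken from the logged old root;
 * otherwise the current root node's level is used.
 */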
1481 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1482 {
1483 struct tree_mod_elem *tm;
1484 int level;
1485 struct extent_buffer *eb_root = btrfs_root_node(root);
1486
1487 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1488 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1489 level = tm->old_root.level;
1490 } else {
1491 level = btrfs_header_level(eb_root);
1492 }
1493 free_extent_buffer(eb_root);
1494
1495 return level;
1496 }
1497
1498 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1499 struct btrfs_root *root,
1500 struct extent_buffer *buf)
1501 {
1502 if (btrfs_is_testing(root->fs_info))
1503 return 0;
1504
1505 /* ensure we can see the force_cow */
1506 smp_rmb();
1507
1508 /*
1509 * We do not need to cow a block if
1510 * 1) this block is not created or changed in this transaction;
1511 * 2) this block does not belong to TREE_RELOC tree;
1512 * 3) the root is not forced COW.
1513 *
1514 * What is forced COW:
1515 	 * when we create a snapshot while committing the transaction,
1516 	 * after we've finished copying the src root, we must COW the shared
1517 	 * block to ensure metadata consistency.
1518 */
1519 if (btrfs_header_generation(buf) == trans->transid &&
1520 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1521 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1522 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1523 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1524 return 0;
1525 return 1;
1526 }
1527
1528 /*
1529 * cows a single block, see __btrfs_cow_block for the real work.
1530 * This version of it has extra checks so that a block isn't COWed more than
1531 * once per transaction, as long as it hasn't been written yet
1532 */
1533 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1534 struct btrfs_root *root, struct extent_buffer *buf,
1535 struct extent_buffer *parent, int parent_slot,
1536 struct extent_buffer **cow_ret)
1537 {
1538 struct btrfs_fs_info *fs_info = root->fs_info;
1539 u64 search_start;
1540 int ret;
1541
1542 if (trans->transaction != fs_info->running_transaction)
1543 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1544 trans->transid,
1545 fs_info->running_transaction->transid);
1546
1547 if (trans->transid != fs_info->generation)
1548 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1549 trans->transid, fs_info->generation);
1550
1551 if (!should_cow_block(trans, root, buf)) {
1552 trans->dirty = true;
1553 *cow_ret = buf;
1554 return 0;
1555 }
1556
1557 search_start = buf->start & ~((u64)SZ_1G - 1);
1558
1559 if (parent)
1560 btrfs_set_lock_blocking(parent);
1561 btrfs_set_lock_blocking(buf);
1562
1563 ret = __btrfs_cow_block(trans, root, buf, parent,
1564 parent_slot, cow_ret, search_start, 0);
1565
1566 trace_btrfs_cow_block(root, buf, *cow_ret);
1567
1568 return ret;
1569 }
1570
1571 /*
1572 * helper function for defrag to decide if two blocks pointed to by a
1573 * node are actually close by
1574 */
1575 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1576 {
1577 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1578 return 1;
1579 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1580 return 1;
1581 return 0;
1582 }
1583
1584 /*
1585 * compare two keys in a memcmp fashion
1586 */
1587 static int comp_keys(const struct btrfs_disk_key *disk,
1588 const struct btrfs_key *k2)
1589 {
1590 struct btrfs_key k1;
1591
1592 btrfs_disk_key_to_cpu(&k1, disk);
1593
1594 return btrfs_comp_cpu_keys(&k1, k2);
1595 }
1596
1597 /*
1598  * same as comp_keys, only with two btrfs_key arguments
1599 */
1600 int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1601 {
1602 if (k1->objectid > k2->objectid)
1603 return 1;
1604 if (k1->objectid < k2->objectid)
1605 return -1;
1606 if (k1->type > k2->type)
1607 return 1;
1608 if (k1->type < k2->type)
1609 return -1;
1610 if (k1->offset > k2->offset)
1611 return 1;
1612 if (k1->offset < k2->offset)
1613 return -1;
1614 return 0;
1615 }
1616
1617 /*
1618 * this is used by the defrag code to go through all the
1619 * leaves pointed to by a node and reallocate them so that
1620 * disk order is close to key order
1621 */
1622 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1623 struct btrfs_root *root, struct extent_buffer *parent,
1624 int start_slot, u64 *last_ret,
1625 struct btrfs_key *progress)
1626 {
1627 struct btrfs_fs_info *fs_info = root->fs_info;
1628 struct extent_buffer *cur;
1629 u64 blocknr;
1630 u64 gen;
1631 u64 search_start = *last_ret;
1632 u64 last_block = 0;
1633 u64 other;
1634 u32 parent_nritems;
1635 int end_slot;
1636 int i;
1637 int err = 0;
1638 int parent_level;
1639 int uptodate;
1640 u32 blocksize;
1641 int progress_passed = 0;
1642 struct btrfs_disk_key disk_key;
1643
1644 parent_level = btrfs_header_level(parent);
1645
1646 WARN_ON(trans->transaction != fs_info->running_transaction);
1647 WARN_ON(trans->transid != fs_info->generation);
1648
1649 parent_nritems = btrfs_header_nritems(parent);
1650 blocksize = fs_info->nodesize;
1651 end_slot = parent_nritems - 1;
1652
1653 if (parent_nritems <= 1)
1654 return 0;
1655
1656 btrfs_set_lock_blocking(parent);
1657
1658 for (i = start_slot; i <= end_slot; i++) {
1659 int close = 1;
1660
1661 btrfs_node_key(parent, &disk_key, i);
1662 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1663 continue;
1664
1665 progress_passed = 1;
1666 blocknr = btrfs_node_blockptr(parent, i);
1667 gen = btrfs_node_ptr_generation(parent, i);
1668 if (last_block == 0)
1669 last_block = blocknr;
1670
1671 if (i > 0) {
1672 other = btrfs_node_blockptr(parent, i - 1);
1673 close = close_blocks(blocknr, other, blocksize);
1674 }
1675 if (!close && i < end_slot) {
1676 other = btrfs_node_blockptr(parent, i + 1);
1677 close = close_blocks(blocknr, other, blocksize);
1678 }
1679 if (close) {
1680 last_block = blocknr;
1681 continue;
1682 }
1683
1684 cur = find_extent_buffer(fs_info, blocknr);
1685 if (cur)
1686 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1687 else
1688 uptodate = 0;
1689 if (!cur || !uptodate) {
1690 if (!cur) {
1691 cur = read_tree_block(fs_info, blocknr, gen);
1692 if (IS_ERR(cur)) {
1693 return PTR_ERR(cur);
1694 } else if (!extent_buffer_uptodate(cur)) {
1695 free_extent_buffer(cur);
1696 return -EIO;
1697 }
1698 } else if (!uptodate) {
1699 err = btrfs_read_buffer(cur, gen);
1700 if (err) {
1701 free_extent_buffer(cur);
1702 return err;
1703 }
1704 }
1705 }
1706 if (search_start == 0)
1707 search_start = last_block;
1708
1709 btrfs_tree_lock(cur);
1710 btrfs_set_lock_blocking(cur);
1711 err = __btrfs_cow_block(trans, root, cur, parent, i,
1712 &cur, search_start,
1713 min(16 * blocksize,
1714 (end_slot - i) * blocksize));
1715 if (err) {
1716 btrfs_tree_unlock(cur);
1717 free_extent_buffer(cur);
1718 break;
1719 }
1720 search_start = cur->start;
1721 last_block = cur->start;
1722 *last_ret = search_start;
1723 btrfs_tree_unlock(cur);
1724 free_extent_buffer(cur);
1725 }
1726 return err;
1727 }
1728
1729 /*
1730 * search for key in the extent_buffer. The items start at offset p,
1731 * and they are item_size apart. There are 'max' items in p.
1732 *
1733 * the slot in the array is returned via slot, and it points to
1734 * the place where you would insert key if it is not found in
1735 * the array.
1736 *
1737 * slot may point to max if the key is bigger than all of the keys
1738 */
1739 static noinline int generic_bin_search(struct extent_buffer *eb,
1740 unsigned long p, int item_size,
1741 const struct btrfs_key *key,
1742 int max, int *slot)
1743 {
1744 int low = 0;
1745 int high = max;
1746 int mid;
1747 int ret;
1748 struct btrfs_disk_key *tmp = NULL;
1749 struct btrfs_disk_key unaligned;
1750 unsigned long offset;
1751 char *kaddr = NULL;
1752 unsigned long map_start = 0;
1753 unsigned long map_len = 0;
1754 int err;
1755
1756 if (low > high) {
1757 btrfs_err(eb->fs_info,
1758 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1759 __func__, low, high, eb->start,
1760 btrfs_header_owner(eb), btrfs_header_level(eb));
1761 return -EINVAL;
1762 }
1763
1764 while (low < high) {
1765 mid = (low + high) / 2;
1766 offset = p + mid * item_size;
1767
1768 if (!kaddr || offset < map_start ||
1769 (offset + sizeof(struct btrfs_disk_key)) >
1770 map_start + map_len) {
1771
1772 err = map_private_extent_buffer(eb, offset,
1773 sizeof(struct btrfs_disk_key),
1774 &kaddr, &map_start, &map_len);
1775
1776 if (!err) {
1777 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1778 map_start);
1779 } else if (err == 1) {
1780 read_extent_buffer(eb, &unaligned,
1781 offset, sizeof(unaligned));
1782 tmp = &unaligned;
1783 } else {
1784 return err;
1785 }
1786
1787 } else {
1788 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1789 map_start);
1790 }
1791 ret = comp_keys(tmp, key);
1792
1793 if (ret < 0)
1794 low = mid + 1;
1795 else if (ret > 0)
1796 high = mid;
1797 else {
1798 *slot = mid;
1799 return 0;
1800 }
1801 }
1802 *slot = low;
1803 return 1;
1804 }
1805
1806 /*
1807 * simple bin_search frontend that does the right thing for
1808 * leaves vs nodes
1809 */
1810 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1811 int level, int *slot)
1812 {
1813 if (level == 0)
1814 return generic_bin_search(eb,
1815 offsetof(struct btrfs_leaf, items),
1816 sizeof(struct btrfs_item),
1817 key, btrfs_header_nritems(eb),
1818 slot);
1819 else
1820 return generic_bin_search(eb,
1821 offsetof(struct btrfs_node, ptrs),
1822 sizeof(struct btrfs_key_ptr),
1823 key, btrfs_header_nritems(eb),
1824 slot);
1825 }
1826
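/* account @size more bytes as used by this root in its root item */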
1827 static void root_add_used(struct btrfs_root *root, u32 size)
1828 {
1829 spin_lock(&root->accounting_lock);
1830 btrfs_set_root_used(&root->root_item,
1831 btrfs_root_used(&root->root_item) + size);
1832 spin_unlock(&root->accounting_lock);
1833 }
1834
1835 static void root_sub_used(struct btrfs_root *root, u32 size)
1836 {
1837 spin_lock(&root->accounting_lock);
1838 btrfs_set_root_used(&root->root_item,
1839 btrfs_root_used(&root->root_item) - size);
1840 spin_unlock(&root->accounting_lock);
1841 }
1842
1843 /* given a node and slot number, this reads the block it points to. The
1844 * extent buffer is returned with a reference taken (but unlocked).
1845 */
1846 static noinline struct extent_buffer *
1847 read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
1848 int slot)
1849 {
1850 int level = btrfs_header_level(parent);
1851 struct extent_buffer *eb;
1852
1853 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1854 return ERR_PTR(-ENOENT);
1855
1856 BUG_ON(level == 0);
1857
1858 eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
1859 btrfs_node_ptr_generation(parent, slot));
1860 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1861 free_extent_buffer(eb);
1862 eb = ERR_PTR(-EIO);
1863 }
1864
1865 return eb;
1866 }
1867
1868 /*
1869 * node level balancing, used to make sure nodes are in proper order for
1870 * item deletion. We balance from the top down, so we have to make sure
1871  * that a deletion won't leave a node completely empty later on.
1872 */
1873 static noinline int balance_level(struct btrfs_trans_handle *trans,
1874 struct btrfs_root *root,
1875 struct btrfs_path *path, int level)
1876 {
1877 struct btrfs_fs_info *fs_info = root->fs_info;
1878 struct extent_buffer *right = NULL;
1879 struct extent_buffer *mid;
1880 struct extent_buffer *left = NULL;
1881 struct extent_buffer *parent = NULL;
1882 int ret = 0;
1883 int wret;
1884 int pslot;
1885 int orig_slot = path->slots[level];
1886 u64 orig_ptr;
1887
1888 if (level == 0)
1889 return 0;
1890
1891 mid = path->nodes[level];
1892
1893 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1894 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1895 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1896
1897 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1898
1899 if (level < BTRFS_MAX_LEVEL - 1) {
1900 parent = path->nodes[level + 1];
1901 pslot = path->slots[level + 1];
1902 }
1903
1904 /*
1905 * deal with the case where there is only one pointer in the root
1906 * by promoting the node below to a root
1907 */
1908 if (!parent) {
1909 struct extent_buffer *child;
1910
1911 if (btrfs_header_nritems(mid) != 1)
1912 return 0;
1913
1914 /* promote the child to a root */
1915 child = read_node_slot(fs_info, mid, 0);
1916 if (IS_ERR(child)) {
1917 ret = PTR_ERR(child);
1918 btrfs_handle_fs_error(fs_info, ret, NULL);
1919 goto enospc;
1920 }
1921
1922 btrfs_tree_lock(child);
1923 btrfs_set_lock_blocking(child);
1924 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1925 if (ret) {
1926 btrfs_tree_unlock(child);
1927 free_extent_buffer(child);
1928 goto enospc;
1929 }
1930
1931 tree_mod_log_set_root_pointer(root, child, 1);
1932 rcu_assign_pointer(root->node, child);
1933
1934 add_root_to_dirty_list(root);
1935 btrfs_tree_unlock(child);
1936
1937 path->locks[level] = 0;
1938 path->nodes[level] = NULL;
1939 clean_tree_block(fs_info, mid);
1940 btrfs_tree_unlock(mid);
1941 /* once for the path */
1942 free_extent_buffer(mid);
1943
1944 root_sub_used(root, mid->len);
1945 btrfs_free_tree_block(trans, root, mid, 0, 1);
1946 /* once for the root ptr */
1947 free_extent_buffer_stale(mid);
1948 return 0;
1949 }
1950 if (btrfs_header_nritems(mid) >
1951 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1952 return 0;
1953
1954 left = read_node_slot(fs_info, parent, pslot - 1);
1955 if (IS_ERR(left))
1956 left = NULL;
1957
1958 if (left) {
1959 btrfs_tree_lock(left);
1960 btrfs_set_lock_blocking(left);
1961 wret = btrfs_cow_block(trans, root, left,
1962 parent, pslot - 1, &left);
1963 if (wret) {
1964 ret = wret;
1965 goto enospc;
1966 }
1967 }
1968
1969 right = read_node_slot(fs_info, parent, pslot + 1);
1970 if (IS_ERR(right))
1971 right = NULL;
1972
1973 if (right) {
1974 btrfs_tree_lock(right);
1975 btrfs_set_lock_blocking(right);
1976 wret = btrfs_cow_block(trans, root, right,
1977 parent, pslot + 1, &right);
1978 if (wret) {
1979 ret = wret;
1980 goto enospc;
1981 }
1982 }
1983
1984 /* first, try to make some room in the middle buffer */
1985 if (left) {
1986 orig_slot += btrfs_header_nritems(left);
1987 wret = push_node_left(trans, fs_info, left, mid, 1);
1988 if (wret < 0)
1989 ret = wret;
1990 }
1991
1992 /*
1993 * then try to empty the right most buffer into the middle
1994 */
1995 if (right) {
1996 wret = push_node_left(trans, fs_info, mid, right, 1);
1997 if (wret < 0 && wret != -ENOSPC)
1998 ret = wret;
1999 if (btrfs_header_nritems(right) == 0) {
2000 clean_tree_block(fs_info, right);
2001 btrfs_tree_unlock(right);
2002 del_ptr(root, path, level + 1, pslot + 1);
2003 root_sub_used(root, right->len);
2004 btrfs_free_tree_block(trans, root, right, 0, 1);
2005 free_extent_buffer_stale(right);
2006 right = NULL;
2007 } else {
2008 struct btrfs_disk_key right_key;
2009 btrfs_node_key(right, &right_key, 0);
2010 tree_mod_log_set_node_key(fs_info, parent,
2011 pslot + 1, 0);
2012 btrfs_set_node_key(parent, &right_key, pslot + 1);
2013 btrfs_mark_buffer_dirty(parent);
2014 }
2015 }
2016 if (btrfs_header_nritems(mid) == 1) {
2017 /*
2018 * we're not allowed to leave a node with one item in the
2019 * tree during a delete. A deletion from lower in the tree
2020 * could try to delete the only pointer in this node.
2021 * So, pull some keys from the left.
2022 * There has to be a left pointer at this point because
2023 * otherwise we would have pulled some pointers from the
2024 * right
2025 */
2026 if (!left) {
2027 ret = -EROFS;
2028 btrfs_handle_fs_error(fs_info, ret, NULL);
2029 goto enospc;
2030 }
2031 wret = balance_node_right(trans, fs_info, mid, left);
2032 if (wret < 0) {
2033 ret = wret;
2034 goto enospc;
2035 }
2036 if (wret == 1) {
2037 wret = push_node_left(trans, fs_info, left, mid, 1);
2038 if (wret < 0)
2039 ret = wret;
2040 }
2041 BUG_ON(wret == 1);
2042 }
2043 if (btrfs_header_nritems(mid) == 0) {
2044 clean_tree_block(fs_info, mid);
2045 btrfs_tree_unlock(mid);
2046 del_ptr(root, path, level + 1, pslot);
2047 root_sub_used(root, mid->len);
2048 btrfs_free_tree_block(trans, root, mid, 0, 1);
2049 free_extent_buffer_stale(mid);
2050 mid = NULL;
2051 } else {
2052 /* update the parent key to reflect our changes */
2053 struct btrfs_disk_key mid_key;
2054 btrfs_node_key(mid, &mid_key, 0);
2055 tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
2056 btrfs_set_node_key(parent, &mid_key, pslot);
2057 btrfs_mark_buffer_dirty(parent);
2058 }
2059
2060 /* update the path */
2061 if (left) {
2062 if (btrfs_header_nritems(left) > orig_slot) {
2063 extent_buffer_get(left);
2064 /* left was locked after cow */
2065 path->nodes[level] = left;
2066 path->slots[level + 1] -= 1;
2067 path->slots[level] = orig_slot;
2068 if (mid) {
2069 btrfs_tree_unlock(mid);
2070 free_extent_buffer(mid);
2071 }
2072 } else {
2073 orig_slot -= btrfs_header_nritems(left);
2074 path->slots[level] = orig_slot;
2075 }
2076 }
2077 /* double check we haven't messed things up */
2078 if (orig_ptr !=
2079 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2080 BUG();
2081 enospc:
2082 if (right) {
2083 btrfs_tree_unlock(right);
2084 free_extent_buffer(right);
2085 }
2086 if (left) {
2087 if (path->nodes[level] != left)
2088 btrfs_tree_unlock(left);
2089 free_extent_buffer(left);
2090 }
2091 return ret;
2092 }
2093
2094 /* Node balancing for insertion. Here we only split or push nodes around
2095 * when they are completely full. This is also done top down, so we
2096 * have to be pessimistic.
2097 */
2098 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2099 struct btrfs_root *root,
2100 struct btrfs_path *path, int level)
2101 {
2102 struct btrfs_fs_info *fs_info = root->fs_info;
2103 struct extent_buffer *right = NULL;
2104 struct extent_buffer *mid;
2105 struct extent_buffer *left = NULL;
2106 struct extent_buffer *parent = NULL;
2107 int ret = 0;
2108 int wret;
2109 int pslot;
2110 int orig_slot = path->slots[level];
2111
2112 if (level == 0)
2113 return 1;
2114
2115 mid = path->nodes[level];
2116 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2117
2118 if (level < BTRFS_MAX_LEVEL - 1) {
2119 parent = path->nodes[level + 1];
2120 pslot = path->slots[level + 1];
2121 }
2122
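/* mid is the tree root, there are no siblings to push into */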
2123 if (!parent)
2124 return 1;
2125
2126 left = read_node_slot(fs_info, parent, pslot - 1);
2127 if (IS_ERR(left))
2128 left = NULL;
2129
2130 /* first, try to make some room in the middle buffer */
2131 if (left) {
2132 u32 left_nr;
2133
2134 btrfs_tree_lock(left);
2135 btrfs_set_lock_blocking(left);
2136
2137 left_nr = btrfs_header_nritems(left);
2138 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2139 wret = 1;
2140 } else {
2141 ret = btrfs_cow_block(trans, root, left, parent,
2142 pslot - 1, &left);
2143 if (ret)
2144 wret = 1;
2145 else {
2146 wret = push_node_left(trans, fs_info,
2147 left, mid, 0);
2148 }
2149 }
2150 if (wret < 0)
2151 ret = wret;
2152 if (wret == 0) {
2153 struct btrfs_disk_key disk_key;
2154 orig_slot += left_nr;
2155 btrfs_node_key(mid, &disk_key, 0);
2156 tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
2157 btrfs_set_node_key(parent, &disk_key, pslot);
2158 btrfs_mark_buffer_dirty(parent);
2159 if (btrfs_header_nritems(left) > orig_slot) {
2160 path->nodes[level] = left;
2161 path->slots[level + 1] -= 1;
2162 path->slots[level] = orig_slot;
2163 btrfs_tree_unlock(mid);
2164 free_extent_buffer(mid);
2165 } else {
2166 orig_slot -=
2167 btrfs_header_nritems(left);
2168 path->slots[level] = orig_slot;
2169 btrfs_tree_unlock(left);
2170 free_extent_buffer(left);
2171 }
2172 return 0;
2173 }
2174 btrfs_tree_unlock(left);
2175 free_extent_buffer(left);
2176 }
2177 right = read_node_slot(fs_info, parent, pslot + 1);
2178 if (IS_ERR(right))
2179 right = NULL;
2180
2181 /*
2182 * then try to empty the right most buffer into the middle
2183 */
2184 if (right) {
2185 u32 right_nr;
2186
2187 btrfs_tree_lock(right);
2188 btrfs_set_lock_blocking(right);
2189
2190 right_nr = btrfs_header_nritems(right);
2191 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2192 wret = 1;
2193 } else {
2194 ret = btrfs_cow_block(trans, root, right,
2195 parent, pslot + 1,
2196 &right);
2197 if (ret)
2198 wret = 1;
2199 else {
2200 wret = balance_node_right(trans, fs_info,
2201 right, mid);
2202 }
2203 }
2204 if (wret < 0)
2205 ret = wret;
2206 if (wret == 0) {
2207 struct btrfs_disk_key disk_key;
2208
2209 btrfs_node_key(right, &disk_key, 0);
2210 tree_mod_log_set_node_key(fs_info, parent,
2211 pslot + 1, 0);
2212 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2213 btrfs_mark_buffer_dirty(parent);
2214
2215 if (btrfs_header_nritems(mid) <= orig_slot) {
2216 path->nodes[level] = right;
2217 path->slots[level + 1] += 1;
2218 path->slots[level] = orig_slot -
2219 btrfs_header_nritems(mid);
2220 btrfs_tree_unlock(mid);
2221 free_extent_buffer(mid);
2222 } else {
2223 btrfs_tree_unlock(right);
2224 free_extent_buffer(right);
2225 }
2226 return 0;
2227 }
2228 btrfs_tree_unlock(right);
2229 free_extent_buffer(right);
2230 }
2231 return 1;
2232 }
2233
2234 /*
2235 * readahead one full node of leaves, finding things that are close
2236 * to the block in 'slot', and triggering readahead on them.
2237 */
2238 static void reada_for_search(struct btrfs_fs_info *fs_info,
2239 struct btrfs_path *path,
2240 int level, int slot, u64 objectid)
2241 {
2242 struct extent_buffer *node;
2243 struct btrfs_disk_key disk_key;
2244 u32 nritems;
2245 u64 search;
2246 u64 target;
2247 u64 nread = 0;
2248 struct extent_buffer *eb;
2249 u32 nr;
2250 u32 blocksize;
2251 u32 nscan = 0;
2252
2253 if (level != 1)
2254 return;
2255
2256 if (!path->nodes[level])
2257 return;
2258
2259 node = path->nodes[level];
2260
2261 search = btrfs_node_blockptr(node, slot);
2262 blocksize = fs_info->nodesize;
2263 eb = find_extent_buffer(fs_info, search);
2264 if (eb) {
2265 free_extent_buffer(eb);
2266 return;
2267 }
2268
2269 target = search;
2270
2271 nritems = btrfs_header_nritems(node);
2272 nr = slot;
2273
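/*
 * Walk the slots next to 'slot' (backwards for READA_BACK, forwards for
 * READA_FORWARD) and trigger readahead on blocks that sit within 64KiB
 * of the target block. Stop after 64KiB has been read ahead or 32 slots
 * have been scanned; READA_BACK also stops once the key objectid changes.
 */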
2274 while (1) {
2275 if (path->reada == READA_BACK) {
2276 if (nr == 0)
2277 break;
2278 nr--;
2279 } else if (path->reada == READA_FORWARD) {
2280 nr++;
2281 if (nr >= nritems)
2282 break;
2283 }
2284 if (path->reada == READA_BACK && objectid) {
2285 btrfs_node_key(node, &disk_key, nr);
2286 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2287 break;
2288 }
2289 search = btrfs_node_blockptr(node, nr);
2290 if ((search <= target && target - search <= 65536) ||
2291 (search > target && search - target <= 65536)) {
2292 readahead_tree_block(fs_info, search);
2293 nread += blocksize;
2294 }
2295 nscan++;
2296 if ((nread > 65536 || nscan > 32))
2297 break;
2298 }
2299 }
2300
2301 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2302 struct btrfs_path *path, int level)
2303 {
2304 int slot;
2305 int nritems;
2306 struct extent_buffer *parent;
2307 struct extent_buffer *eb;
2308 u64 gen;
2309 u64 block1 = 0;
2310 u64 block2 = 0;
2311
2312 parent = path->nodes[level + 1];
2313 if (!parent)
2314 return;
2315
2316 nritems = btrfs_header_nritems(parent);
2317 slot = path->slots[level + 1];
2318
2319 if (slot > 0) {
2320 block1 = btrfs_node_blockptr(parent, slot - 1);
2321 gen = btrfs_node_ptr_generation(parent, slot - 1);
2322 eb = find_extent_buffer(fs_info, block1);
2323 /*
2324 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2325 * don't want to return -EAGAIN here. That will loop
2326 * forever
2327 */
2328 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2329 block1 = 0;
2330 free_extent_buffer(eb);
2331 }
2332 if (slot + 1 < nritems) {
2333 block2 = btrfs_node_blockptr(parent, slot + 1);
2334 gen = btrfs_node_ptr_generation(parent, slot + 1);
2335 eb = find_extent_buffer(fs_info, block2);
2336 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2337 block2 = 0;
2338 free_extent_buffer(eb);
2339 }
2340
2341 if (block1)
2342 readahead_tree_block(fs_info, block1);
2343 if (block2)
2344 readahead_tree_block(fs_info, block2);
2345 }
2346
2347
2348 /*
2349 * when we walk down the tree, it is usually safe to unlock the higher layers
2350 * in the tree. The exceptions are when our path goes through slot 0, because
2351 * operations on the tree might require changing key pointers higher up in the
2352 * tree.
2353 *
2354 * callers might also have set path->keep_locks, which tells this code to keep
2355 * the lock if the path points to the last slot in the block. This is part of
2356 * walking through the tree, and selecting the next slot in the higher block.
2357 *
2358 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
2359 * if lowest_unlock is 1, level 0 won't be unlocked
2360 */
2361 static noinline void unlock_up(struct btrfs_path *path, int level,
2362 int lowest_unlock, int min_write_lock_level,
2363 int *write_lock_level)
2364 {
2365 int i;
2366 int skip_level = level;
2367 int no_skips = 0;
2368 struct extent_buffer *t;
2369
2370 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2371 if (!path->nodes[i])
2372 break;
2373 if (!path->locks[i])
2374 break;
2375 if (!no_skips && path->slots[i] == 0) {
2376 skip_level = i + 1;
2377 continue;
2378 }
2379 if (!no_skips && path->keep_locks) {
2380 u32 nritems;
2381 t = path->nodes[i];
2382 nritems = btrfs_header_nritems(t);
2383 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2384 skip_level = i + 1;
2385 continue;
2386 }
2387 }
2388 if (skip_level < i && i >= lowest_unlock)
2389 no_skips = 1;
2390
2391 t = path->nodes[i];
2392 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2393 btrfs_tree_unlock_rw(t, path->locks[i]);
2394 path->locks[i] = 0;
2395 if (write_lock_level &&
2396 i > min_write_lock_level &&
2397 i <= *write_lock_level) {
2398 *write_lock_level = i - 1;
2399 }
2400 }
2401 }
2402 }
2403
2404 /*
2405 * This releases any locks held in the path starting at level and
2406 * going all the way up to the root.
2407 *
2408 * btrfs_search_slot will keep the lock held on higher nodes in a few
2409 * corner cases, such as COW of the block at slot zero in the node. This
2410 * ignores those rules, and it should only be called when there are no
2411 * more updates to be done higher up in the tree.
2412 */
2413 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2414 {
2415 int i;
2416
2417 if (path->keep_locks)
2418 return;
2419
2420 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2421 if (!path->nodes[i])
2422 continue;
2423 if (!path->locks[i])
2424 continue;
2425 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2426 path->locks[i] = 0;
2427 }
2428 }
2429
2430 /*
2431 * helper function for btrfs_search_slot. The goal is to find a block
2432 * in cache without setting the path to blocking. If we find the block
2433 * we return zero and the path is unchanged.
2434 *
2435 * If we can't find the block, we set the path blocking and do some
2436 * reada. -EAGAIN is returned and the search must be repeated.
2437 */
2438 static int
2439 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2440 struct extent_buffer **eb_ret, int level, int slot,
2441 const struct btrfs_key *key)
2442 {
2443 struct btrfs_fs_info *fs_info = root->fs_info;
2444 u64 blocknr;
2445 u64 gen;
2446 struct extent_buffer *b = *eb_ret;
2447 struct extent_buffer *tmp;
2448 int ret;
2449
2450 blocknr = btrfs_node_blockptr(b, slot);
2451 gen = btrfs_node_ptr_generation(b, slot);
2452
2453 tmp = find_extent_buffer(fs_info, blocknr);
2454 if (tmp) {
2455 /* first we do an atomic uptodate check */
2456 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2457 *eb_ret = tmp;
2458 return 0;
2459 }
2460
2461 /* the pages were up to date, but we failed
2462 * the generation number check. Do a full
2463 * read for the generation number that is correct.
2464 * We must do this without dropping locks so
2465 * we can trust our generation number
2466 */
2467 btrfs_set_path_blocking(p);
2468
2469 /* now we're allowed to do a blocking uptodate check */
2470 ret = btrfs_read_buffer(tmp, gen);
2471 if (!ret) {
2472 *eb_ret = tmp;
2473 return 0;
2474 }
2475 free_extent_buffer(tmp);
2476 btrfs_release_path(p);
2477 return -EIO;
2478 }
2479
2480 /*
2481 * reduce lock contention at high levels
2482 * of the btree by dropping locks before
2483 * we read. Don't release the lock on the current
2484 * level because we need to walk this node to figure
2485 * out which blocks to read.
2486 */
2487 btrfs_unlock_up_safe(p, level + 1);
2488 btrfs_set_path_blocking(p);
2489
2490 free_extent_buffer(tmp);
2491 if (p->reada != READA_NONE)
2492 reada_for_search(fs_info, p, level, slot, key->objectid);
2493
2494 btrfs_release_path(p);
2495
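/*
 * The buffer wasn't cached. We've dropped our locks and the path, so
 * read the block here and return -EAGAIN to make the caller repeat the
 * search; the retry should then find the block up to date in cache.
 */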
2496 ret = -EAGAIN;
2497 tmp = read_tree_block(fs_info, blocknr, 0);
2498 if (!IS_ERR(tmp)) {
2499 /*
2500 * If the read above didn't mark this buffer up to date,
2501 * it will never end up being up to date. Set ret to EIO now
2502 * and give up so that our caller doesn't loop forever
2503 * on our EAGAINs.
2504 */
2505 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2506 ret = -EIO;
2507 free_extent_buffer(tmp);
2508 } else {
2509 ret = PTR_ERR(tmp);
2510 }
2511 return ret;
2512 }
2513
2514 /*
2515 * helper function for btrfs_search_slot. This does all of the checks
2516 * for node-level blocks and does any balancing required based on
2517 * the ins_len.
2518 *
2519 * If no extra work was required, zero is returned. If we had to
2520 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2521 * start over
2522 */
2523 static int
2524 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2525 struct btrfs_root *root, struct btrfs_path *p,
2526 struct extent_buffer *b, int level, int ins_len,
2527 int *write_lock_level)
2528 {
2529 struct btrfs_fs_info *fs_info = root->fs_info;
2530 int ret;
2531
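/*
 * For inserts (and split searches) we split any node that is within 3
 * pointers of being full; for deletes we rebalance any node that has
 * dropped below half full.
 */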
2532 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2533 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2534 int sret;
2535
2536 if (*write_lock_level < level + 1) {
2537 *write_lock_level = level + 1;
2538 btrfs_release_path(p);
2539 goto again;
2540 }
2541
2542 btrfs_set_path_blocking(p);
2543 reada_for_balance(fs_info, p, level);
2544 sret = split_node(trans, root, p, level);
2545 btrfs_clear_path_blocking(p, NULL, 0);
2546
2547 BUG_ON(sret > 0);
2548 if (sret) {
2549 ret = sret;
2550 goto done;
2551 }
2552 b = p->nodes[level];
2553 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2554 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2555 int sret;
2556
2557 if (*write_lock_level < level + 1) {
2558 *write_lock_level = level + 1;
2559 btrfs_release_path(p);
2560 goto again;
2561 }
2562
2563 btrfs_set_path_blocking(p);
2564 reada_for_balance(fs_info, p, level);
2565 sret = balance_level(trans, root, p, level);
2566 btrfs_clear_path_blocking(p, NULL, 0);
2567
2568 if (sret) {
2569 ret = sret;
2570 goto done;
2571 }
2572 b = p->nodes[level];
2573 if (!b) {
2574 btrfs_release_path(p);
2575 goto again;
2576 }
2577 BUG_ON(btrfs_header_nritems(b) == 1);
2578 }
2579 return 0;
2580
2581 again:
2582 ret = -EAGAIN;
2583 done:
2584 return ret;
2585 }
2586
2587 static void key_search_validate(struct extent_buffer *b,
2588 const struct btrfs_key *key,
2589 int level)
2590 {
2591 #ifdef CONFIG_BTRFS_ASSERT
2592 struct btrfs_disk_key disk_key;
2593
2594 btrfs_cpu_key_to_disk(&disk_key, key);
2595
2596 if (level == 0)
2597 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2598 offsetof(struct btrfs_leaf, items[0].key),
2599 sizeof(disk_key)));
2600 else
2601 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2602 offsetof(struct btrfs_node, ptrs[0].key),
2603 sizeof(disk_key)));
2604 #endif
2605 }
2606
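/*
 * If a previous bin_search found an exact match (*prev_cmp == 0), the
 * block we descended into begins with that same key, so on every level
 * below the key must be at slot 0 and the binary search can be skipped.
 */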
2607 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2608 int level, int *prev_cmp, int *slot)
2609 {
2610 if (*prev_cmp != 0) {
2611 *prev_cmp = btrfs_bin_search(b, key, level, slot);
2612 return *prev_cmp;
2613 }
2614
2615 key_search_validate(b, key, level);
2616 *slot = 0;
2617
2618 return 0;
2619 }
2620
2621 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2622 u64 iobjectid, u64 ioff, u8 key_type,
2623 struct btrfs_key *found_key)
2624 {
2625 int ret;
2626 struct btrfs_key key;
2627 struct extent_buffer *eb;
2628
2629 ASSERT(path);
2630 ASSERT(found_key);
2631
2632 key.type = key_type;
2633 key.objectid = iobjectid;
2634 key.offset = ioff;
2635
2636 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2637 if (ret < 0)
2638 return ret;
2639
2640 eb = path->nodes[0];
2641 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2642 ret = btrfs_next_leaf(fs_root, path);
2643 if (ret)
2644 return ret;
2645 eb = path->nodes[0];
2646 }
2647
2648 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2649 if (found_key->type != key.type ||
2650 found_key->objectid != key.objectid)
2651 return 1;
2652
2653 return 0;
2654 }
2655
2656 /*
2657 * btrfs_search_slot - look for a key in a tree and perform necessary
2658 * modifications to preserve tree invariants.
2659 *
2660 * @trans: Handle of transaction, used when modifying the tree
2661 * @root: The root of the tree we are searching
2662 * @key: The key we are looking for
2663 * @p: Holds all btree nodes along the search path
2664 * @ins_len: Indicates purpose of search, for inserts it is 1, for
2665 * deletions it's -1. 0 for plain searches
2666 * @cow: boolean indicating whether CoW operations should be performed.
2667 * Must always be 1 when modifying the tree.
2668 *
2669 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2670 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2671 *
2672 * If @key is found, 0 is returned and you can find the item in the leaf level
2673 * of the path (level 0)
2674 *
2675 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2676 * points to the slot where it should be inserted
2677 *
2678 * If an error is encountered while searching the tree a negative error number
2679 * is returned
2680 */
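/*
 * A typical caller pattern looks roughly like the sketch below (the key
 * values are placeholders and error handling is abbreviated):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret < 0)
 *		goto error;
 *	if (ret > 0)
 *		goto not_found;	(path points at the insert position)
 *	(the item lives in path->nodes[0] at slot path->slots[0])
 *	btrfs_free_path(path);
 */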
2681 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2682 const struct btrfs_key *key, struct btrfs_path *p,
2683 int ins_len, int cow)
2684 {
2685 struct btrfs_fs_info *fs_info = root->fs_info;
2686 struct extent_buffer *b;
2687 int slot;
2688 int ret;
2689 int err;
2690 int level;
2691 int lowest_unlock = 1;
2692 int root_lock;
2693 /* everything at write_lock_level or lower must be write locked */
2694 int write_lock_level = 0;
2695 u8 lowest_level = 0;
2696 int min_write_lock_level;
2697 int prev_cmp;
2698
2699 lowest_level = p->lowest_level;
2700 WARN_ON(lowest_level && ins_len > 0);
2701 WARN_ON(p->nodes[0] != NULL);
2702 BUG_ON(!cow && ins_len);
2703
2704 if (ins_len < 0) {
2705 lowest_unlock = 2;
2706
2707 /* when we are removing items, we might have to go up to level
2708 * two as we update tree pointers. Make sure we keep write
2709 * locks on those levels as well
2710 */
2711 write_lock_level = 2;
2712 } else if (ins_len > 0) {
2713 /*
2714 * for inserting items, make sure we have a write lock on
2715 * level 1 so we can update keys
2716 */
2717 write_lock_level = 1;
2718 }
2719
2720 if (!cow)
2721 write_lock_level = -1;
2722
2723 if (cow && (p->keep_locks || p->lowest_level))
2724 write_lock_level = BTRFS_MAX_LEVEL;
2725
2726 min_write_lock_level = write_lock_level;
2727
2728 again:
2729 prev_cmp = -1;
2730 /*
2731 * we try very hard to do read locks on the root
2732 */
2733 root_lock = BTRFS_READ_LOCK;
2734 level = 0;
2735 if (p->search_commit_root) {
2736 /*
2737 * the commit roots are read only
2738 * so we always do read locks
2739 */
2740 if (p->need_commit_sem)
2741 down_read(&fs_info->commit_root_sem);
2742 b = root->commit_root;
2743 extent_buffer_get(b);
2744 level = btrfs_header_level(b);
2745 if (p->need_commit_sem)
2746 up_read(&fs_info->commit_root_sem);
2747 if (!p->skip_locking)
2748 btrfs_tree_read_lock(b);
2749 } else {
2750 if (p->skip_locking) {
2751 b = btrfs_root_node(root);
2752 level = btrfs_header_level(b);
2753 } else {
2754 /* we don't know the level of the root node
2755 * until we actually have it read locked
2756 */
2757 b = btrfs_read_lock_root_node(root);
2758 level = btrfs_header_level(b);
2759 if (level <= write_lock_level) {
2760 /* whoops, must trade for write lock */
2761 btrfs_tree_read_unlock(b);
2762 free_extent_buffer(b);
2763 b = btrfs_lock_root_node(root);
2764 root_lock = BTRFS_WRITE_LOCK;
2765
2766 /* the level might have changed, check again */
2767 level = btrfs_header_level(b);
2768 }
2769 }
2770 }
2771 p->nodes[level] = b;
2772 if (!p->skip_locking)
2773 p->locks[level] = root_lock;
2774
2775 while (b) {
2776 level = btrfs_header_level(b);
2777
2778 /*
2779 * setup the path here so we can release it under lock
2780 * contention with the cow code
2781 */
2782 if (cow) {
2783 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2784
2785 /*
2786 * if we don't really need to cow this block
2787 * then we don't want to set the path blocking,
2788 * so we test it here
2789 */
2790 if (!should_cow_block(trans, root, b)) {
2791 trans->dirty = true;
2792 goto cow_done;
2793 }
2794
2795 /*
2796 * must have write locks on this node and the
2797 * parent
2798 */
2799 if (level > write_lock_level ||
2800 (level + 1 > write_lock_level &&
2801 level + 1 < BTRFS_MAX_LEVEL &&
2802 p->nodes[level + 1])) {
2803 write_lock_level = level + 1;
2804 btrfs_release_path(p);
2805 goto again;
2806 }
2807
2808 btrfs_set_path_blocking(p);
2809 if (last_level)
2810 err = btrfs_cow_block(trans, root, b, NULL, 0,
2811 &b);
2812 else
2813 err = btrfs_cow_block(trans, root, b,
2814 p->nodes[level + 1],
2815 p->slots[level + 1], &b);
2816 if (err) {
2817 ret = err;
2818 goto done;
2819 }
2820 }
2821 cow_done:
2822 p->nodes[level] = b;
2823 btrfs_clear_path_blocking(p, NULL, 0);
2824
2825 /*
2826 * we have a lock on b and as long as we aren't changing
2827 * the tree, there is no way for the items in b to change.
2828 * It is safe to drop the lock on our parent before we
2829 * go through the expensive btree search on b.
2830 *
2831 * If we're inserting or deleting (ins_len != 0), then we might
2832 * be changing slot zero, which may require changing the parent.
2833 * So, we can't drop the lock until after we know which slot
2834 * we're operating on.
2835 */
2836 if (!ins_len && !p->keep_locks) {
2837 int u = level + 1;
2838
2839 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2840 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2841 p->locks[u] = 0;
2842 }
2843 }
2844
2845 ret = key_search(b, key, level, &prev_cmp, &slot);
2846 if (ret < 0)
2847 goto done;
2848
2849 if (level != 0) {
2850 int dec = 0;
2851 if (ret && slot > 0) {
2852 dec = 1;
2853 slot -= 1;
2854 }
2855 p->slots[level] = slot;
2856 err = setup_nodes_for_search(trans, root, p, b, level,
2857 ins_len, &write_lock_level);
2858 if (err == -EAGAIN)
2859 goto again;
2860 if (err) {
2861 ret = err;
2862 goto done;
2863 }
2864 b = p->nodes[level];
2865 slot = p->slots[level];
2866
2867 /*
2868 * slot 0 is special, if we change the key
2869 * we have to update the parent pointer
2870 * which means we must have a write lock
2871 * on the parent
2872 */
2873 if (slot == 0 && ins_len &&
2874 write_lock_level < level + 1) {
2875 write_lock_level = level + 1;
2876 btrfs_release_path(p);
2877 goto again;
2878 }
2879
2880 unlock_up(p, level, lowest_unlock,
2881 min_write_lock_level, &write_lock_level);
2882
2883 if (level == lowest_level) {
2884 if (dec)
2885 p->slots[level]++;
2886 goto done;
2887 }
2888
2889 err = read_block_for_search(root, p, &b, level,
2890 slot, key);
2891 if (err == -EAGAIN)
2892 goto again;
2893 if (err) {
2894 ret = err;
2895 goto done;
2896 }
2897
2898 if (!p->skip_locking) {
2899 level = btrfs_header_level(b);
2900 if (level <= write_lock_level) {
2901 err = btrfs_try_tree_write_lock(b);
2902 if (!err) {
2903 btrfs_set_path_blocking(p);
2904 btrfs_tree_lock(b);
2905 btrfs_clear_path_blocking(p, b,
2906 BTRFS_WRITE_LOCK);
2907 }
2908 p->locks[level] = BTRFS_WRITE_LOCK;
2909 } else {
2910 err = btrfs_tree_read_lock_atomic(b);
2911 if (!err) {
2912 btrfs_set_path_blocking(p);
2913 btrfs_tree_read_lock(b);
2914 btrfs_clear_path_blocking(p, b,
2915 BTRFS_READ_LOCK);
2916 }
2917 p->locks[level] = BTRFS_READ_LOCK;
2918 }
2919 p->nodes[level] = b;
2920 }
2921 } else {
2922 p->slots[level] = slot;
2923 if (ins_len > 0 &&
2924 btrfs_leaf_free_space(fs_info, b) < ins_len) {
2925 if (write_lock_level < 1) {
2926 write_lock_level = 1;
2927 btrfs_release_path(p);
2928 goto again;
2929 }
2930
2931 btrfs_set_path_blocking(p);
2932 err = split_leaf(trans, root, key,
2933 p, ins_len, ret == 0);
2934 btrfs_clear_path_blocking(p, NULL, 0);
2935
2936 BUG_ON(err > 0);
2937 if (err) {
2938 ret = err;
2939 goto done;
2940 }
2941 }
2942 if (!p->search_for_split)
2943 unlock_up(p, level, lowest_unlock,
2944 min_write_lock_level, &write_lock_level);
2945 goto done;
2946 }
2947 }
2948 ret = 1;
2949 done:
2950 /*
2951 * we don't really know what they plan on doing with the path
2952 * from here on, so for now just mark it as blocking
2953 */
2954 if (!p->leave_spinning)
2955 btrfs_set_path_blocking(p);
2956 if (ret < 0 && !p->skip_release_on_error)
2957 btrfs_release_path(p);
2958 return ret;
2959 }
2960
2961 /*
2962 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2963 * current state of the tree together with the operations recorded in the tree
2964 * modification log to search for the key in a previous version of this tree, as
2965 * denoted by the time_seq parameter.
2966 *
2967 * Naturally, there is no support for insert, delete or cow operations.
2968 *
2969 * The resulting path and return value will be set up as if we called
2970 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2971 */
2972 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2973 struct btrfs_path *p, u64 time_seq)
2974 {
2975 struct btrfs_fs_info *fs_info = root->fs_info;
2976 struct extent_buffer *b;
2977 int slot;
2978 int ret;
2979 int err;
2980 int level;
2981 int lowest_unlock = 1;
2982 u8 lowest_level = 0;
2983 int prev_cmp = -1;
2984
2985 lowest_level = p->lowest_level;
2986 WARN_ON(p->nodes[0] != NULL);
2987
2988 if (p->search_commit_root) {
2989 BUG_ON(time_seq);
2990 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2991 }
2992
2993 again:
2994 b = get_old_root(root, time_seq);
2995 level = btrfs_header_level(b);
2996 p->locks[level] = BTRFS_READ_LOCK;
2997
2998 while (b) {
2999 level = btrfs_header_level(b);
3000 p->nodes[level] = b;
3001 btrfs_clear_path_blocking(p, NULL, 0);
3002
3003 /*
3004 * we have a lock on b and as long as we aren't changing
3005 * the tree, there is no way for the items in b to change.
3006 * It is safe to drop the lock on our parent before we
3007 * go through the expensive btree search on b.
3008 */
3009 btrfs_unlock_up_safe(p, level + 1);
3010
3011 /*
3012 * Since we can unwind ebs we want to do a real search every
3013 * time.
3014 */
3015 prev_cmp = -1;
3016 ret = key_search(b, key, level, &prev_cmp, &slot);
3017
3018 if (level != 0) {
3019 int dec = 0;
3020 if (ret && slot > 0) {
3021 dec = 1;
3022 slot -= 1;
3023 }
3024 p->slots[level] = slot;
3025 unlock_up(p, level, lowest_unlock, 0, NULL);
3026
3027 if (level == lowest_level) {
3028 if (dec)
3029 p->slots[level]++;
3030 goto done;
3031 }
3032
3033 err = read_block_for_search(root, p, &b, level,
3034 slot, key);
3035 if (err == -EAGAIN)
3036 goto again;
3037 if (err) {
3038 ret = err;
3039 goto done;
3040 }
3041
3042 level = btrfs_header_level(b);
3043 err = btrfs_tree_read_lock_atomic(b);
3044 if (!err) {
3045 btrfs_set_path_blocking(p);
3046 btrfs_tree_read_lock(b);
3047 btrfs_clear_path_blocking(p, b,
3048 BTRFS_READ_LOCK);
3049 }
3050 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3051 if (!b) {
3052 ret = -ENOMEM;
3053 goto done;
3054 }
3055 p->locks[level] = BTRFS_READ_LOCK;
3056 p->nodes[level] = b;
3057 } else {
3058 p->slots[level] = slot;
3059 unlock_up(p, level, lowest_unlock, 0, NULL);
3060 goto done;
3061 }
3062 }
3063 ret = 1;
3064 done:
3065 if (!p->leave_spinning)
3066 btrfs_set_path_blocking(p);
3067 if (ret < 0)
3068 btrfs_release_path(p);
3069
3070 return ret;
3071 }
3072
3073 /*
3074 * helper to use instead of search slot if no exact match is needed but
3075 * instead the next or previous item should be returned.
3076 * When find_higher is true, the next higher item is returned, the next lower
3077 * otherwise.
3078 * When return_any and find_higher are both true, and no higher item is found,
3079 * return the next lower instead.
3080 * When return_any is true and find_higher is false, and no lower item is found,
3081 * return the next higher instead.
3082 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3083 * < 0 on error
3084 */
3085 int btrfs_search_slot_for_read(struct btrfs_root *root,
3086 const struct btrfs_key *key,
3087 struct btrfs_path *p, int find_higher,
3088 int return_any)
3089 {
3090 int ret;
3091 struct extent_buffer *leaf;
3092
3093 again:
3094 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3095 if (ret <= 0)
3096 return ret;
3097 /*
3098 * a return value of 1 means the path is at the position where the
3099 * item should be inserted. Normally this is the next bigger item,
3100 * but in case the previous item is the last in a leaf, path points
3101 * to the first free slot in the previous leaf, i.e. at an invalid
3102 * item.
3103 */
3104 leaf = p->nodes[0];
3105
3106 if (find_higher) {
3107 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3108 ret = btrfs_next_leaf(root, p);
3109 if (ret <= 0)
3110 return ret;
3111 if (!return_any)
3112 return 1;
3113 /*
3114 * no higher item found, return the next
3115 * lower instead
3116 */
3117 return_any = 0;
3118 find_higher = 0;
3119 btrfs_release_path(p);
3120 goto again;
3121 }
3122 } else {
3123 if (p->slots[0] == 0) {
3124 ret = btrfs_prev_leaf(root, p);
3125 if (ret < 0)
3126 return ret;
3127 if (!ret) {
3128 leaf = p->nodes[0];
3129 if (p->slots[0] == btrfs_header_nritems(leaf))
3130 p->slots[0]--;
3131 return 0;
3132 }
3133 if (!return_any)
3134 return 1;
3135 /*
3136 * no lower item found, return the next
3137 * higher instead
3138 */
3139 return_any = 0;
3140 find_higher = 1;
3141 btrfs_release_path(p);
3142 goto again;
3143 } else {
3144 --p->slots[0];
3145 }
3146 }
3147 return 0;
3148 }
3149
3150 /*
3151 * adjust the pointers going up the tree, starting at level
3152 * making sure the right key of each node points to 'key'.
3153 * This is used after shifting pointers to the left, so it stops
3154 * fixing up pointers when a given leaf/node is not in slot 0 of the
3155 * higher levels
3156 *
3157 */
3158 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3159 struct btrfs_path *path,
3160 struct btrfs_disk_key *key, int level)
3161 {
3162 int i;
3163 struct extent_buffer *t;
3164
3165 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3166 int tslot = path->slots[i];
3167 if (!path->nodes[i])
3168 break;
3169 t = path->nodes[i];
3170 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3171 btrfs_set_node_key(t, key, tslot);
3172 btrfs_mark_buffer_dirty(path->nodes[i]);
3173 if (tslot != 0)
3174 break;
3175 }
3176 }
3177
3178 /*
3179 * update item key.
3180 *
3181 * This function isn't completely safe. It's the caller's responsibility
3182 * to make sure that the new key won't break the key ordering
3183 */
3184 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3185 struct btrfs_path *path,
3186 const struct btrfs_key *new_key)
3187 {
3188 struct btrfs_disk_key disk_key;
3189 struct extent_buffer *eb;
3190 int slot;
3191
3192 eb = path->nodes[0];
3193 slot = path->slots[0];
3194 if (slot > 0) {
3195 btrfs_item_key(eb, &disk_key, slot - 1);
3196 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3197 }
3198 if (slot < btrfs_header_nritems(eb) - 1) {
3199 btrfs_item_key(eb, &disk_key, slot + 1);
3200 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3201 }
3202
3203 btrfs_cpu_key_to_disk(&disk_key, new_key);
3204 btrfs_set_item_key(eb, &disk_key, slot);
3205 btrfs_mark_buffer_dirty(eb);
3206 if (slot == 0)
3207 fixup_low_keys(fs_info, path, &disk_key, 1);
3208 }
3209
3210 /*
3211 * try to push data from one node into the next node left in the
3212 * tree.
3213 *
3214 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3215 * error, and > 0 if there was no room in the left hand block.
3216 */
3217 static int push_node_left(struct btrfs_trans_handle *trans,
3218 struct btrfs_fs_info *fs_info,
3219 struct extent_buffer *dst,
3220 struct extent_buffer *src, int empty)
3221 {
3222 int push_items = 0;
3223 int src_nritems;
3224 int dst_nritems;
3225 int ret = 0;
3226
3227 src_nritems = btrfs_header_nritems(src);
3228 dst_nritems = btrfs_header_nritems(dst);
3229 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3230 WARN_ON(btrfs_header_generation(src) != trans->transid);
3231 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3232
3233 if (!empty && src_nritems <= 8)
3234 return 1;
3235
3236 if (push_items <= 0)
3237 return 1;
3238
3239 if (empty) {
3240 push_items = min(src_nritems, push_items);
3241 if (push_items < src_nritems) {
3242 /* leave at least 8 pointers in the node if
3243 * we aren't going to empty it
3244 */
3245 if (src_nritems - push_items < 8) {
3246 if (push_items <= 8)
3247 return 1;
3248 push_items -= 8;
3249 }
3250 }
3251 } else
3252 push_items = min(src_nritems - 8, push_items);
3253
3254 ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3255 push_items);
3256 if (ret) {
3257 btrfs_abort_transaction(trans, ret);
3258 return ret;
3259 }
3260 copy_extent_buffer(dst, src,
3261 btrfs_node_key_ptr_offset(dst_nritems),
3262 btrfs_node_key_ptr_offset(0),
3263 push_items * sizeof(struct btrfs_key_ptr));
3264
3265 if (push_items < src_nritems) {
3266 /*
3267 * don't call tree_mod_log_eb_move here, key removal was already
3268 * fully logged by tree_mod_log_eb_copy above.
3269 */
3270 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3271 btrfs_node_key_ptr_offset(push_items),
3272 (src_nritems - push_items) *
3273 sizeof(struct btrfs_key_ptr));
3274 }
3275 btrfs_set_header_nritems(src, src_nritems - push_items);
3276 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3277 btrfs_mark_buffer_dirty(src);
3278 btrfs_mark_buffer_dirty(dst);
3279
3280 return ret;
3281 }
3282
3283 /*
3284 * try to push data from one node into the next node right in the
3285 * tree.
3286 *
3287 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3288 * error, and > 0 if there was no room in the right hand block.
3289 *
3290 * this will only push up to 1/2 the contents of the left node over
3291 */
3292 static int balance_node_right(struct btrfs_trans_handle *trans,
3293 struct btrfs_fs_info *fs_info,
3294 struct extent_buffer *dst,
3295 struct extent_buffer *src)
3296 {
3297 int push_items = 0;
3298 int max_push;
3299 int src_nritems;
3300 int dst_nritems;
3301 int ret = 0;
3302
3303 WARN_ON(btrfs_header_generation(src) != trans->transid);
3304 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3305
3306 src_nritems = btrfs_header_nritems(src);
3307 dst_nritems = btrfs_header_nritems(dst);
3308 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3309 if (push_items <= 0)
3310 return 1;
3311
3312 if (src_nritems < 4)
3313 return 1;
3314
3315 max_push = src_nritems / 2 + 1;
3316 /* don't try to empty the node */
3317 if (max_push >= src_nritems)
3318 return 1;
3319
3320 if (max_push < push_items)
3321 push_items = max_push;
3322
3323 tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
3324 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3325 btrfs_node_key_ptr_offset(0),
3326 (dst_nritems) *
3327 sizeof(struct btrfs_key_ptr));
3328
3329 ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3330 src_nritems - push_items, push_items);
3331 if (ret) {
3332 btrfs_abort_transaction(trans, ret);
3333 return ret;
3334 }
3335 copy_extent_buffer(dst, src,
3336 btrfs_node_key_ptr_offset(0),
3337 btrfs_node_key_ptr_offset(src_nritems - push_items),
3338 push_items * sizeof(struct btrfs_key_ptr));
3339
3340 btrfs_set_header_nritems(src, src_nritems - push_items);
3341 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3342
3343 btrfs_mark_buffer_dirty(src);
3344 btrfs_mark_buffer_dirty(dst);
3345
3346 return ret;
3347 }
3348
3349 /*
3350 * helper function to insert a new root level in the tree.
3351 * A new node is allocated, and a single item is inserted to
3352 * point to the existing root
3353 *
3354 * returns zero on success or < 0 on failure.
3355 */
3356 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3357 struct btrfs_root *root,
3358 struct btrfs_path *path, int level)
3359 {
3360 struct btrfs_fs_info *fs_info = root->fs_info;
3361 u64 lower_gen;
3362 struct extent_buffer *lower;
3363 struct extent_buffer *c;
3364 struct extent_buffer *old;
3365 struct btrfs_disk_key lower_key;
3366
3367 BUG_ON(path->nodes[level]);
3368 BUG_ON(path->nodes[level-1] != root->node);
3369
3370 lower = path->nodes[level-1];
3371 if (level == 1)
3372 btrfs_item_key(lower, &lower_key, 0);
3373 else
3374 btrfs_node_key(lower, &lower_key, 0);
3375
3376 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3377 &lower_key, level, root->node->start, 0);
3378 if (IS_ERR(c))
3379 return PTR_ERR(c);
3380
3381 root_add_used(root, fs_info->nodesize);
3382
3383 memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
3384 btrfs_set_header_nritems(c, 1);
3385 btrfs_set_header_level(c, level);
3386 btrfs_set_header_bytenr(c, c->start);
3387 btrfs_set_header_generation(c, trans->transid);
3388 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3389 btrfs_set_header_owner(c, root->root_key.objectid);
3390
3391 write_extent_buffer_fsid(c, fs_info->fsid);
3392 write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
3393
3394 btrfs_set_node_key(c, &lower_key, 0);
3395 btrfs_set_node_blockptr(c, 0, lower->start);
3396 lower_gen = btrfs_header_generation(lower);
3397 WARN_ON(lower_gen != trans->transid);
3398
3399 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3400
3401 btrfs_mark_buffer_dirty(c);
3402
3403 old = root->node;
3404 tree_mod_log_set_root_pointer(root, c, 0);
3405 rcu_assign_pointer(root->node, c);
3406
3407 /* the super has an extra ref to root->node */
3408 free_extent_buffer(old);
3409
3410 add_root_to_dirty_list(root);
3411 extent_buffer_get(c);
3412 path->nodes[level] = c;
3413 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3414 path->slots[level] = 0;
3415 return 0;
3416 }
3417
3418 /*
3419 * worker function to insert a single pointer in a node.
3420 * the node should have enough room for the pointer already
3421 *
3422 * slot and level indicate where you want the key to go, and
3423 * blocknr is the block the key points to.
3424 */
3425 static void insert_ptr(struct btrfs_trans_handle *trans,
3426 struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3427 struct btrfs_disk_key *key, u64 bytenr,
3428 int slot, int level)
3429 {
3430 struct extent_buffer *lower;
3431 int nritems;
3432 int ret;
3433
3434 BUG_ON(!path->nodes[level]);
3435 btrfs_assert_tree_locked(path->nodes[level]);
3436 lower = path->nodes[level];
3437 nritems = btrfs_header_nritems(lower);
3438 BUG_ON(slot > nritems);
3439 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3440 if (slot != nritems) {
3441 if (level)
3442 tree_mod_log_eb_move(fs_info, lower, slot + 1,
3443 slot, nritems - slot);
3444 memmove_extent_buffer(lower,
3445 btrfs_node_key_ptr_offset(slot + 1),
3446 btrfs_node_key_ptr_offset(slot),
3447 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3448 }
3449 if (level) {
3450 ret = tree_mod_log_insert_key(fs_info, lower, slot,
3451 MOD_LOG_KEY_ADD, GFP_NOFS);
3452 BUG_ON(ret < 0);
3453 }
3454 btrfs_set_node_key(lower, key, slot);
3455 btrfs_set_node_blockptr(lower, slot, bytenr);
3456 WARN_ON(trans->transid == 0);
3457 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3458 btrfs_set_header_nritems(lower, nritems + 1);
3459 btrfs_mark_buffer_dirty(lower);
3460 }
3461
3462 /*
3463 * split the node at the specified level in path in two.
3464 * The path is corrected to point to the appropriate node after the split
3465 *
3466 * Before splitting this tries to make some room in the node by pushing
3467 * left and right, if either one works, it returns right away.
3468 *
3469 * returns 0 on success and < 0 on failure
3470 */
3471 static noinline int split_node(struct btrfs_trans_handle *trans,
3472 struct btrfs_root *root,
3473 struct btrfs_path *path, int level)
3474 {
3475 struct btrfs_fs_info *fs_info = root->fs_info;
3476 struct extent_buffer *c;
3477 struct extent_buffer *split;
3478 struct btrfs_disk_key disk_key;
3479 int mid;
3480 int ret;
3481 u32 c_nritems;
3482
3483 c = path->nodes[level];
3484 WARN_ON(btrfs_header_generation(c) != trans->transid);
3485 if (c == root->node) {
3486 /*
3487 * trying to split the root, lets make a new one
3488 *
3489 * tree mod log: We don't log the removal of the old root in
3490 * insert_new_root, because that root buffer will be kept as a
3491 * normal node. We are going to log removal of half of the
3492 * elements below with tree_mod_log_eb_copy. We're holding a
3493 * tree lock on the buffer, which is why we cannot race with
3494 * other tree_mod_log users.
3495 */
3496 ret = insert_new_root(trans, root, path, level + 1);
3497 if (ret)
3498 return ret;
3499 } else {
3500 ret = push_nodes_for_insert(trans, root, path, level);
3501 c = path->nodes[level];
3502 if (!ret && btrfs_header_nritems(c) <
3503 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3504 return 0;
3505 if (ret < 0)
3506 return ret;
3507 }
3508
3509 c_nritems = btrfs_header_nritems(c);
3510 mid = (c_nritems + 1) / 2;
3511 btrfs_node_key(c, &disk_key, mid);
3512
3513 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3514 &disk_key, level, c->start, 0);
3515 if (IS_ERR(split))
3516 return PTR_ERR(split);
3517
3518 root_add_used(root, fs_info->nodesize);
3519
3520 memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
3521 btrfs_set_header_level(split, btrfs_header_level(c));
3522 btrfs_set_header_bytenr(split, split->start);
3523 btrfs_set_header_generation(split, trans->transid);
3524 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3525 btrfs_set_header_owner(split, root->root_key.objectid);
3526 write_extent_buffer_fsid(split, fs_info->fsid);
3527 write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
3528
3529 ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3530 if (ret) {
3531 btrfs_abort_transaction(trans, ret);
3532 return ret;
3533 }
3534 copy_extent_buffer(split, c,
3535 btrfs_node_key_ptr_offset(0),
3536 btrfs_node_key_ptr_offset(mid),
3537 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3538 btrfs_set_header_nritems(split, c_nritems - mid);
3539 btrfs_set_header_nritems(c, mid);
3540 ret = 0;
3541
3542 btrfs_mark_buffer_dirty(c);
3543 btrfs_mark_buffer_dirty(split);
3544
3545 insert_ptr(trans, fs_info, path, &disk_key, split->start,
3546 path->slots[level + 1] + 1, level + 1);
3547
3548 if (path->slots[level] >= mid) {
3549 path->slots[level] -= mid;
3550 btrfs_tree_unlock(c);
3551 free_extent_buffer(c);
3552 path->nodes[level] = split;
3553 path->slots[level + 1] += 1;
3554 } else {
3555 btrfs_tree_unlock(split);
3556 free_extent_buffer(split);
3557 }
3558 return ret;
3559 }
3560
3561 /*
3562 * how many bytes are required to store the items in a leaf. start
3563 * and nr indicate which items in the leaf to check. This totals up the
3564 * space used both by the item structs and the item data
3565 */
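/*
 * For example (hypothetical offsets): if item 0 has offset 3995 and size
 * 100 and item 1 has offset 3975 and size 20, then leaf_space_used(l, 0, 2)
 * is (3995 + 100) - 3975 = 120 bytes of item data plus
 * 2 * sizeof(struct btrfs_item) for the item headers.
 */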
3566 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3567 {
3568 struct btrfs_item *start_item;
3569 struct btrfs_item *end_item;
3570 struct btrfs_map_token token;
3571 int data_len;
3572 int nritems = btrfs_header_nritems(l);
3573 int end = min(nritems, start + nr) - 1;
3574
3575 if (!nr)
3576 return 0;
3577 btrfs_init_map_token(&token);
3578 start_item = btrfs_item_nr(start);
3579 end_item = btrfs_item_nr(end);
3580 data_len = btrfs_token_item_offset(l, start_item, &token) +
3581 btrfs_token_item_size(l, start_item, &token);
3582 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3583 data_len += sizeof(struct btrfs_item) * nr;
3584 WARN_ON(data_len < 0);
3585 return data_len;
3586 }
3587
3588 /*
3589 * The space between the end of the leaf items and
3590 * the start of the leaf data. IOW, how much room
3591 * the leaf has left for both items and data
3592 */
3593 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3594 struct extent_buffer *leaf)
3595 {
3596 int nritems = btrfs_header_nritems(leaf);
3597 int ret;
3598
3599 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3600 if (ret < 0) {
3601 btrfs_crit(fs_info,
3602 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3603 ret,
3604 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3605 leaf_space_used(leaf, 0, nritems), nritems);
3606 }
3607 return ret;
3608 }
3609
3610 /*
3611 * min slot controls the lowest index we're willing to push to the
3612 * right. We'll push up to and including min_slot, but no lower
3613 */
3614 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3615 struct btrfs_path *path,
3616 int data_size, int empty,
3617 struct extent_buffer *right,
3618 int free_space, u32 left_nritems,
3619 u32 min_slot)
3620 {
3621 struct extent_buffer *left = path->nodes[0];
3622 struct extent_buffer *upper = path->nodes[1];
3623 struct btrfs_map_token token;
3624 struct btrfs_disk_key disk_key;
3625 int slot;
3626 u32 i;
3627 int push_space = 0;
3628 int push_items = 0;
3629 struct btrfs_item *item;
3630 u32 nr;
3631 u32 right_nritems;
3632 u32 data_end;
3633 u32 this_item_size;
3634
3635 btrfs_init_map_token(&token);
3636
3637 if (empty)
3638 nr = 0;
3639 else
3640 nr = max_t(u32, 1, min_slot);
3641
3642 if (path->slots[0] >= left_nritems)
3643 push_space += data_size;
3644
3645 slot = path->slots[1];
3646 i = left_nritems - 1;
3647 while (i >= nr) {
3648 item = btrfs_item_nr(i);
3649
3650 if (!empty && push_items > 0) {
3651 if (path->slots[0] > i)
3652 break;
3653 if (path->slots[0] == i) {
3654 int space = btrfs_leaf_free_space(fs_info, left);
3655 if (space + push_space * 2 > free_space)
3656 break;
3657 }
3658 }
3659
3660 if (path->slots[0] == i)
3661 push_space += data_size;
3662
3663 this_item_size = btrfs_item_size(left, item);
3664 if (this_item_size + sizeof(*item) + push_space > free_space)
3665 break;
3666
3667 push_items++;
3668 push_space += this_item_size + sizeof(*item);
3669 if (i == 0)
3670 break;
3671 i--;
3672 }
3673
3674 if (push_items == 0)
3675 goto out_unlock;
3676
3677 WARN_ON(!empty && push_items == left_nritems);
3678
3679 /* push left to right */
3680 right_nritems = btrfs_header_nritems(right);
3681
3682 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3683 push_space -= leaf_data_end(fs_info, left);
3684
3685 /* make room in the right data area */
3686 data_end = leaf_data_end(fs_info, right);
3687 memmove_extent_buffer(right,
3688 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3689 BTRFS_LEAF_DATA_OFFSET + data_end,
3690 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3691
3692 /* copy from the left data area */
3693 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3694 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3695 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3696 push_space);
3697
3698 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3699 btrfs_item_nr_offset(0),
3700 right_nritems * sizeof(struct btrfs_item));
3701
3702 /* copy the items from left to right */
3703 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3704 btrfs_item_nr_offset(left_nritems - push_items),
3705 push_items * sizeof(struct btrfs_item));
3706
3707 /* update the item pointers */
3708 right_nritems += push_items;
3709 btrfs_set_header_nritems(right, right_nritems);
3710 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3711 for (i = 0; i < right_nritems; i++) {
3712 item = btrfs_item_nr(i);
3713 push_space -= btrfs_token_item_size(right, item, &token);
3714 btrfs_set_token_item_offset(right, item, push_space, &token);
3715 }
3716
3717 left_nritems -= push_items;
3718 btrfs_set_header_nritems(left, left_nritems);
3719
3720 if (left_nritems)
3721 btrfs_mark_buffer_dirty(left);
3722 else
3723 clean_tree_block(fs_info, left);
3724
3725 btrfs_mark_buffer_dirty(right);
3726
3727 btrfs_item_key(right, &disk_key, 0);
3728 btrfs_set_node_key(upper, &disk_key, slot + 1);
3729 btrfs_mark_buffer_dirty(upper);
3730
3731 /* then fixup the leaf pointer in the path */
3732 if (path->slots[0] >= left_nritems) {
3733 path->slots[0] -= left_nritems;
3734 if (btrfs_header_nritems(path->nodes[0]) == 0)
3735 clean_tree_block(fs_info, path->nodes[0]);
3736 btrfs_tree_unlock(path->nodes[0]);
3737 free_extent_buffer(path->nodes[0]);
3738 path->nodes[0] = right;
3739 path->slots[1] += 1;
3740 } else {
3741 btrfs_tree_unlock(right);
3742 free_extent_buffer(right);
3743 }
3744 return 0;
3745
3746 out_unlock:
3747 btrfs_tree_unlock(right);
3748 free_extent_buffer(right);
3749 return 1;
3750 }
3751
3752 /*
3753 * push some data in the path leaf to the right, trying to free up at
3754 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3755 *
3756 * returns 1 if the push failed because the other node didn't have enough
3757 * room, 0 if everything worked out and < 0 if there were major errors.
3758 *
3759 * this will push starting from min_slot to the end of the leaf. It won't
3760 * push any slot lower than min_slot
3761 */
3762 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3763 *root, struct btrfs_path *path,
3764 int min_data_size, int data_size,
3765 int empty, u32 min_slot)
3766 {
3767 struct btrfs_fs_info *fs_info = root->fs_info;
3768 struct extent_buffer *left = path->nodes[0];
3769 struct extent_buffer *right;
3770 struct extent_buffer *upper;
3771 int slot;
3772 int free_space;
3773 u32 left_nritems;
3774 int ret;
3775
3776 if (!path->nodes[1])
3777 return 1;
3778
3779 slot = path->slots[1];
3780 upper = path->nodes[1];
3781 if (slot >= btrfs_header_nritems(upper) - 1)
3782 return 1;
3783
3784 btrfs_assert_tree_locked(path->nodes[1]);
3785
3786 right = read_node_slot(fs_info, upper, slot + 1);
3787 /*
3788 * If slot + 1 is not valid or we failed to read the right node,
3789 * it's no big deal, just return.
3790 */
3791 if (IS_ERR(right))
3792 return 1;
3793
3794 btrfs_tree_lock(right);
3795 btrfs_set_lock_blocking(right);
3796
3797 free_space = btrfs_leaf_free_space(fs_info, right);
3798 if (free_space < data_size)
3799 goto out_unlock;
3800
3801 /* cow and double check */
3802 ret = btrfs_cow_block(trans, root, right, upper,
3803 slot + 1, &right);
3804 if (ret)
3805 goto out_unlock;
3806
3807 free_space = btrfs_leaf_free_space(fs_info, right);
3808 if (free_space < data_size)
3809 goto out_unlock;
3810
3811 left_nritems = btrfs_header_nritems(left);
3812 if (left_nritems == 0)
3813 goto out_unlock;
3814
3815 if (path->slots[0] == left_nritems && !empty) {
3816 /* Key greater than all keys in the leaf, right neighbor has
3817 * enough room for it and we're not emptying our leaf to delete
3818 * it, therefore use right neighbor to insert the new item and
3819 * no need to touch/dirty our left leaf. */
3820 btrfs_tree_unlock(left);
3821 free_extent_buffer(left);
3822 path->nodes[0] = right;
3823 path->slots[0] = 0;
3824 path->slots[1]++;
3825 return 0;
3826 }
3827
3828 return __push_leaf_right(fs_info, path, min_data_size, empty,
3829 right, free_space, left_nritems, min_slot);
3830 out_unlock:
3831 btrfs_tree_unlock(right);
3832 free_extent_buffer(right);
3833 return 1;
3834 }
3835
3836 /*
3837 * push some data in the path leaf to the left, trying to free up at
3838 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3839 *
3840 * max_slot can put a limit on how far into the leaf we'll push items. The
3841 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3842 * items
3843 */
3844 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3845 struct btrfs_path *path, int data_size,
3846 int empty, struct extent_buffer *left,
3847 int free_space, u32 right_nritems,
3848 u32 max_slot)
3849 {
3850 struct btrfs_disk_key disk_key;
3851 struct extent_buffer *right = path->nodes[0];
3852 int i;
3853 int push_space = 0;
3854 int push_items = 0;
3855 struct btrfs_item *item;
3856 u32 old_left_nritems;
3857 u32 nr;
3858 int ret = 0;
3859 u32 this_item_size;
3860 u32 old_left_item_size;
3861 struct btrfs_map_token token;
3862
3863 btrfs_init_map_token(&token);
3864
3865 if (empty)
3866 nr = min(right_nritems, max_slot);
3867 else
3868 nr = min(right_nritems - 1, max_slot);
3869
3870 for (i = 0; i < nr; i++) {
3871 item = btrfs_item_nr(i);
3872
3873 if (!empty && push_items > 0) {
3874 if (path->slots[0] < i)
3875 break;
3876 if (path->slots[0] == i) {
3877 int space = btrfs_leaf_free_space(fs_info, right);
3878 if (space + push_space * 2 > free_space)
3879 break;
3880 }
3881 }
3882
3883 if (path->slots[0] == i)
3884 push_space += data_size;
3885
3886 this_item_size = btrfs_item_size(right, item);
3887 if (this_item_size + sizeof(*item) + push_space > free_space)
3888 break;
3889
3890 push_items++;
3891 push_space += this_item_size + sizeof(*item);
3892 }
3893
3894 if (push_items == 0) {
3895 ret = 1;
3896 goto out;
3897 }
3898 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3899
3900 /* push data from right to left */
3901 copy_extent_buffer(left, right,
3902 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3903 btrfs_item_nr_offset(0),
3904 push_items * sizeof(struct btrfs_item));
3905
3906 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3907 btrfs_item_offset_nr(right, push_items - 1);
3908
3909 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3910 leaf_data_end(fs_info, left) - push_space,
3911 BTRFS_LEAF_DATA_OFFSET +
3912 btrfs_item_offset_nr(right, push_items - 1),
3913 push_space);
3914 old_left_nritems = btrfs_header_nritems(left);
3915 BUG_ON(old_left_nritems <= 0);
3916
3917 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3918 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3919 u32 ioff;
3920
3921 item = btrfs_item_nr(i);
3922
3923 ioff = btrfs_token_item_offset(left, item, &token);
3924 btrfs_set_token_item_offset(left, item,
3925 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3926 &token);
3927 }
3928 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3929
3930 /* fixup right node */
3931 if (push_items > right_nritems)
3932 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3933 right_nritems);
3934
3935 if (push_items < right_nritems) {
3936 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3937 leaf_data_end(fs_info, right);
3938 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3939 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3940 BTRFS_LEAF_DATA_OFFSET +
3941 leaf_data_end(fs_info, right), push_space);
3942
3943 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3944 btrfs_item_nr_offset(push_items),
3945 (btrfs_header_nritems(right) - push_items) *
3946 sizeof(struct btrfs_item));
3947 }
3948 right_nritems -= push_items;
3949 btrfs_set_header_nritems(right, right_nritems);
3950 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3951 for (i = 0; i < right_nritems; i++) {
3952 item = btrfs_item_nr(i);
3953
3954 push_space = push_space - btrfs_token_item_size(right,
3955 item, &token);
3956 btrfs_set_token_item_offset(right, item, push_space, &token);
3957 }
3958
3959 btrfs_mark_buffer_dirty(left);
3960 if (right_nritems)
3961 btrfs_mark_buffer_dirty(right);
3962 else
3963 clean_tree_block(fs_info, right);
3964
3965 btrfs_item_key(right, &disk_key, 0);
3966 fixup_low_keys(fs_info, path, &disk_key, 1);
3967
3968 /* then fixup the leaf pointer in the path */
3969 if (path->slots[0] < push_items) {
3970 path->slots[0] += old_left_nritems;
3971 btrfs_tree_unlock(path->nodes[0]);
3972 free_extent_buffer(path->nodes[0]);
3973 path->nodes[0] = left;
3974 path->slots[1] -= 1;
3975 } else {
3976 btrfs_tree_unlock(left);
3977 free_extent_buffer(left);
3978 path->slots[0] -= push_items;
3979 }
3980 BUG_ON(path->slots[0] < 0);
3981 return ret;
3982 out:
3983 btrfs_tree_unlock(left);
3984 free_extent_buffer(left);
3985 return ret;
3986 }
3987
3988 /*
3989 * push some data in the path leaf to the left, trying to free up at
3990 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3991 *
3992 * max_slot can put a limit on how far into the leaf we'll push items. The
3993 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3994 * items
3995 */
3996 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3997 *root, struct btrfs_path *path, int min_data_size,
3998 int data_size, int empty, u32 max_slot)
3999 {
4000 struct btrfs_fs_info *fs_info = root->fs_info;
4001 struct extent_buffer *right = path->nodes[0];
4002 struct extent_buffer *left;
4003 int slot;
4004 int free_space;
4005 u32 right_nritems;
4006 int ret = 0;
4007
4008 slot = path->slots[1];
4009 if (slot == 0)
4010 return 1;
4011 if (!path->nodes[1])
4012 return 1;
4013
4014 right_nritems = btrfs_header_nritems(right);
4015 if (right_nritems == 0)
4016 return 1;
4017
4018 btrfs_assert_tree_locked(path->nodes[1]);
4019
4020 left = read_node_slot(fs_info, path->nodes[1], slot - 1);
4021 /*
4022 * If slot - 1 is not valid or we failed to read the left node,
4023 * it's no big deal, just return.
4024 */
4025 if (IS_ERR(left))
4026 return 1;
4027
4028 btrfs_tree_lock(left);
4029 btrfs_set_lock_blocking(left);
4030
4031 free_space = btrfs_leaf_free_space(fs_info, left);
4032 if (free_space < data_size) {
4033 ret = 1;
4034 goto out;
4035 }
4036
4037 /* cow and double check */
4038 ret = btrfs_cow_block(trans, root, left,
4039 path->nodes[1], slot - 1, &left);
4040 if (ret) {
4041 /* we hit -ENOSPC, but it isn't fatal here */
4042 if (ret == -ENOSPC)
4043 ret = 1;
4044 goto out;
4045 }
4046
4047 free_space = btrfs_leaf_free_space(fs_info, left);
4048 if (free_space < data_size) {
4049 ret = 1;
4050 goto out;
4051 }
4052
4053 return __push_leaf_left(fs_info, path, min_data_size,
4054 empty, left, free_space, right_nritems,
4055 max_slot);
4056 out:
4057 btrfs_tree_unlock(left);
4058 free_extent_buffer(left);
4059 return ret;
4060 }
4061
4062 /*
4063 * split the path's leaf in two, making sure there is at least data_size
4064 * available for the resulting leaf level of the path.
4065 */
4066 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4067 struct btrfs_fs_info *fs_info,
4068 struct btrfs_path *path,
4069 struct extent_buffer *l,
4070 struct extent_buffer *right,
4071 int slot, int mid, int nritems)
4072 {
4073 int data_copy_size;
4074 int rt_data_off;
4075 int i;
4076 struct btrfs_disk_key disk_key;
4077 struct btrfs_map_token token;
4078
4079 btrfs_init_map_token(&token);
4080
4081 nritems = nritems - mid;
4082 btrfs_set_header_nritems(right, nritems);
4083 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4084
4085 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4086 btrfs_item_nr_offset(mid),
4087 nritems * sizeof(struct btrfs_item));
4088
4089 copy_extent_buffer(right, l,
4090 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4091 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4092 leaf_data_end(fs_info, l), data_copy_size);
4093
4094 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4095
4096 for (i = 0; i < nritems; i++) {
4097 struct btrfs_item *item = btrfs_item_nr(i);
4098 u32 ioff;
4099
4100 ioff = btrfs_token_item_offset(right, item, &token);
4101 btrfs_set_token_item_offset(right, item,
4102 ioff + rt_data_off, &token);
4103 }
4104
4105 btrfs_set_header_nritems(l, mid);
4106 btrfs_item_key(right, &disk_key, 0);
4107 insert_ptr(trans, fs_info, path, &disk_key, right->start,
4108 path->slots[1] + 1, 1);
4109
4110 btrfs_mark_buffer_dirty(right);
4111 btrfs_mark_buffer_dirty(l);
4112 BUG_ON(path->slots[0] != slot);
4113
4114 if (mid <= slot) {
4115 btrfs_tree_unlock(path->nodes[0]);
4116 free_extent_buffer(path->nodes[0]);
4117 path->nodes[0] = right;
4118 path->slots[0] -= mid;
4119 path->slots[1] += 1;
4120 } else {
4121 btrfs_tree_unlock(right);
4122 free_extent_buffer(right);
4123 }
4124
4125 BUG_ON(path->slots[0] < 0);
4126 }
4127
4128 /*
4129 * double splits happen when we need to insert a big item in the middle
4130 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4131 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4132 * A B C
4133 *
4134 * We avoid this by trying to push the items on either side of our target
4135 * into the adjacent leaves. If all goes well we can avoid the double split
4136 * completely.
4137 */
4138 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4139 struct btrfs_root *root,
4140 struct btrfs_path *path,
4141 int data_size)
4142 {
4143 struct btrfs_fs_info *fs_info = root->fs_info;
4144 int ret;
4145 int progress = 0;
4146 int slot;
4147 u32 nritems;
4148 int space_needed = data_size;
4149
4150 slot = path->slots[0];
4151 if (slot < btrfs_header_nritems(path->nodes[0]))
4152 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4153
4154 /*
4155 * try to push all the items after our slot into the
4156 * right leaf
4157 */
4158 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4159 if (ret < 0)
4160 return ret;
4161
4162 if (ret == 0)
4163 progress++;
4164
4165 nritems = btrfs_header_nritems(path->nodes[0]);
4166 /*
4167 * our goal is to get our slot at the start or end of a leaf. If
4168 * we've done so we're done
4169 */
4170 if (path->slots[0] == 0 || path->slots[0] == nritems)
4171 return 0;
4172
4173 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4174 return 0;
4175
4176 /* try to push all the items before our slot into the left leaf */
4177 slot = path->slots[0];
4178 space_needed = data_size;
4179 if (slot > 0)
4180 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4181 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4182 if (ret < 0)
4183 return ret;
4184
4185 if (ret == 0)
4186 progress++;
4187
4188 if (progress)
4189 return 0;
4190 return 1;
4191 }
4192
4193 /*
4194 * split the path's leaf in two, making sure there is at least data_size
4195 * available for the resulting leaf level of the path.
4196 *
4197 * returns 0 if all went well and < 0 on failure.
4198 */
4199 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4200 struct btrfs_root *root,
4201 const struct btrfs_key *ins_key,
4202 struct btrfs_path *path, int data_size,
4203 int extend)
4204 {
4205 struct btrfs_disk_key disk_key;
4206 struct extent_buffer *l;
4207 u32 nritems;
4208 int mid;
4209 int slot;
4210 struct extent_buffer *right;
4211 struct btrfs_fs_info *fs_info = root->fs_info;
4212 int ret = 0;
4213 int wret;
4214 int split;
4215 int num_doubles = 0;
4216 int tried_avoid_double = 0;
4217
4218 l = path->nodes[0];
4219 slot = path->slots[0];
4220 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4221 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4222 return -EOVERFLOW;
4223
4224 /* first try to make some room by pushing left and right */
4225 if (data_size && path->nodes[1]) {
4226 int space_needed = data_size;
4227
4228 if (slot < btrfs_header_nritems(l))
4229 space_needed -= btrfs_leaf_free_space(fs_info, l);
4230
4231 wret = push_leaf_right(trans, root, path, space_needed,
4232 space_needed, 0, 0);
4233 if (wret < 0)
4234 return wret;
4235 if (wret) {
4236 space_needed = data_size;
4237 if (slot > 0)
4238 space_needed -= btrfs_leaf_free_space(fs_info,
4239 l);
4240 wret = push_leaf_left(trans, root, path, space_needed,
4241 space_needed, 0, (u32)-1);
4242 if (wret < 0)
4243 return wret;
4244 }
4245 l = path->nodes[0];
4246
4247 /* did the pushes work? */
4248 if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4249 return 0;
4250 }
4251
4252 if (!path->nodes[1]) {
4253 ret = insert_new_root(trans, root, path, 1);
4254 if (ret)
4255 return ret;
4256 }
4257 again:
4258 split = 1;
4259 l = path->nodes[0];
4260 slot = path->slots[0];
4261 nritems = btrfs_header_nritems(l);
4262 mid = (nritems + 1) / 2;
4263
4264 if (mid <= slot) {
4265 if (nritems == 1 ||
4266 leaf_space_used(l, mid, nritems - mid) + data_size >
4267 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4268 if (slot >= nritems) {
4269 split = 0;
4270 } else {
4271 mid = slot;
4272 if (mid != nritems &&
4273 leaf_space_used(l, mid, nritems - mid) +
4274 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4275 if (data_size && !tried_avoid_double)
4276 goto push_for_double;
4277 split = 2;
4278 }
4279 }
4280 }
4281 } else {
4282 if (leaf_space_used(l, 0, mid) + data_size >
4283 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4284 if (!extend && data_size && slot == 0) {
4285 split = 0;
4286 } else if ((extend || !data_size) && slot == 0) {
4287 mid = 1;
4288 } else {
4289 mid = slot;
4290 if (mid != nritems &&
4291 leaf_space_used(l, mid, nritems - mid) +
4292 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4293 if (data_size && !tried_avoid_double)
4294 goto push_for_double;
4295 split = 2;
4296 }
4297 }
4298 }
4299 }
4300
4301 if (split == 0)
4302 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4303 else
4304 btrfs_item_key(l, &disk_key, mid);
4305
4306 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4307 &disk_key, 0, l->start, 0);
4308 if (IS_ERR(right))
4309 return PTR_ERR(right);
4310
4311 root_add_used(root, fs_info->nodesize);
4312
4313 memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
4314 btrfs_set_header_bytenr(right, right->start);
4315 btrfs_set_header_generation(right, trans->transid);
4316 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4317 btrfs_set_header_owner(right, root->root_key.objectid);
4318 btrfs_set_header_level(right, 0);
4319 write_extent_buffer_fsid(right, fs_info->fsid);
4320 write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
4321
4322 if (split == 0) {
4323 if (mid <= slot) {
4324 btrfs_set_header_nritems(right, 0);
4325 insert_ptr(trans, fs_info, path, &disk_key,
4326 right->start, path->slots[1] + 1, 1);
4327 btrfs_tree_unlock(path->nodes[0]);
4328 free_extent_buffer(path->nodes[0]);
4329 path->nodes[0] = right;
4330 path->slots[0] = 0;
4331 path->slots[1] += 1;
4332 } else {
4333 btrfs_set_header_nritems(right, 0);
4334 insert_ptr(trans, fs_info, path, &disk_key,
4335 right->start, path->slots[1], 1);
4336 btrfs_tree_unlock(path->nodes[0]);
4337 free_extent_buffer(path->nodes[0]);
4338 path->nodes[0] = right;
4339 path->slots[0] = 0;
4340 if (path->slots[1] == 0)
4341 fixup_low_keys(fs_info, path, &disk_key, 1);
4342 }
4343 /*
4344 * We create a new leaf 'right' for the required ins_len and
4345 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4346 * ins_len bytes of content into 'right'.
4347 */
4348 return ret;
4349 }
4350
4351 copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4352
4353 if (split == 2) {
4354 BUG_ON(num_doubles != 0);
4355 num_doubles++;
4356 goto again;
4357 }
4358
4359 return 0;
4360
4361 push_for_double:
4362 push_for_double_split(trans, root, path, data_size);
4363 tried_avoid_double = 1;
4364 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4365 return 0;
4366 goto again;
4367 }
4368
4369 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4370 struct btrfs_root *root,
4371 struct btrfs_path *path, int ins_len)
4372 {
4373 struct btrfs_fs_info *fs_info = root->fs_info;
4374 struct btrfs_key key;
4375 struct extent_buffer *leaf;
4376 struct btrfs_file_extent_item *fi;
4377 u64 extent_len = 0;
4378 u32 item_size;
4379 int ret;
4380
4381 leaf = path->nodes[0];
4382 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4383
4384 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4385 key.type != BTRFS_EXTENT_CSUM_KEY);
4386
4387 if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4388 return 0;
4389
4390 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4391 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4392 fi = btrfs_item_ptr(leaf, path->slots[0],
4393 struct btrfs_file_extent_item);
4394 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4395 }
4396 btrfs_release_path(path);
4397
4398 path->keep_locks = 1;
4399 path->search_for_split = 1;
4400 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4401 path->search_for_split = 0;
4402 if (ret > 0)
4403 ret = -EAGAIN;
4404 if (ret < 0)
4405 goto err;
4406
4407 ret = -EAGAIN;
4408 leaf = path->nodes[0];
4409 /* if our item isn't there, return now */
4410 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4411 goto err;
4412
4413 /* the leaf has changed, it now has room. return now */
4414 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4415 goto err;
4416
4417 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4418 fi = btrfs_item_ptr(leaf, path->slots[0],
4419 struct btrfs_file_extent_item);
4420 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4421 goto err;
4422 }
4423
4424 btrfs_set_path_blocking(path);
4425 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4426 if (ret)
4427 goto err;
4428
4429 path->keep_locks = 0;
4430 btrfs_unlock_up_safe(path, 1);
4431 return 0;
4432 err:
4433 path->keep_locks = 0;
4434 return ret;
4435 }
4436
4437 static noinline int split_item(struct btrfs_fs_info *fs_info,
4438 struct btrfs_path *path,
4439 const struct btrfs_key *new_key,
4440 unsigned long split_offset)
4441 {
4442 struct extent_buffer *leaf;
4443 struct btrfs_item *item;
4444 struct btrfs_item *new_item;
4445 int slot;
4446 char *buf;
4447 u32 nritems;
4448 u32 item_size;
4449 u32 orig_offset;
4450 struct btrfs_disk_key disk_key;
4451
4452 leaf = path->nodes[0];
4453 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4454
4455 btrfs_set_path_blocking(path);
4456
4457 item = btrfs_item_nr(path->slots[0]);
4458 orig_offset = btrfs_item_offset(leaf, item);
4459 item_size = btrfs_item_size(leaf, item);
4460
4461 buf = kmalloc(item_size, GFP_NOFS);
4462 if (!buf)
4463 return -ENOMEM;
4464
4465 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4466 path->slots[0]), item_size);
4467
4468 slot = path->slots[0] + 1;
4469 nritems = btrfs_header_nritems(leaf);
4470 if (slot != nritems) {
4471 /* shift the items */
4472 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4473 btrfs_item_nr_offset(slot),
4474 (nritems - slot) * sizeof(struct btrfs_item));
4475 }
4476
4477 btrfs_cpu_key_to_disk(&disk_key, new_key);
4478 btrfs_set_item_key(leaf, &disk_key, slot);
4479
4480 new_item = btrfs_item_nr(slot);
4481
4482 btrfs_set_item_offset(leaf, new_item, orig_offset);
4483 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4484
4485 btrfs_set_item_offset(leaf, item,
4486 orig_offset + item_size - split_offset);
4487 btrfs_set_item_size(leaf, item, split_offset);
4488
4489 btrfs_set_header_nritems(leaf, nritems + 1);
4490
4491 /* write the data for the start of the original item */
4492 write_extent_buffer(leaf, buf,
4493 btrfs_item_ptr_offset(leaf, path->slots[0]),
4494 split_offset);
4495
4496 /* write the data for the new item */
4497 write_extent_buffer(leaf, buf + split_offset,
4498 btrfs_item_ptr_offset(leaf, slot),
4499 item_size - split_offset);
4500 btrfs_mark_buffer_dirty(leaf);
4501
4502 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4503 kfree(buf);
4504 return 0;
4505 }
4506
4507 /*
4508 * This function splits a single item into two items,
4509 * giving 'new_key' to the new item and splitting the
4510 * old one at split_offset (from the start of the item).
4511 *
4512 * The path may be released by this operation. After
4513 * the split, the path is pointing to the old item. The
4514 * new item is going to be in the same node as the old one.
4515 *
4516 * Note, the item being split must be small enough to live alone on
4517 * a tree block with room for one extra struct btrfs_item
4518 *
4519 * This allows us to split the item in place, keeping a lock on the
4520 * leaf the entire time.
4521 */
4522 int btrfs_split_item(struct btrfs_trans_handle *trans,
4523 struct btrfs_root *root,
4524 struct btrfs_path *path,
4525 const struct btrfs_key *new_key,
4526 unsigned long split_offset)
4527 {
4528 int ret;
4529 ret = setup_leaf_for_split(trans, root, path,
4530 sizeof(struct btrfs_item));
4531 if (ret)
4532 return ret;
4533
4534 ret = split_item(root->fs_info, path, new_key, split_offset);
4535 return ret;
4536 }
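
/*
 * Illustrative usage sketch, not part of the original source: a caller
 * positions 'path' at the item with a cow=1 search and chooses the key the
 * tail half of the item should carry; note that setup_leaf_for_split() above
 * only accepts extent data and csum keys.  'trans', 'root', 'key', 'new_key'
 * and 'split_offset' are assumptions for the example, and btrfs_search_slot()
 * returning > 0 means the key was not found.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret) {
 *		btrfs_free_path(path);
 *		return ret < 0 ? ret : -ENOENT;
 *	}
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *	btrfs_free_path(path);
 *	return ret;
 */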
4537
4538 /*
4539 * This function duplicates an item, giving 'new_key' to the new item.
4540 * It guarantees both items live in the same tree leaf and the new item
4541 * is contiguous with the original item.
4542 *
4543 * This allows us to split a file extent in place, keeping a lock on the
4544 * leaf the entire time.
4545 */
4546 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4547 struct btrfs_root *root,
4548 struct btrfs_path *path,
4549 const struct btrfs_key *new_key)
4550 {
4551 struct extent_buffer *leaf;
4552 int ret;
4553 u32 item_size;
4554
4555 leaf = path->nodes[0];
4556 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4557 ret = setup_leaf_for_split(trans, root, path,
4558 item_size + sizeof(struct btrfs_item));
4559 if (ret)
4560 return ret;
4561
4562 path->slots[0]++;
4563 setup_items_for_insert(root, path, new_key, &item_size,
4564 item_size, item_size +
4565 sizeof(struct btrfs_item), 1);
4566 leaf = path->nodes[0];
4567 memcpy_extent_buffer(leaf,
4568 btrfs_item_ptr_offset(leaf, path->slots[0]),
4569 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4570 item_size);
4571 return 0;
4572 }
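
/*
 * Illustrative sketch, not part of the original source: with 'path' already
 * positioned at the source item (the same cow=1 search as in the sketch after
 * btrfs_split_item()), a caller only supplies the key for the copy; 'trans',
 * 'root' and 'new_key' are assumptions.  After a successful return,
 * path->slots[0] points at the new copy, which can be edited through
 * btrfs_item_ptr() and dirtied with btrfs_mark_buffer_dirty().
 *
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret)
 *		return ret;
 */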
4573
4574 /*
4575 * make the item pointed to by the path smaller. new_size indicates
4576 * how small to make it, and from_end tells us if we just chop bytes
4577 * off the end of the item or if we shift the item to chop bytes off
4578 * the front.
4579 */
4580 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4581 struct btrfs_path *path, u32 new_size, int from_end)
4582 {
4583 int slot;
4584 struct extent_buffer *leaf;
4585 struct btrfs_item *item;
4586 u32 nritems;
4587 unsigned int data_end;
4588 unsigned int old_data_start;
4589 unsigned int old_size;
4590 unsigned int size_diff;
4591 int i;
4592 struct btrfs_map_token token;
4593
4594 btrfs_init_map_token(&token);
4595
4596 leaf = path->nodes[0];
4597 slot = path->slots[0];
4598
4599 old_size = btrfs_item_size_nr(leaf, slot);
4600 if (old_size == new_size)
4601 return;
4602
4603 nritems = btrfs_header_nritems(leaf);
4604 data_end = leaf_data_end(fs_info, leaf);
4605
4606 old_data_start = btrfs_item_offset_nr(leaf, slot);
4607
4608 size_diff = old_size - new_size;
4609
4610 BUG_ON(slot < 0);
4611 BUG_ON(slot >= nritems);
4612
4613 /*
4614 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4615 */
4616 /* first correct the data pointers */
4617 for (i = slot; i < nritems; i++) {
4618 u32 ioff;
4619 item = btrfs_item_nr(i);
4620
4621 ioff = btrfs_token_item_offset(leaf, item, &token);
4622 btrfs_set_token_item_offset(leaf, item,
4623 ioff + size_diff, &token);
4624 }
4625
4626 /* shift the data */
4627 if (from_end) {
4628 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4629 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4630 data_end, old_data_start + new_size - data_end);
4631 } else {
4632 struct btrfs_disk_key disk_key;
4633 u64 offset;
4634
4635 btrfs_item_key(leaf, &disk_key, slot);
4636
4637 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4638 unsigned long ptr;
4639 struct btrfs_file_extent_item *fi;
4640
4641 fi = btrfs_item_ptr(leaf, slot,
4642 struct btrfs_file_extent_item);
4643 fi = (struct btrfs_file_extent_item *)(
4644 (unsigned long)fi - size_diff);
4645
4646 if (btrfs_file_extent_type(leaf, fi) ==
4647 BTRFS_FILE_EXTENT_INLINE) {
4648 ptr = btrfs_item_ptr_offset(leaf, slot);
4649 memmove_extent_buffer(leaf, ptr,
4650 (unsigned long)fi,
4651 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4652 }
4653 }
4654
4655 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4656 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4657 data_end, old_data_start - data_end);
4658
4659 offset = btrfs_disk_key_offset(&disk_key);
4660 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4661 btrfs_set_item_key(leaf, &disk_key, slot);
4662 if (slot == 0)
4663 fixup_low_keys(fs_info, path, &disk_key, 1);
4664 }
4665
4666 item = btrfs_item_nr(slot);
4667 btrfs_set_item_size(leaf, item, new_size);
4668 btrfs_mark_buffer_dirty(leaf);
4669
4670 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4671 btrfs_print_leaf(leaf);
4672 BUG();
4673 }
4674 }
4675
4676 /*
4677 * make the item pointed to by the path bigger, data_size is the added size.
4678 */
4679 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4680 u32 data_size)
4681 {
4682 int slot;
4683 struct extent_buffer *leaf;
4684 struct btrfs_item *item;
4685 u32 nritems;
4686 unsigned int data_end;
4687 unsigned int old_data;
4688 unsigned int old_size;
4689 int i;
4690 struct btrfs_map_token token;
4691
4692 btrfs_init_map_token(&token);
4693
4694 leaf = path->nodes[0];
4695
4696 nritems = btrfs_header_nritems(leaf);
4697 data_end = leaf_data_end(fs_info, leaf);
4698
4699 if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4700 btrfs_print_leaf(leaf);
4701 BUG();
4702 }
4703 slot = path->slots[0];
4704 old_data = btrfs_item_end_nr(leaf, slot);
4705
4706 BUG_ON(slot < 0);
4707 if (slot >= nritems) {
4708 btrfs_print_leaf(leaf);
4709 btrfs_crit(fs_info, "slot %d too large, nritems %d",
4710 slot, nritems);
4711 BUG_ON(1);
4712 }
4713
4714 /*
4715 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4716 */
4717 /* first correct the data pointers */
4718 for (i = slot; i < nritems; i++) {
4719 u32 ioff;
4720 item = btrfs_item_nr(i);
4721
4722 ioff = btrfs_token_item_offset(leaf, item, &token);
4723 btrfs_set_token_item_offset(leaf, item,
4724 ioff - data_size, &token);
4725 }
4726
4727 /* shift the data */
4728 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4729 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4730 data_end, old_data - data_end);
4731
4732 data_end = old_data;
4733 old_size = btrfs_item_size_nr(leaf, slot);
4734 item = btrfs_item_nr(slot);
4735 btrfs_set_item_size(leaf, item, old_size + data_size);
4736 btrfs_mark_buffer_dirty(leaf);
4737
4738 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4739 btrfs_print_leaf(leaf);
4740 BUG();
4741 }
4742 }
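
/*
 * Illustrative sketch, not part of the original source: btrfs_truncate_item()
 * and btrfs_extend_item() both resize the item the path currently points at;
 * the caller rewrites the payload afterwards.  Shrinking an item to
 * 'new_size' bytes and then growing it again by 'grow' bytes ('data',
 * 'new_size' and 'grow' are assumptions) would look roughly like:
 *
 *	btrfs_truncate_item(fs_info, path, new_size, 1);
 *	btrfs_extend_item(fs_info, path, grow);
 *	write_extent_buffer(path->nodes[0], data,
 *			    btrfs_item_ptr_offset(path->nodes[0],
 *						  path->slots[0]),
 *			    new_size + grow);
 *	btrfs_mark_buffer_dirty(path->nodes[0]);
 *
 * from_end == 1 chops bytes off the end of the item, from_end == 0 drops them
 * from the front and adjusts the item's key offset accordingly.
 */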
4743
4744 /*
4745 * this is a helper for btrfs_insert_empty_items, the main goal here is
4746 * to save stack depth by doing the bulk of the work in a function
4747 * that doesn't call btrfs_search_slot
4748 */
4749 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4750 const struct btrfs_key *cpu_key, u32 *data_size,
4751 u32 total_data, u32 total_size, int nr)
4752 {
4753 struct btrfs_fs_info *fs_info = root->fs_info;
4754 struct btrfs_item *item;
4755 int i;
4756 u32 nritems;
4757 unsigned int data_end;
4758 struct btrfs_disk_key disk_key;
4759 struct extent_buffer *leaf;
4760 int slot;
4761 struct btrfs_map_token token;
4762
4763 if (path->slots[0] == 0) {
4764 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4765 fixup_low_keys(fs_info, path, &disk_key, 1);
4766 }
4767 btrfs_unlock_up_safe(path, 1);
4768
4769 btrfs_init_map_token(&token);
4770
4771 leaf = path->nodes[0];
4772 slot = path->slots[0];
4773
4774 nritems = btrfs_header_nritems(leaf);
4775 data_end = leaf_data_end(fs_info, leaf);
4776
4777 if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4778 btrfs_print_leaf(leaf);
4779 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4780 total_size, btrfs_leaf_free_space(fs_info, leaf));
4781 BUG();
4782 }
4783
4784 if (slot != nritems) {
4785 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4786
4787 if (old_data < data_end) {
4788 btrfs_print_leaf(leaf);
4789 btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4790 slot, old_data, data_end);
4791 BUG_ON(1);
4792 }
4793 /*
4794 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4795 */
4796 /* first correct the data pointers */
4797 for (i = slot; i < nritems; i++) {
4798 u32 ioff;
4799
4800 item = btrfs_item_nr(i);
4801 ioff = btrfs_token_item_offset(leaf, item, &token);
4802 btrfs_set_token_item_offset(leaf, item,
4803 ioff - total_data, &token);
4804 }
4805 /* shift the items */
4806 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4807 btrfs_item_nr_offset(slot),
4808 (nritems - slot) * sizeof(struct btrfs_item));
4809
4810 /* shift the data */
4811 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4812 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4813 data_end, old_data - data_end);
4814 data_end = old_data;
4815 }
4816
4817 /* setup the item for the new data */
4818 for (i = 0; i < nr; i++) {
4819 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4820 btrfs_set_item_key(leaf, &disk_key, slot + i);
4821 item = btrfs_item_nr(slot + i);
4822 btrfs_set_token_item_offset(leaf, item,
4823 data_end - data_size[i], &token);
4824 data_end -= data_size[i];
4825 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4826 }
4827
4828 btrfs_set_header_nritems(leaf, nritems + nr);
4829 btrfs_mark_buffer_dirty(leaf);
4830
4831 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4832 btrfs_print_leaf(leaf);
4833 BUG();
4834 }
4835 }
4836
4837 /*
4838 * Given a key and some data, insert items into the tree.
4839 * This does all the path init required, making room in the tree if needed.
4840 */
4841 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4842 struct btrfs_root *root,
4843 struct btrfs_path *path,
4844 const struct btrfs_key *cpu_key, u32 *data_size,
4845 int nr)
4846 {
4847 int ret = 0;
4848 int slot;
4849 int i;
4850 u32 total_size = 0;
4851 u32 total_data = 0;
4852
4853 for (i = 0; i < nr; i++)
4854 total_data += data_size[i];
4855
4856 total_size = total_data + (nr * sizeof(struct btrfs_item));
4857 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4858 if (ret == 0)
4859 return -EEXIST;
4860 if (ret < 0)
4861 return ret;
4862
4863 slot = path->slots[0];
4864 BUG_ON(slot < 0);
4865
4866 setup_items_for_insert(root, path, cpu_key, data_size,
4867 total_data, total_size, nr);
4868 return 0;
4869 }
4870
4871 /*
4872 * Given a key and some data, insert an item into the tree.
4873 * This does all the path init required, making room in the tree if needed.
4874 */
4875 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4876 const struct btrfs_key *cpu_key, void *data,
4877 u32 data_size)
4878 {
4879 int ret = 0;
4880 struct btrfs_path *path;
4881 struct extent_buffer *leaf;
4882 unsigned long ptr;
4883
4884 path = btrfs_alloc_path();
4885 if (!path)
4886 return -ENOMEM;
4887 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4888 if (!ret) {
4889 leaf = path->nodes[0];
4890 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4891 write_extent_buffer(leaf, data, ptr, data_size);
4892 btrfs_mark_buffer_dirty(leaf);
4893 }
4894 btrfs_free_path(path);
4895 return ret;
4896 }
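
/*
 * Illustrative sketch, not part of the original source: btrfs_insert_item()
 * is the convenience wrapper for inserting a single, fully prepared item.
 * 'objectid', 'item_type' and the payload structure are assumptions:
 *
 *	struct btrfs_key key = { .objectid = objectid,
 *				 .type = item_type,
 *				 .offset = 0 };
 *	struct my_disk_item item = { 0 };
 *	int ret;
 *
 *	ret = btrfs_insert_item(trans, root, &key, &item, sizeof(item));
 *	if (ret)
 *		return ret;
 *
 * Callers that need the new item kept in a path use btrfs_insert_empty_item()
 * or btrfs_insert_empty_items() instead and fill the data through the
 * returned path, exactly as this function does above.
 */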
4897
4898 /*
4899 * delete the pointer from a given node.
4900 *
4901 * the tree should have been previously balanced so the deletion does not
4902 * empty a node.
4903 */
4904 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4905 int level, int slot)
4906 {
4907 struct btrfs_fs_info *fs_info = root->fs_info;
4908 struct extent_buffer *parent = path->nodes[level];
4909 u32 nritems;
4910 int ret;
4911
4912 nritems = btrfs_header_nritems(parent);
4913 if (slot != nritems - 1) {
4914 if (level)
4915 tree_mod_log_eb_move(fs_info, parent, slot,
4916 slot + 1, nritems - slot - 1);
4917 memmove_extent_buffer(parent,
4918 btrfs_node_key_ptr_offset(slot),
4919 btrfs_node_key_ptr_offset(slot + 1),
4920 sizeof(struct btrfs_key_ptr) *
4921 (nritems - slot - 1));
4922 } else if (level) {
4923 ret = tree_mod_log_insert_key(fs_info, parent, slot,
4924 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4925 BUG_ON(ret < 0);
4926 }
4927
4928 nritems--;
4929 btrfs_set_header_nritems(parent, nritems);
4930 if (nritems == 0 && parent == root->node) {
4931 BUG_ON(btrfs_header_level(root->node) != 1);
4932 /* just turn the root into a leaf and break */
4933 btrfs_set_header_level(root->node, 0);
4934 } else if (slot == 0) {
4935 struct btrfs_disk_key disk_key;
4936
4937 btrfs_node_key(parent, &disk_key, 0);
4938 fixup_low_keys(fs_info, path, &disk_key, level + 1);
4939 }
4940 btrfs_mark_buffer_dirty(parent);
4941 }
4942
4943 /*
4944 * a helper function to delete the leaf pointed to by path->slots[1] and
4945 * path->nodes[1].
4946 *
4947 * This deletes the pointer in path->nodes[1] and frees the leaf
4948 * block extent. zero is returned if it all worked out, < 0 otherwise.
4949 *
4950 * The path must have already been setup for deleting the leaf, including
4951 * all the proper balancing. path->nodes[1] must be locked.
4952 */
4953 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4954 struct btrfs_root *root,
4955 struct btrfs_path *path,
4956 struct extent_buffer *leaf)
4957 {
4958 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4959 del_ptr(root, path, 1, path->slots[1]);
4960
4961 /*
4962 * btrfs_free_extent is expensive, we want to make sure we
4963 * aren't holding any locks when we call it
4964 */
4965 btrfs_unlock_up_safe(path, 0);
4966
4967 root_sub_used(root, leaf->len);
4968
4969 extent_buffer_get(leaf);
4970 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4971 free_extent_buffer_stale(leaf);
4972 }
4973 /*
4974 * delete the item at the leaf level in path. If that empties
4975 * the leaf, remove it from the tree
4976 */
4977 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4978 struct btrfs_path *path, int slot, int nr)
4979 {
4980 struct btrfs_fs_info *fs_info = root->fs_info;
4981 struct extent_buffer *leaf;
4982 struct btrfs_item *item;
4983 u32 last_off;
4984 u32 dsize = 0;
4985 int ret = 0;
4986 int wret;
4987 int i;
4988 u32 nritems;
4989 struct btrfs_map_token token;
4990
4991 btrfs_init_map_token(&token);
4992
4993 leaf = path->nodes[0];
4994 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4995
4996 for (i = 0; i < nr; i++)
4997 dsize += btrfs_item_size_nr(leaf, slot + i);
4998
4999 nritems = btrfs_header_nritems(leaf);
5000
5001 if (slot + nr != nritems) {
5002 int data_end = leaf_data_end(fs_info, leaf);
5003
5004 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
5005 data_end + dsize,
5006 BTRFS_LEAF_DATA_OFFSET + data_end,
5007 last_off - data_end);
5008
5009 for (i = slot + nr; i < nritems; i++) {
5010 u32 ioff;
5011
5012 item = btrfs_item_nr(i);
5013 ioff = btrfs_token_item_offset(leaf, item, &token);
5014 btrfs_set_token_item_offset(leaf, item,
5015 ioff + dsize, &token);
5016 }
5017
5018 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
5019 btrfs_item_nr_offset(slot + nr),
5020 sizeof(struct btrfs_item) *
5021 (nritems - slot - nr));
5022 }
5023 btrfs_set_header_nritems(leaf, nritems - nr);
5024 nritems -= nr;
5025
5026 /* delete the leaf if we've emptied it */
5027 if (nritems == 0) {
5028 if (leaf == root->node) {
5029 btrfs_set_header_level(leaf, 0);
5030 } else {
5031 btrfs_set_path_blocking(path);
5032 clean_tree_block(fs_info, leaf);
5033 btrfs_del_leaf(trans, root, path, leaf);
5034 }
5035 } else {
5036 int used = leaf_space_used(leaf, 0, nritems);
5037 if (slot == 0) {
5038 struct btrfs_disk_key disk_key;
5039
5040 btrfs_item_key(leaf, &disk_key, 0);
5041 fixup_low_keys(fs_info, path, &disk_key, 1);
5042 }
5043
5044 /* delete the leaf if it is mostly empty */
5045 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5046 /* push_leaf_left fixes the path.
5047 * make sure the path still points to our leaf
5048 * for possible call to del_ptr below
5049 */
5050 slot = path->slots[1];
5051 extent_buffer_get(leaf);
5052
5053 btrfs_set_path_blocking(path);
5054 wret = push_leaf_left(trans, root, path, 1, 1,
5055 1, (u32)-1);
5056 if (wret < 0 && wret != -ENOSPC)
5057 ret = wret;
5058
5059 if (path->nodes[0] == leaf &&
5060 btrfs_header_nritems(leaf)) {
5061 wret = push_leaf_right(trans, root, path, 1,
5062 1, 1, 0);
5063 if (wret < 0 && wret != -ENOSPC)
5064 ret = wret;
5065 }
5066
5067 if (btrfs_header_nritems(leaf) == 0) {
5068 path->slots[1] = slot;
5069 btrfs_del_leaf(trans, root, path, leaf);
5070 free_extent_buffer(leaf);
5071 ret = 0;
5072 } else {
5073 /* if we're still in the path, make sure
5074 * we're dirty. Otherwise, one of the
5075 * push_leaf functions must have already
5076 * dirtied this buffer
5077 */
5078 if (path->nodes[0] == leaf)
5079 btrfs_mark_buffer_dirty(leaf);
5080 free_extent_buffer(leaf);
5081 }
5082 } else {
5083 btrfs_mark_buffer_dirty(leaf);
5084 }
5085 }
5086 return ret;
5087 }
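
/*
 * Illustrative sketch, not part of the original source: deleting one item is
 * normally a cow=1 search (the -1 ins_len tells btrfs_search_slot() this is a
 * deletion) followed by btrfs_del_items() on the returned slot.  'trans',
 * 'root' and 'key' are assumptions:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret) {
 *		btrfs_free_path(path);
 *		return ret < 0 ? ret : -ENOENT;
 *	}
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	btrfs_free_path(path);
 *	return ret;
 */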
5088
5089 /*
5090 * search the tree again to find a leaf with lesser keys
5091 * returns 0 if it found something or 1 if there are no lesser leaves.
5092 * returns < 0 on io errors.
5093 *
5094 * This may release the path, and so you may lose any locks held at the
5095 * time you call it.
5096 */
5097 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5098 {
5099 struct btrfs_key key;
5100 struct btrfs_disk_key found_key;
5101 int ret;
5102
5103 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5104
5105 if (key.offset > 0) {
5106 key.offset--;
5107 } else if (key.type > 0) {
5108 key.type--;
5109 key.offset = (u64)-1;
5110 } else if (key.objectid > 0) {
5111 key.objectid--;
5112 key.type = (u8)-1;
5113 key.offset = (u64)-1;
5114 } else {
5115 return 1;
5116 }
5117
5118 btrfs_release_path(path);
5119 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5120 if (ret < 0)
5121 return ret;
5122 btrfs_item_key(path->nodes[0], &found_key, 0);
5123 ret = comp_keys(&found_key, &key);
5124 /*
5125 * We might have had an item with the previous key in the tree right
5126 * before we released our path. And after we released our path, that
5127 * item might have been pushed to the first slot (0) of the leaf we
5128 * were holding due to a tree balance. Alternatively, an item with the
5129 * previous key can exist as the only element of a leaf (big fat item).
5130 * Therefore account for these 2 cases, so that our callers (like
5131 * btrfs_previous_item) don't miss an existing item with a key matching
5132 * the previous key we computed above.
5133 */
5134 if (ret <= 0)
5135 return 0;
5136 return 1;
5137 }
5138
5139 /*
5140 * A helper function to walk down the tree starting at min_key, and looking
5141 * for nodes or leaves that have a minimum transaction id.
5142 * This is used by the btree defrag code, and tree logging.
5143 *
5144 * This does not cow, but it does stuff the starting key it finds back
5145 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5146 * key and get a writable path.
5147 *
5148 * This does lock as it descends, and path->keep_locks should be set
5149 * to 1 by the caller.
5150 *
5151 * This honors path->lowest_level to prevent descent past a given level
5152 * of the tree.
5153 *
5154 * min_trans indicates the oldest transaction that you are interested
5155 * in walking through. Any nodes or leaves older than min_trans are
5156 * skipped over (without reading them).
5157 *
5158 * returns zero if something useful was found, < 0 on error and 1 if there
5159 * was nothing in the tree that matched the search criteria.
5160 */
5161 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5162 struct btrfs_path *path,
5163 u64 min_trans)
5164 {
5165 struct btrfs_fs_info *fs_info = root->fs_info;
5166 struct extent_buffer *cur;
5167 struct btrfs_key found_key;
5168 int slot;
5169 int sret;
5170 u32 nritems;
5171 int level;
5172 int ret = 1;
5173 int keep_locks = path->keep_locks;
5174
5175 path->keep_locks = 1;
5176 again:
5177 cur = btrfs_read_lock_root_node(root);
5178 level = btrfs_header_level(cur);
5179 WARN_ON(path->nodes[level]);
5180 path->nodes[level] = cur;
5181 path->locks[level] = BTRFS_READ_LOCK;
5182
5183 if (btrfs_header_generation(cur) < min_trans) {
5184 ret = 1;
5185 goto out;
5186 }
5187 while (1) {
5188 nritems = btrfs_header_nritems(cur);
5189 level = btrfs_header_level(cur);
5190 sret = btrfs_bin_search(cur, min_key, level, &slot);
5191
5192 /* at the lowest level, we're done, setup the path and exit */
5193 if (level == path->lowest_level) {
5194 if (slot >= nritems)
5195 goto find_next_key;
5196 ret = 0;
5197 path->slots[level] = slot;
5198 btrfs_item_key_to_cpu(cur, &found_key, slot);
5199 goto out;
5200 }
5201 if (sret && slot > 0)
5202 slot--;
5203 /*
5204 * check this node pointer against the min_trans parameter.
5205 * If it is too old, skip to the next one.
5206 */
5207 while (slot < nritems) {
5208 u64 gen;
5209
5210 gen = btrfs_node_ptr_generation(cur, slot);
5211 if (gen < min_trans) {
5212 slot++;
5213 continue;
5214 }
5215 break;
5216 }
5217 find_next_key:
5218 /*
5219 * we didn't find a candidate key in this node, walk forward
5220 * and find another one
5221 */
5222 if (slot >= nritems) {
5223 path->slots[level] = slot;
5224 btrfs_set_path_blocking(path);
5225 sret = btrfs_find_next_key(root, path, min_key, level,
5226 min_trans);
5227 if (sret == 0) {
5228 btrfs_release_path(path);
5229 goto again;
5230 } else {
5231 goto out;
5232 }
5233 }
5234 /* save our key for returning back */
5235 btrfs_node_key_to_cpu(cur, &found_key, slot);
5236 path->slots[level] = slot;
5237 if (level == path->lowest_level) {
5238 ret = 0;
5239 goto out;
5240 }
5241 btrfs_set_path_blocking(path);
5242 cur = read_node_slot(fs_info, cur, slot);
5243 if (IS_ERR(cur)) {
5244 ret = PTR_ERR(cur);
5245 goto out;
5246 }
5247
5248 btrfs_tree_read_lock(cur);
5249
5250 path->locks[level - 1] = BTRFS_READ_LOCK;
5251 path->nodes[level - 1] = cur;
5252 unlock_up(path, level, 1, 0, NULL);
5253 btrfs_clear_path_blocking(path, NULL, 0);
5254 }
5255 out:
5256 path->keep_locks = keep_locks;
5257 if (ret == 0) {
5258 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5259 btrfs_set_path_blocking(path);
5260 memcpy(min_key, &found_key, sizeof(found_key));
5261 }
5262 return ret;
5263 }
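
/*
 * Illustrative sketch, not part of the original source: defrag and tree-log
 * style callers drive btrfs_search_forward() in a loop and bump the key after
 * each hit.  'root', 'min_trans' and process_item() are assumptions, and a
 * real caller would carry the key increment over into key.type/key.objectid
 * instead of simply stopping at the largest offset:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key min_key = { 0 };
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *		if (ret)
 *			break;
 *		process_item(path, &min_key);
 *		btrfs_release_path(path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 *	btrfs_free_path(path);
 *	return ret < 0 ? ret : 0;
 */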
5264
5265 static int tree_move_down(struct btrfs_fs_info *fs_info,
5266 struct btrfs_path *path,
5267 int *level)
5268 {
5269 struct extent_buffer *eb;
5270
5271 BUG_ON(*level == 0);
5272 eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5273 if (IS_ERR(eb))
5274 return PTR_ERR(eb);
5275
5276 path->nodes[*level - 1] = eb;
5277 path->slots[*level - 1] = 0;
5278 (*level)--;
5279 return 0;
5280 }
5281
5282 static int tree_move_next_or_upnext(struct btrfs_path *path,
5283 int *level, int root_level)
5284 {
5285 int ret = 0;
5286 int nritems;
5287 nritems = btrfs_header_nritems(path->nodes[*level]);
5288
5289 path->slots[*level]++;
5290
5291 while (path->slots[*level] >= nritems) {
5292 if (*level == root_level)
5293 return -1;
5294
5295 /* move upnext */
5296 path->slots[*level] = 0;
5297 free_extent_buffer(path->nodes[*level]);
5298 path->nodes[*level] = NULL;
5299 (*level)++;
5300 path->slots[*level]++;
5301
5302 nritems = btrfs_header_nritems(path->nodes[*level]);
5303 ret = 1;
5304 }
5305 return ret;
5306 }
5307
5308 /*
5309 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5310 * or down.
5311 */
5312 static int tree_advance(struct btrfs_fs_info *fs_info,
5313 struct btrfs_path *path,
5314 int *level, int root_level,
5315 int allow_down,
5316 struct btrfs_key *key)
5317 {
5318 int ret;
5319
5320 if (*level == 0 || !allow_down) {
5321 ret = tree_move_next_or_upnext(path, level, root_level);
5322 } else {
5323 ret = tree_move_down(fs_info, path, level);
5324 }
5325 if (ret >= 0) {
5326 if (*level == 0)
5327 btrfs_item_key_to_cpu(path->nodes[*level], key,
5328 path->slots[*level]);
5329 else
5330 btrfs_node_key_to_cpu(path->nodes[*level], key,
5331 path->slots[*level]);
5332 }
5333 return ret;
5334 }
5335
5336 static int tree_compare_item(struct btrfs_path *left_path,
5337 struct btrfs_path *right_path,
5338 char *tmp_buf)
5339 {
5340 int cmp;
5341 int len1, len2;
5342 unsigned long off1, off2;
5343
5344 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5345 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5346 if (len1 != len2)
5347 return 1;
5348
5349 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5350 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5351 right_path->slots[0]);
5352
5353 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5354
5355 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5356 if (cmp)
5357 return 1;
5358 return 0;
5359 }
5360
5361 #define ADVANCE 1
5362 #define ADVANCE_ONLY_NEXT -1
5363
5364 /*
5365 * This function compares two trees and calls the provided callback for
5366 * every changed/new/deleted item it finds.
5367 * If shared tree blocks are encountered, whole subtrees are skipped, making
5368 * the compare pretty fast on snapshotted subvolumes.
5369 *
5370 * This currently works on commit roots only. As commit roots are read only,
5371 * we don't do any locking. The commit roots are protected with transactions.
5372 * Transactions are ended and rejoined when a commit is tried in between.
5373 *
5374 * This function checks for modifications done to the trees while comparing.
5375 * If it detects a change, it aborts immediately.
5376 */
5377 int btrfs_compare_trees(struct btrfs_root *left_root,
5378 struct btrfs_root *right_root,
5379 btrfs_changed_cb_t changed_cb, void *ctx)
5380 {
5381 struct btrfs_fs_info *fs_info = left_root->fs_info;
5382 int ret;
5383 int cmp;
5384 struct btrfs_path *left_path = NULL;
5385 struct btrfs_path *right_path = NULL;
5386 struct btrfs_key left_key;
5387 struct btrfs_key right_key;
5388 char *tmp_buf = NULL;
5389 int left_root_level;
5390 int right_root_level;
5391 int left_level;
5392 int right_level;
5393 int left_end_reached;
5394 int right_end_reached;
5395 int advance_left;
5396 int advance_right;
5397 u64 left_blockptr;
5398 u64 right_blockptr;
5399 u64 left_gen;
5400 u64 right_gen;
5401
5402 left_path = btrfs_alloc_path();
5403 if (!left_path) {
5404 ret = -ENOMEM;
5405 goto out;
5406 }
5407 right_path = btrfs_alloc_path();
5408 if (!right_path) {
5409 ret = -ENOMEM;
5410 goto out;
5411 }
5412
5413 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5414 if (!tmp_buf) {
5415 ret = -ENOMEM;
5416 goto out;
5417 }
5418
5419 left_path->search_commit_root = 1;
5420 left_path->skip_locking = 1;
5421 right_path->search_commit_root = 1;
5422 right_path->skip_locking = 1;
5423
5424 /*
5425 * Strategy: Go to the first items of both trees. Then do
5426 *
5427 * If both trees are at level 0
5428 * Compare keys of current items
5429 * If left < right treat left item as new, advance left tree
5430 * and repeat
5431 * If left > right treat right item as deleted, advance right tree
5432 * and repeat
5433 * If left == right do deep compare of items, treat as changed if
5434 * needed, advance both trees and repeat
5435 * If both trees are at the same level but not at level 0
5436 * Compare keys of current nodes/leaves
5437 * If left < right advance left tree and repeat
5438 * If left > right advance right tree and repeat
5439 * If left == right compare blockptrs of the next nodes/leaves
5440 * If they match advance both trees but stay at the same level
5441 * and repeat
5442 * If they don't match advance both trees while allowing to go
5443 * deeper and repeat
5444 * If tree levels are different
5445 * Advance the tree that needs it and repeat
5446 *
5447 * Advancing a tree means:
5448 * If we are at level 0, try to go to the next slot. If that's not
5449 * possible, go one level up and repeat. Stop when we found a level
5450 * where we could go to the next slot. We may at this point be on a
5451 * node or a leaf.
5452 *
5453 * If we are not at level 0 and not on shared tree blocks, go one
5454 * level deeper.
5455 *
5456 * If we are not at level 0 and on shared tree blocks, go one slot to
5457 * the right if possible or go up and right.
5458 */
5459
5460 down_read(&fs_info->commit_root_sem);
5461 left_level = btrfs_header_level(left_root->commit_root);
5462 left_root_level = left_level;
5463 left_path->nodes[left_level] = left_root->commit_root;
5464 extent_buffer_get(left_path->nodes[left_level]);
5465
5466 right_level = btrfs_header_level(right_root->commit_root);
5467 right_root_level = right_level;
5468 right_path->nodes[right_level] = right_root->commit_root;
5469 extent_buffer_get(right_path->nodes[right_level]);
5470 up_read(&fs_info->commit_root_sem);
5471
5472 if (left_level == 0)
5473 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5474 &left_key, left_path->slots[left_level]);
5475 else
5476 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5477 &left_key, left_path->slots[left_level]);
5478 if (right_level == 0)
5479 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5480 &right_key, right_path->slots[right_level]);
5481 else
5482 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5483 &right_key, right_path->slots[right_level]);
5484
5485 left_end_reached = right_end_reached = 0;
5486 advance_left = advance_right = 0;
5487
5488 while (1) {
5489 if (advance_left && !left_end_reached) {
5490 ret = tree_advance(fs_info, left_path, &left_level,
5491 left_root_level,
5492 advance_left != ADVANCE_ONLY_NEXT,
5493 &left_key);
5494 if (ret == -1)
5495 left_end_reached = ADVANCE;
5496 else if (ret < 0)
5497 goto out;
5498 advance_left = 0;
5499 }
5500 if (advance_right && !right_end_reached) {
5501 ret = tree_advance(fs_info, right_path, &right_level,
5502 right_root_level,
5503 advance_right != ADVANCE_ONLY_NEXT,
5504 &right_key);
5505 if (ret == -1)
5506 right_end_reached = ADVANCE;
5507 else if (ret < 0)
5508 goto out;
5509 advance_right = 0;
5510 }
5511
5512 if (left_end_reached && right_end_reached) {
5513 ret = 0;
5514 goto out;
5515 } else if (left_end_reached) {
5516 if (right_level == 0) {
5517 ret = changed_cb(left_path, right_path,
5518 &right_key,
5519 BTRFS_COMPARE_TREE_DELETED,
5520 ctx);
5521 if (ret < 0)
5522 goto out;
5523 }
5524 advance_right = ADVANCE;
5525 continue;
5526 } else if (right_end_reached) {
5527 if (left_level == 0) {
5528 ret = changed_cb(left_path, right_path,
5529 &left_key,
5530 BTRFS_COMPARE_TREE_NEW,
5531 ctx);
5532 if (ret < 0)
5533 goto out;
5534 }
5535 advance_left = ADVANCE;
5536 continue;
5537 }
5538
5539 if (left_level == 0 && right_level == 0) {
5540 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5541 if (cmp < 0) {
5542 ret = changed_cb(left_path, right_path,
5543 &left_key,
5544 BTRFS_COMPARE_TREE_NEW,
5545 ctx);
5546 if (ret < 0)
5547 goto out;
5548 advance_left = ADVANCE;
5549 } else if (cmp > 0) {
5550 ret = changed_cb(left_path, right_path,
5551 &right_key,
5552 BTRFS_COMPARE_TREE_DELETED,
5553 ctx);
5554 if (ret < 0)
5555 goto out;
5556 advance_right = ADVANCE;
5557 } else {
5558 enum btrfs_compare_tree_result result;
5559
5560 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5561 ret = tree_compare_item(left_path, right_path,
5562 tmp_buf);
5563 if (ret)
5564 result = BTRFS_COMPARE_TREE_CHANGED;
5565 else
5566 result = BTRFS_COMPARE_TREE_SAME;
5567 ret = changed_cb(left_path, right_path,
5568 &left_key, result, ctx);
5569 if (ret < 0)
5570 goto out;
5571 advance_left = ADVANCE;
5572 advance_right = ADVANCE;
5573 }
5574 } else if (left_level == right_level) {
5575 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5576 if (cmp < 0) {
5577 advance_left = ADVANCE;
5578 } else if (cmp > 0) {
5579 advance_right = ADVANCE;
5580 } else {
5581 left_blockptr = btrfs_node_blockptr(
5582 left_path->nodes[left_level],
5583 left_path->slots[left_level]);
5584 right_blockptr = btrfs_node_blockptr(
5585 right_path->nodes[right_level],
5586 right_path->slots[right_level]);
5587 left_gen = btrfs_node_ptr_generation(
5588 left_path->nodes[left_level],
5589 left_path->slots[left_level]);
5590 right_gen = btrfs_node_ptr_generation(
5591 right_path->nodes[right_level],
5592 right_path->slots[right_level]);
5593 if (left_blockptr == right_blockptr &&
5594 left_gen == right_gen) {
5595 /*
5596 * As we're on a shared block, don't
5597 * allow to go deeper.
5598 */
5599 advance_left = ADVANCE_ONLY_NEXT;
5600 advance_right = ADVANCE_ONLY_NEXT;
5601 } else {
5602 advance_left = ADVANCE;
5603 advance_right = ADVANCE;
5604 }
5605 }
5606 } else if (left_level < right_level) {
5607 advance_right = ADVANCE;
5608 } else {
5609 advance_left = ADVANCE;
5610 }
5611 }
5612
5613 out:
5614 btrfs_free_path(left_path);
5615 btrfs_free_path(right_path);
5616 kvfree(tmp_buf);
5617 return ret;
5618 }
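
/*
 * Illustrative sketch, not part of the original source: the send code is the
 * typical user of btrfs_compare_trees().  A minimal callback matching the way
 * changed_cb is invoked above could look like the following; 'send_root',
 * 'parent_root' and 'sctx' are placeholders:
 *
 *	static int my_changed_cb(struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		if (result == BTRFS_COMPARE_TREE_SAME)
 *			return 0;
 *		pr_debug("change at (%llu %u %llu), result %d\n",
 *			 key->objectid, key->type, key->offset, result);
 *		return 0;
 *	}
 *
 *	ret = btrfs_compare_trees(send_root, parent_root, my_changed_cb, sctx);
 */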
5619
5620 /*
5621 * this is similar to btrfs_next_leaf, but does not try to preserve
5622 * and fixup the path. It looks for and returns the next key in the
5623 * tree based on the current path and the min_trans parameters.
5624 *
5625 * 0 is returned if another key is found, < 0 if there are any errors
5626 * and 1 is returned if there are no higher keys in the tree
5627 *
5628 * path->keep_locks should be set to 1 on the search made before
5629 * calling this function.
5630 */
5631 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5632 struct btrfs_key *key, int level, u64 min_trans)
5633 {
5634 int slot;
5635 struct extent_buffer *c;
5636
5637 WARN_ON(!path->keep_locks);
5638 while (level < BTRFS_MAX_LEVEL) {
5639 if (!path->nodes[level])
5640 return 1;
5641
5642 slot = path->slots[level] + 1;
5643 c = path->nodes[level];
5644 next:
5645 if (slot >= btrfs_header_nritems(c)) {
5646 int ret;
5647 int orig_lowest;
5648 struct btrfs_key cur_key;
5649 if (level + 1 >= BTRFS_MAX_LEVEL ||
5650 !path->nodes[level + 1])
5651 return 1;
5652
5653 if (path->locks[level + 1]) {
5654 level++;
5655 continue;
5656 }
5657
5658 slot = btrfs_header_nritems(c) - 1;
5659 if (level == 0)
5660 btrfs_item_key_to_cpu(c, &cur_key, slot);
5661 else
5662 btrfs_node_key_to_cpu(c, &cur_key, slot);
5663
5664 orig_lowest = path->lowest_level;
5665 btrfs_release_path(path);
5666 path->lowest_level = level;
5667 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5668 0, 0);
5669 path->lowest_level = orig_lowest;
5670 if (ret < 0)
5671 return ret;
5672
5673 c = path->nodes[level];
5674 slot = path->slots[level];
5675 if (ret == 0)
5676 slot++;
5677 goto next;
5678 }
5679
5680 if (level == 0)
5681 btrfs_item_key_to_cpu(c, key, slot);
5682 else {
5683 u64 gen = btrfs_node_ptr_generation(c, slot);
5684
5685 if (gen < min_trans) {
5686 slot++;
5687 goto next;
5688 }
5689 btrfs_node_key_to_cpu(c, key, slot);
5690 }
5691 return 0;
5692 }
5693 return 1;
5694 }
5695
5696 /*
5697 * search the tree again to find a leaf with greater keys
5698 * returns 0 if it found something or 1 if there are no greater leaves.
5699 * returns < 0 on io errors.
5700 */
5701 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5702 {
5703 return btrfs_next_old_leaf(root, path, 0);
5704 }
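
/*
 * Illustrative sketch, not part of the original source: the common way to
 * walk items forward is a read-only search to the first interesting key
 * followed by a btrfs_next_leaf() loop.  'root', 'objectid' and
 * process_slot() are assumptions:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = objectid, .type = 0, .offset = 0 };
 *	struct extent_buffer *leaf;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		process_slot(leaf, path->slots[0], &key);
 *		path->slots[0]++;
 *	}
 * out:
 *	btrfs_free_path(path);
 *	return ret < 0 ? ret : 0;
 */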
5705
5706 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5707 u64 time_seq)
5708 {
5709 int slot;
5710 int level;
5711 struct extent_buffer *c;
5712 struct extent_buffer *next;
5713 struct btrfs_key key;
5714 u32 nritems;
5715 int ret;
5716 int old_spinning = path->leave_spinning;
5717 int next_rw_lock = 0;
5718
5719 nritems = btrfs_header_nritems(path->nodes[0]);
5720 if (nritems == 0)
5721 return 1;
5722
5723 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5724 again:
5725 level = 1;
5726 next = NULL;
5727 next_rw_lock = 0;
5728 btrfs_release_path(path);
5729
5730 path->keep_locks = 1;
5731 path->leave_spinning = 1;
5732
5733 if (time_seq)
5734 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5735 else
5736 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5737 path->keep_locks = 0;
5738
5739 if (ret < 0)
5740 return ret;
5741
5742 nritems = btrfs_header_nritems(path->nodes[0]);
5743 /*
5744 * by releasing the path above we dropped all our locks. A balance
5745 * could have added more items next to the key that used to be
5746 * at the very end of the block. So, check again here and
5747 * advance the path if there are now more items available.
5748 */
5749 if (nritems > 0 && path->slots[0] < nritems - 1) {
5750 if (ret == 0)
5751 path->slots[0]++;
5752 ret = 0;
5753 goto done;
5754 }
5755 /*
5756 * So the above check misses one case:
5757 * - after releasing the path above, someone has removed the item that
5758 * used to be at the very end of the block, and balance between leaves
5759 * gets another one with bigger key.offset to replace it.
5760 *
5761 * This one should be returned as well, or we can get leaf corruption
5762 * later (esp. in __btrfs_drop_extents()).
5763 *
5764 * A bit more explanation about this check:
5765 * with ret > 0, the key isn't found, the path points to the slot
5766 * where it should be inserted, so the path->slots[0] item must be the
5767 * bigger one.
5768 */
5769 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5770 ret = 0;
5771 goto done;
5772 }
5773
5774 while (level < BTRFS_MAX_LEVEL) {
5775 if (!path->nodes[level]) {
5776 ret = 1;
5777 goto done;
5778 }
5779
5780 slot = path->slots[level] + 1;
5781 c = path->nodes[level];
5782 if (slot >= btrfs_header_nritems(c)) {
5783 level++;
5784 if (level == BTRFS_MAX_LEVEL) {
5785 ret = 1;
5786 goto done;
5787 }
5788 continue;
5789 }
5790
5791 if (next) {
5792 btrfs_tree_unlock_rw(next, next_rw_lock);
5793 free_extent_buffer(next);
5794 }
5795
5796 next = c;
5797 next_rw_lock = path->locks[level];
5798 ret = read_block_for_search(root, path, &next, level,
5799 slot, &key);
5800 if (ret == -EAGAIN)
5801 goto again;
5802
5803 if (ret < 0) {
5804 btrfs_release_path(path);
5805 goto done;
5806 }
5807
5808 if (!path->skip_locking) {
5809 ret = btrfs_try_tree_read_lock(next);
5810 if (!ret && time_seq) {
5811 /*
5812 * If we don't get the lock, we may be racing
5813 * with push_leaf_left, holding that lock while
5814 * itself waiting for the leaf we've currently
5815 * locked. To solve this situation, we give up
5816 * on our lock and cycle.
5817 */
5818 free_extent_buffer(next);
5819 btrfs_release_path(path);
5820 cond_resched();
5821 goto again;
5822 }
5823 if (!ret) {
5824 btrfs_set_path_blocking(path);
5825 btrfs_tree_read_lock(next);
5826 btrfs_clear_path_blocking(path, next,
5827 BTRFS_READ_LOCK);
5828 }
5829 next_rw_lock = BTRFS_READ_LOCK;
5830 }
5831 break;
5832 }
5833 path->slots[level] = slot;
5834 while (1) {
5835 level--;
5836 c = path->nodes[level];
5837 if (path->locks[level])
5838 btrfs_tree_unlock_rw(c, path->locks[level]);
5839
5840 free_extent_buffer(c);
5841 path->nodes[level] = next;
5842 path->slots[level] = 0;
5843 if (!path->skip_locking)
5844 path->locks[level] = next_rw_lock;
5845 if (!level)
5846 break;
5847
5848 ret = read_block_for_search(root, path, &next, level,
5849 0, &key);
5850 if (ret == -EAGAIN)
5851 goto again;
5852
5853 if (ret < 0) {
5854 btrfs_release_path(path);
5855 goto done;
5856 }
5857
5858 if (!path->skip_locking) {
5859 ret = btrfs_try_tree_read_lock(next);
5860 if (!ret) {
5861 btrfs_set_path_blocking(path);
5862 btrfs_tree_read_lock(next);
5863 btrfs_clear_path_blocking(path, next,
5864 BTRFS_READ_LOCK);
5865 }
5866 next_rw_lock = BTRFS_READ_LOCK;
5867 }
5868 }
5869 ret = 0;
5870 done:
5871 unlock_up(path, 0, 1, 0, NULL);
5872 path->leave_spinning = old_spinning;
5873 if (!old_spinning)
5874 btrfs_set_path_blocking(path);
5875
5876 return ret;
5877 }
5878
5879 /*
5880 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5881 * searching until it gets past min_objectid or finds an item of 'type'
5882 *
5883 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5884 */
5885 int btrfs_previous_item(struct btrfs_root *root,
5886 struct btrfs_path *path, u64 min_objectid,
5887 int type)
5888 {
5889 struct btrfs_key found_key;
5890 struct extent_buffer *leaf;
5891 u32 nritems;
5892 int ret;
5893
5894 while (1) {
5895 if (path->slots[0] == 0) {
5896 btrfs_set_path_blocking(path);
5897 ret = btrfs_prev_leaf(root, path);
5898 if (ret != 0)
5899 return ret;
5900 } else {
5901 path->slots[0]--;
5902 }
5903 leaf = path->nodes[0];
5904 nritems = btrfs_header_nritems(leaf);
5905 if (nritems == 0)
5906 return 1;
5907 if (path->slots[0] == nritems)
5908 path->slots[0]--;
5909
5910 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5911 if (found_key.objectid < min_objectid)
5912 break;
5913 if (found_key.type == type)
5914 return 0;
5915 if (found_key.objectid == min_objectid &&
5916 found_key.type < type)
5917 break;
5918 }
5919 return 1;
5920 }
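
/*
 * Illustrative sketch, not part of the original source: a common pattern is
 * to find the last existing item of a given type for an objectid by searching
 * for the largest possible offset and stepping back.  'root', 'path',
 * 'objectid' and the choice of BTRFS_EXTENT_DATA_KEY are assumptions:
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = (u64)-1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		return ret;
 *	ret = btrfs_previous_item(root, path, objectid, BTRFS_EXTENT_DATA_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 */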
5921
5922 /*
5923 * search in extent tree to find a previous Metadata/Data extent item with
5924 * min objectid.
5925 *
5926 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5927 */
5928 int btrfs_previous_extent_item(struct btrfs_root *root,
5929 struct btrfs_path *path, u64 min_objectid)
5930 {
5931 struct btrfs_key found_key;
5932 struct extent_buffer *leaf;
5933 u32 nritems;
5934 int ret;
5935
5936 while (1) {
5937 if (path->slots[0] == 0) {
5938 btrfs_set_path_blocking(path);
5939 ret = btrfs_prev_leaf(root, path);
5940 if (ret != 0)
5941 return ret;
5942 } else {
5943 path->slots[0]--;
5944 }
5945 leaf = path->nodes[0];
5946 nritems = btrfs_header_nritems(leaf);
5947 if (nritems == 0)
5948 return 1;
5949 if (path->slots[0] == nritems)
5950 path->slots[0]--;
5951
5952 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5953 if (found_key.objectid < min_objectid)
5954 break;
5955 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5956 found_key.type == BTRFS_METADATA_ITEM_KEY)
5957 return 0;
5958 if (found_key.objectid == min_objectid &&
5959 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5960 break;
5961 }
5962 return 1;
5963 }