Btrfs: error out if generic_bin_search gets invalid arguments
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);

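/*
 * Allocate a zeroed btrfs_path from the dedicated slab cache; callers
 * release it again with btrfs_free_path().
 */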
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy: when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely pass NULL
 * for held.
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of the
 * tree until you end up with a read lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* Cow-only roots (everything that is not a reference-counted COW
 * subvolume) just get put onto a simple dirty list.  transaction.c
 * walks this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&root->fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &root->fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative
 * error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
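
/*
 * The tree modification log records every change made to b-tree nodes
 * (key replace/add/remove, key moves within a node, and root
 * replacements) as tree_mod_elem entries in an rb-tree keyed by
 * (logical block address, sequence number).  While at least one
 * blocker holds a sequence number via btrfs_get_tree_mod_seq(), the
 * log lets readers rewind a buffer to the state it had at that
 * sequence point (see tree_mod_log_rewind() and get_old_root()).
 */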

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller
 * expects to record tree modifications, it should ensure elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if
 * no new blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted.  Returns 1 if it can.  Otherwise, it
 * returns zero with the tree_mod_log_lock acquired.  The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
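
/*
 * Allocate and fill a log element for a single-slot operation.  For
 * anything but MOD_LOG_KEY_ADD the current key and block pointer in
 * @slot are captured as well, so the operation can later be rewound.
 */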
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
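
/*
 * Walk the log rb-tree for @start and return either the entry with the
 * smallest sequence number >= @min_seq (@smallest set) or the entry
 * with the largest sequence number (@smallest clear).  Entries with a
 * sequence number below @min_seq are ignored in both cases.
 */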
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item).  any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item).  any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
				      MOD_LOG_KEY_REPLACE,
				      atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared.  If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed on blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(root->fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root->fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all).  this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it.  this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root.  normally, we'll find
		 * a MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb.  then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification.  as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special.  for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root.  in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root.  we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked.  If the buffer cannot be rewound, the same
 * buffer is returned.  If rewind operations happen, a fresh buffer is
 * returned.  The returned buffer is always read-locked.  If the returned
 * buffer is not the input buffer, the lock on the input buffer is released
 * and the input buffer is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
						     eb->len);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value.  If there are no changes, the current root->root_node is returned.  If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done.  In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(root, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(root->fs_info,
				"failed to read tree block %llu from get_old_root", logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(root->fs_info, logical,
					       root->nodesize);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}
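
/*
 * Return the level @root's root node had at @time_seq: the level
 * recorded in the tree mod log for the oldest root predecessor, or the
 * current header level if no root replacement was logged.
 */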
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_test_is_dummy_root(root))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid,
		     root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid, root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
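/*
 * Example: with a 16KiB blocksize, blocks at byte offsets 0 and 20480
 * count as close (the gap is 20480 - 16384 = 4KiB, below the 32KiB
 * threshold), while blocks at 0 and 65536 do not.
 */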
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
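
/*
 * Keys therefore sort lexicographically on (objectid, type, offset),
 * e.g. {1,1,0} < {1,2,0} < {2,0,0}; equal keys compare as 0.
 */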

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = root->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root->fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}


/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

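	/*
	 * A corrupted @max (and thus high) can make high < low here.
	 * Error out instead of searching past the buffer; this is the
	 * check added by the commit named at the top of this file.
	 */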
	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
		if (!IS_ERR(eb))
			free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}
1894
1895 /*
1896 * node level balancing, used to make sure nodes are in proper order for
1897 * item deletion. We balance from the top down, so we have to make sure
1898 * that a deletion won't leave an node completely empty later on.
1899 */
1900 static noinline int balance_level(struct btrfs_trans_handle *trans,
1901 struct btrfs_root *root,
1902 struct btrfs_path *path, int level)
1903 {
1904 struct extent_buffer *right = NULL;
1905 struct extent_buffer *mid;
1906 struct extent_buffer *left = NULL;
1907 struct extent_buffer *parent = NULL;
1908 int ret = 0;
1909 int wret;
1910 int pslot;
1911 int orig_slot = path->slots[level];
1912 u64 orig_ptr;
1913
1914 if (level == 0)
1915 return 0;
1916
1917 mid = path->nodes[level];
1918
1919 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1920 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1921 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1922
1923 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1924
1925 if (level < BTRFS_MAX_LEVEL - 1) {
1926 parent = path->nodes[level + 1];
1927 pslot = path->slots[level + 1];
1928 }
1929
1930 /*
1931 * deal with the case where there is only one pointer in the root
1932 * by promoting the node below to a root
1933 */
1934 if (!parent) {
1935 struct extent_buffer *child;
1936
1937 if (btrfs_header_nritems(mid) != 1)
1938 return 0;
1939
1940 /* promote the child to a root */
1941 child = read_node_slot(root, mid, 0);
1942 if (!child) {
1943 ret = -EROFS;
1944 btrfs_handle_fs_error(root->fs_info, ret, NULL);
1945 goto enospc;
1946 }
1947
1948 btrfs_tree_lock(child);
1949 btrfs_set_lock_blocking(child);
1950 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1951 if (ret) {
1952 btrfs_tree_unlock(child);
1953 free_extent_buffer(child);
1954 goto enospc;
1955 }
1956
1957 tree_mod_log_set_root_pointer(root, child, 1);
1958 rcu_assign_pointer(root->node, child);
1959
1960 add_root_to_dirty_list(root);
1961 btrfs_tree_unlock(child);
1962
1963 path->locks[level] = 0;
1964 path->nodes[level] = NULL;
1965 clean_tree_block(trans, root->fs_info, mid);
1966 btrfs_tree_unlock(mid);
1967 /* once for the path */
1968 free_extent_buffer(mid);
1969
1970 root_sub_used(root, mid->len);
1971 btrfs_free_tree_block(trans, root, mid, 0, 1);
1972 /* once for the root ptr */
1973 free_extent_buffer_stale(mid);
1974 return 0;
1975 }
1976 if (btrfs_header_nritems(mid) >
1977 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1978 return 0;
1979
1980 left = read_node_slot(root, parent, pslot - 1);
1981 if (left) {
1982 btrfs_tree_lock(left);
1983 btrfs_set_lock_blocking(left);
1984 wret = btrfs_cow_block(trans, root, left,
1985 parent, pslot - 1, &left);
1986 if (wret) {
1987 ret = wret;
1988 goto enospc;
1989 }
1990 }
1991 right = read_node_slot(root, parent, pslot + 1);
1992 if (right) {
1993 btrfs_tree_lock(right);
1994 btrfs_set_lock_blocking(right);
1995 wret = btrfs_cow_block(trans, root, right,
1996 parent, pslot + 1, &right);
1997 if (wret) {
1998 ret = wret;
1999 goto enospc;
2000 }
2001 }
2002
2003 /* first, try to make some room in the middle buffer */
2004 if (left) {
2005 orig_slot += btrfs_header_nritems(left);
2006 wret = push_node_left(trans, root, left, mid, 1);
2007 if (wret < 0)
2008 ret = wret;
2009 }
2010
2011 /*
2012 * then try to empty the rightmost buffer into the middle
2013 */
2014 if (right) {
2015 wret = push_node_left(trans, root, mid, right, 1);
2016 if (wret < 0 && wret != -ENOSPC)
2017 ret = wret;
2018 if (btrfs_header_nritems(right) == 0) {
2019 clean_tree_block(trans, root->fs_info, right);
2020 btrfs_tree_unlock(right);
2021 del_ptr(root, path, level + 1, pslot + 1);
2022 root_sub_used(root, right->len);
2023 btrfs_free_tree_block(trans, root, right, 0, 1);
2024 free_extent_buffer_stale(right);
2025 right = NULL;
2026 } else {
2027 struct btrfs_disk_key right_key;
2028 btrfs_node_key(right, &right_key, 0);
2029 tree_mod_log_set_node_key(root->fs_info, parent,
2030 pslot + 1, 0);
2031 btrfs_set_node_key(parent, &right_key, pslot + 1);
2032 btrfs_mark_buffer_dirty(parent);
2033 }
2034 }
2035 if (btrfs_header_nritems(mid) == 1) {
2036 /*
2037 * we're not allowed to leave a node with one item in the
2038 * tree during a delete. A deletion from lower in the tree
2039 * could try to delete the only pointer in this node.
2040 * So, pull some keys from the left.
2041 * There has to be a left pointer at this point because
2042 * otherwise we would have pulled some pointers from the
2043 * right
2044 */
2045 if (!left) {
2046 ret = -EROFS;
2047 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2048 goto enospc;
2049 }
2050 wret = balance_node_right(trans, root, mid, left);
2051 if (wret < 0) {
2052 ret = wret;
2053 goto enospc;
2054 }
2055 if (wret == 1) {
2056 wret = push_node_left(trans, root, left, mid, 1);
2057 if (wret < 0)
2058 ret = wret;
2059 }
2060 BUG_ON(wret == 1);
2061 }
2062 if (btrfs_header_nritems(mid) == 0) {
2063 clean_tree_block(trans, root->fs_info, mid);
2064 btrfs_tree_unlock(mid);
2065 del_ptr(root, path, level + 1, pslot);
2066 root_sub_used(root, mid->len);
2067 btrfs_free_tree_block(trans, root, mid, 0, 1);
2068 free_extent_buffer_stale(mid);
2069 mid = NULL;
2070 } else {
2071 /* update the parent key to reflect our changes */
2072 struct btrfs_disk_key mid_key;
2073 btrfs_node_key(mid, &mid_key, 0);
2074 tree_mod_log_set_node_key(root->fs_info, parent,
2075 pslot, 0);
2076 btrfs_set_node_key(parent, &mid_key, pslot);
2077 btrfs_mark_buffer_dirty(parent);
2078 }
2079
2080 /* update the path */
2081 if (left) {
2082 if (btrfs_header_nritems(left) > orig_slot) {
2083 extent_buffer_get(left);
2084 /* left was locked after cow */
2085 path->nodes[level] = left;
2086 path->slots[level + 1] -= 1;
2087 path->slots[level] = orig_slot;
2088 if (mid) {
2089 btrfs_tree_unlock(mid);
2090 free_extent_buffer(mid);
2091 }
2092 } else {
2093 orig_slot -= btrfs_header_nritems(left);
2094 path->slots[level] = orig_slot;
2095 }
2096 }
2097 /* double check we haven't messed things up */
2098 if (orig_ptr !=
2099 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2100 BUG();
2101 enospc:
2102 if (right) {
2103 btrfs_tree_unlock(right);
2104 free_extent_buffer(right);
2105 }
2106 if (left) {
2107 if (path->nodes[level] != left)
2108 btrfs_tree_unlock(left);
2109 free_extent_buffer(left);
2110 }
2111 return ret;
2112 }
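
/*
 * Editor's aside: stripped of locking, COW and tree-mod logging, the
 * function above applies a small set of occupancy rules. This toy helper
 * restates the decision ladder; struct toy_node and toy_balance_choice
 * are illustrative stand-ins, not btrfs structures.
 */
struct toy_node {
        int nritems;    /* pointers currently in the node */
        int has_parent; /* 0 for the root */
};

enum toy_action { TOY_PROMOTE_ONLY_CHILD, TOY_LEAVE_ALONE, TOY_MERGE };

static enum toy_action toy_balance_choice(const struct toy_node *n,
                                          int ptrs_per_block)
{
        if (!n->has_parent)
                /* a single-pointer root collapses into its child */
                return n->nritems == 1 ? TOY_PROMOTE_ONLY_CHILD
                                       : TOY_LEAVE_ALONE;
        if (n->nritems > ptrs_per_block / 4)
                return TOY_LEAVE_ALONE; /* still more than 1/4 full */
        /* under-full: push into left/right siblings, drop if emptied */
        return TOY_MERGE;
}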
2113
2114 /* Node balancing for insertion. Here we only split or push nodes around
2115 * when they are completely full. This is also done top down, so we
2116 * have to be pessimistic.
2117 */
2118 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2119 struct btrfs_root *root,
2120 struct btrfs_path *path, int level)
2121 {
2122 struct extent_buffer *right = NULL;
2123 struct extent_buffer *mid;
2124 struct extent_buffer *left = NULL;
2125 struct extent_buffer *parent = NULL;
2126 int ret = 0;
2127 int wret;
2128 int pslot;
2129 int orig_slot = path->slots[level];
2130
2131 if (level == 0)
2132 return 1;
2133
2134 mid = path->nodes[level];
2135 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2136
2137 if (level < BTRFS_MAX_LEVEL - 1) {
2138 parent = path->nodes[level + 1];
2139 pslot = path->slots[level + 1];
2140 }
2141
2142 if (!parent)
2143 return 1;
2144
2145 left = read_node_slot(root, parent, pslot - 1);
2146
2147 /* first, try to make some room in the middle buffer */
2148 if (left) {
2149 u32 left_nr;
2150
2151 btrfs_tree_lock(left);
2152 btrfs_set_lock_blocking(left);
2153
2154 left_nr = btrfs_header_nritems(left);
2155 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2156 wret = 1;
2157 } else {
2158 ret = btrfs_cow_block(trans, root, left, parent,
2159 pslot - 1, &left);
2160 if (ret)
2161 wret = 1;
2162 else {
2163 wret = push_node_left(trans, root,
2164 left, mid, 0);
2165 }
2166 }
2167 if (wret < 0)
2168 ret = wret;
2169 if (wret == 0) {
2170 struct btrfs_disk_key disk_key;
2171 orig_slot += left_nr;
2172 btrfs_node_key(mid, &disk_key, 0);
2173 tree_mod_log_set_node_key(root->fs_info, parent,
2174 pslot, 0);
2175 btrfs_set_node_key(parent, &disk_key, pslot);
2176 btrfs_mark_buffer_dirty(parent);
2177 if (btrfs_header_nritems(left) > orig_slot) {
2178 path->nodes[level] = left;
2179 path->slots[level + 1] -= 1;
2180 path->slots[level] = orig_slot;
2181 btrfs_tree_unlock(mid);
2182 free_extent_buffer(mid);
2183 } else {
2184 orig_slot -=
2185 btrfs_header_nritems(left);
2186 path->slots[level] = orig_slot;
2187 btrfs_tree_unlock(left);
2188 free_extent_buffer(left);
2189 }
2190 return 0;
2191 }
2192 btrfs_tree_unlock(left);
2193 free_extent_buffer(left);
2194 }
2195 right = read_node_slot(root, parent, pslot + 1);
2196
2197 /*
2198 * then try to push items from the middle buffer into the right one
2199 */
2200 if (right) {
2201 u32 right_nr;
2202
2203 btrfs_tree_lock(right);
2204 btrfs_set_lock_blocking(right);
2205
2206 right_nr = btrfs_header_nritems(right);
2207 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2208 wret = 1;
2209 } else {
2210 ret = btrfs_cow_block(trans, root, right,
2211 parent, pslot + 1,
2212 &right);
2213 if (ret)
2214 wret = 1;
2215 else {
2216 wret = balance_node_right(trans, root,
2217 right, mid);
2218 }
2219 }
2220 if (wret < 0)
2221 ret = wret;
2222 if (wret == 0) {
2223 struct btrfs_disk_key disk_key;
2224
2225 btrfs_node_key(right, &disk_key, 0);
2226 tree_mod_log_set_node_key(root->fs_info, parent,
2227 pslot + 1, 0);
2228 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2229 btrfs_mark_buffer_dirty(parent);
2230
2231 if (btrfs_header_nritems(mid) <= orig_slot) {
2232 path->nodes[level] = right;
2233 path->slots[level + 1] += 1;
2234 path->slots[level] = orig_slot -
2235 btrfs_header_nritems(mid);
2236 btrfs_tree_unlock(mid);
2237 free_extent_buffer(mid);
2238 } else {
2239 btrfs_tree_unlock(right);
2240 free_extent_buffer(right);
2241 }
2242 return 0;
2243 }
2244 btrfs_tree_unlock(right);
2245 free_extent_buffer(right);
2246 }
2247 return 1;
2248 }
2249
2250 /*
2251 * readahead one full node of leaves, finding things that are close
2252 * to the block in 'slot', and triggering readahead on them.
2253 */
2254 static void reada_for_search(struct btrfs_root *root,
2255 struct btrfs_path *path,
2256 int level, int slot, u64 objectid)
2257 {
2258 struct extent_buffer *node;
2259 struct btrfs_disk_key disk_key;
2260 u32 nritems;
2261 u64 search;
2262 u64 target;
2263 u64 nread = 0;
2264 u64 gen;
2265 struct extent_buffer *eb;
2266 u32 nr;
2267 u32 blocksize;
2268 u32 nscan = 0;
2269
2270 if (level != 1)
2271 return;
2272
2273 if (!path->nodes[level])
2274 return;
2275
2276 node = path->nodes[level];
2277
2278 search = btrfs_node_blockptr(node, slot);
2279 blocksize = root->nodesize;
2280 eb = btrfs_find_tree_block(root->fs_info, search);
2281 if (eb) {
2282 free_extent_buffer(eb);
2283 return;
2284 }
2285
2286 target = search;
2287
2288 nritems = btrfs_header_nritems(node);
2289 nr = slot;
2290
2291 while (1) {
2292 if (path->reada == READA_BACK) {
2293 if (nr == 0)
2294 break;
2295 nr--;
2296 } else if (path->reada == READA_FORWARD) {
2297 nr++;
2298 if (nr >= nritems)
2299 break;
2300 }
2301 if (path->reada == READA_BACK && objectid) {
2302 btrfs_node_key(node, &disk_key, nr);
2303 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2304 break;
2305 }
2306 search = btrfs_node_blockptr(node, nr);
2307 if ((search <= target && target - search <= 65536) ||
2308 (search > target && search - target <= 65536)) {
2309 gen = btrfs_node_ptr_generation(node, nr);
2310 readahead_tree_block(root, search);
2311 nread += blocksize;
2312 }
2313 nscan++;
2314 if ((nread > 65536 || nscan > 32))
2315 break;
2316 }
2317 }
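
/*
 * Editor's aside: the scan above only queues readahead for sibling blocks
 * whose disk address falls within 64KiB of the target, and it gives up
 * after 64KiB queued or 32 slots scanned. The window test, restated as a
 * self-contained sketch (within_reada_window is an illustrative name):
 */
#include <stdint.h>

static int within_reada_window(uint64_t target, uint64_t candidate)
{
        uint64_t dist = candidate > target ? candidate - target
                                           : target - candidate;
        return dist <= 65536;   /* the same 64KiB radius used above */
}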
2318
2319 static noinline void reada_for_balance(struct btrfs_root *root,
2320 struct btrfs_path *path, int level)
2321 {
2322 int slot;
2323 int nritems;
2324 struct extent_buffer *parent;
2325 struct extent_buffer *eb;
2326 u64 gen;
2327 u64 block1 = 0;
2328 u64 block2 = 0;
2329
2330 parent = path->nodes[level + 1];
2331 if (!parent)
2332 return;
2333
2334 nritems = btrfs_header_nritems(parent);
2335 slot = path->slots[level + 1];
2336
2337 if (slot > 0) {
2338 block1 = btrfs_node_blockptr(parent, slot - 1);
2339 gen = btrfs_node_ptr_generation(parent, slot - 1);
2340 eb = btrfs_find_tree_block(root->fs_info, block1);
2341 /*
2342 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2343 * don't want to return EAGAIN here. That will loop
2344 * forever.
2345 */
2346 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2347 block1 = 0;
2348 free_extent_buffer(eb);
2349 }
2350 if (slot + 1 < nritems) {
2351 block2 = btrfs_node_blockptr(parent, slot + 1);
2352 gen = btrfs_node_ptr_generation(parent, slot + 1);
2353 eb = btrfs_find_tree_block(root->fs_info, block2);
2354 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2355 block2 = 0;
2356 free_extent_buffer(eb);
2357 }
2358
2359 if (block1)
2360 readahead_tree_block(root, block1);
2361 if (block2)
2362 readahead_tree_block(root, block2);
2363 }
2364
2365
2366 /*
2367 * when we walk down the tree, it is usually safe to unlock the higher layers
2368 * in the tree. The exceptions are when our path goes through slot 0, because
2369 * operations on the tree might require changing key pointers higher up in the
2370 * tree.
2371 *
2372 * callers might also have set path->keep_locks, which tells this code to keep
2373 * the lock if the path points to the last slot in the block. This is part of
2374 * walking through the tree, and selecting the next slot in the higher block.
2375 *
2376 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
2377 * if lowest_unlock is 1, level 0 won't be unlocked.
2378 */
2379 static noinline void unlock_up(struct btrfs_path *path, int level,
2380 int lowest_unlock, int min_write_lock_level,
2381 int *write_lock_level)
2382 {
2383 int i;
2384 int skip_level = level;
2385 int no_skips = 0;
2386 struct extent_buffer *t;
2387
2388 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2389 if (!path->nodes[i])
2390 break;
2391 if (!path->locks[i])
2392 break;
2393 if (!no_skips && path->slots[i] == 0) {
2394 skip_level = i + 1;
2395 continue;
2396 }
2397 if (!no_skips && path->keep_locks) {
2398 u32 nritems;
2399 t = path->nodes[i];
2400 nritems = btrfs_header_nritems(t);
2401 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2402 skip_level = i + 1;
2403 continue;
2404 }
2405 }
2406 if (skip_level < i && i >= lowest_unlock)
2407 no_skips = 1;
2408
2409 t = path->nodes[i];
2410 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2411 btrfs_tree_unlock_rw(t, path->locks[i]);
2412 path->locks[i] = 0;
2413 if (write_lock_level &&
2414 i > min_write_lock_level &&
2415 i <= *write_lock_level) {
2416 *write_lock_level = i - 1;
2417 }
2418 }
2419 }
2420 }
2421
2422 /*
2423 * This releases any locks held in the path starting at level and
2424 * going all the way up to the root.
2425 *
2426 * btrfs_search_slot will keep the lock held on higher nodes in a few
2427 * corner cases, such as COW of the block at slot zero in the node. This
2428 * ignores those rules, and it should only be called when there are no
2429 * more updates to be done higher up in the tree.
2430 */
2431 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2432 {
2433 int i;
2434
2435 if (path->keep_locks)
2436 return;
2437
2438 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2439 if (!path->nodes[i])
2440 continue;
2441 if (!path->locks[i])
2442 continue;
2443 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2444 path->locks[i] = 0;
2445 }
2446 }
2447
2448 /*
2449 * helper function for btrfs_search_slot. The goal is to find a block
2450 * in cache without setting the path to blocking. If we find the block
2451 * we return zero and the path is unchanged.
2452 *
2453 * If we can't find the block, we set the path blocking and do some
2454 * reada. -EAGAIN is returned and the search must be repeated.
2455 */
2456 static int
2457 read_block_for_search(struct btrfs_trans_handle *trans,
2458 struct btrfs_root *root, struct btrfs_path *p,
2459 struct extent_buffer **eb_ret, int level, int slot,
2460 struct btrfs_key *key, u64 time_seq)
2461 {
2462 u64 blocknr;
2463 u64 gen;
2464 struct extent_buffer *b = *eb_ret;
2465 struct extent_buffer *tmp;
2466 int ret;
2467
2468 blocknr = btrfs_node_blockptr(b, slot);
2469 gen = btrfs_node_ptr_generation(b, slot);
2470
2471 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2472 if (tmp) {
2473 /* first we do an atomic uptodate check */
2474 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2475 *eb_ret = tmp;
2476 return 0;
2477 }
2478
2479 /* the pages were up to date, but we failed
2480 * the generation number check. Do a full
2481 * read for the generation number that is correct.
2482 * We must do this without dropping locks so
2483 * we can trust our generation number
2484 */
2485 btrfs_set_path_blocking(p);
2486
2487 /* now we're allowed to do a blocking uptodate check */
2488 ret = btrfs_read_buffer(tmp, gen);
2489 if (!ret) {
2490 *eb_ret = tmp;
2491 return 0;
2492 }
2493 free_extent_buffer(tmp);
2494 btrfs_release_path(p);
2495 return -EIO;
2496 }
2497
2498 /*
2499 * reduce lock contention at high levels
2500 * of the btree by dropping locks before
2501 * we read. Don't release the lock on the current
2502 * level because we need to walk this node to figure
2503 * out which blocks to read.
2504 */
2505 btrfs_unlock_up_safe(p, level + 1);
2506 btrfs_set_path_blocking(p);
2507
2508 free_extent_buffer(tmp);
2509 if (p->reada != READA_NONE)
2510 reada_for_search(root, p, level, slot, key->objectid);
2511
2512 btrfs_release_path(p);
2513
2514 ret = -EAGAIN;
2515 tmp = read_tree_block(root, blocknr, 0);
2516 if (!IS_ERR(tmp)) {
2517 /*
2518 * If the read above didn't mark this buffer up to date,
2519 * it will never end up being up to date. Set ret to EIO now
2520 * and give up so that our caller doesn't loop forever
2521 * on our EAGAINs.
2522 */
2523 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2524 ret = -EIO;
2525 free_extent_buffer(tmp);
2526 } else {
2527 ret = PTR_ERR(tmp);
2528 }
2529 return ret;
2530 }
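
/*
 * Editor's aside: once this helper has dropped the path it returns
 * -EAGAIN and the whole descent restarts from the root, which is exactly
 * the "goto again" pattern used by btrfs_search_slot below. Reduced to
 * its skeleton (do_step is a hypothetical stand-in for one descent step):
 */
#include <errno.h>

static int do_step(int attempt)
{
        /* pretend the first attempt had to drop locks and re-read */
        return attempt == 0 ? -EAGAIN : 0;
}

static int toy_lookup_with_restart(void)
{
        int attempt = 0;
        int err;
again:
        err = do_step(attempt++);
        if (err == -EAGAIN)
                goto again;     /* locks were dropped; redo the descent */
        return err;
}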
2531
2532 /*
2533 * helper function for btrfs_search_slot. This does all of the checks
2534 * for node-level blocks and does any balancing required based on
2535 * the ins_len.
2536 *
2537 * If no extra work was required, zero is returned. If we had to
2538 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2539 * start over
2540 */
2541 static int
2542 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2543 struct btrfs_root *root, struct btrfs_path *p,
2544 struct extent_buffer *b, int level, int ins_len,
2545 int *write_lock_level)
2546 {
2547 int ret;
2548 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2549 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2550 int sret;
2551
2552 if (*write_lock_level < level + 1) {
2553 *write_lock_level = level + 1;
2554 btrfs_release_path(p);
2555 goto again;
2556 }
2557
2558 btrfs_set_path_blocking(p);
2559 reada_for_balance(root, p, level);
2560 sret = split_node(trans, root, p, level);
2561 btrfs_clear_path_blocking(p, NULL, 0);
2562
2563 BUG_ON(sret > 0);
2564 if (sret) {
2565 ret = sret;
2566 goto done;
2567 }
2568 b = p->nodes[level];
2569 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2570 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2571 int sret;
2572
2573 if (*write_lock_level < level + 1) {
2574 *write_lock_level = level + 1;
2575 btrfs_release_path(p);
2576 goto again;
2577 }
2578
2579 btrfs_set_path_blocking(p);
2580 reada_for_balance(root, p, level);
2581 sret = balance_level(trans, root, p, level);
2582 btrfs_clear_path_blocking(p, NULL, 0);
2583
2584 if (sret) {
2585 ret = sret;
2586 goto done;
2587 }
2588 b = p->nodes[level];
2589 if (!b) {
2590 btrfs_release_path(p);
2591 goto again;
2592 }
2593 BUG_ON(btrfs_header_nritems(b) == 1);
2594 }
2595 return 0;
2596
2597 again:
2598 ret = -EAGAIN;
2599 done:
2600 return ret;
2601 }
2602
2603 static void key_search_validate(struct extent_buffer *b,
2604 struct btrfs_key *key,
2605 int level)
2606 {
2607 #ifdef CONFIG_BTRFS_ASSERT
2608 struct btrfs_disk_key disk_key;
2609
2610 btrfs_cpu_key_to_disk(&disk_key, key);
2611
2612 if (level == 0)
2613 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2614 offsetof(struct btrfs_leaf, items[0].key),
2615 sizeof(disk_key)));
2616 else
2617 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2618 offsetof(struct btrfs_node, ptrs[0].key),
2619 sizeof(disk_key)));
2620 #endif
2621 }
2622
2623 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2624 int level, int *prev_cmp, int *slot)
2625 {
2626 if (*prev_cmp != 0) {
2627 *prev_cmp = bin_search(b, key, level, slot);
2628 return *prev_cmp;
2629 }
2630
2631 key_search_validate(b, key, level);
2632 *slot = 0;
2633
2634 return 0;
2635 }
2636
2637 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2638 u64 iobjectid, u64 ioff, u8 key_type,
2639 struct btrfs_key *found_key)
2640 {
2641 int ret;
2642 struct btrfs_key key;
2643 struct extent_buffer *eb;
2644
2645 ASSERT(path);
2646 ASSERT(found_key);
2647
2648 key.type = key_type;
2649 key.objectid = iobjectid;
2650 key.offset = ioff;
2651
2652 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2653 if (ret < 0)
2654 return ret;
2655
2656 eb = path->nodes[0];
2657 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2658 ret = btrfs_next_leaf(fs_root, path);
2659 if (ret)
2660 return ret;
2661 eb = path->nodes[0];
2662 }
2663
2664 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2665 if (found_key->type != key.type ||
2666 found_key->objectid != key.objectid)
2667 return 1;
2668
2669 return 0;
2670 }
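
/*
 * Editor's aside: a typical caller hands btrfs_find_item a preallocated
 * path and distinguishes the 0 / 1 / negative returns. A hypothetical
 * caller, assuming the usual path helpers from this file (the inode-item
 * lookup is just an example):
 */
static int toy_find_inode_item(struct btrfs_root *fs_root, u64 ino)
{
        struct btrfs_path *path;
        struct btrfs_key found_key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* 0: item found; 1: no such item; < 0: error */
        ret = btrfs_find_item(fs_root, path, ino, 0,
                              BTRFS_INODE_ITEM_KEY, &found_key);
        btrfs_free_path(path);
        return ret;
}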
2671
2672 /*
2673 * look for key in the tree. path is filled in with nodes along the way.
2674 * If the key is found, we return zero and you can find the item in the leaf
2675 * level of the path (level 0).
2676 *
2677 * If the key isn't found, the path points to the slot where it should
2678 * be inserted, and 1 is returned. If there are other errors during the
2679 * search a negative error number is returned.
2680 *
2681 * If ins_len > 0, nodes and leaves will be split as we walk down the
2682 * tree. If ins_len < 0, nodes will be merged as we walk down the tree (if
2683 * possible).
2684 */
2685 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2686 *root, struct btrfs_key *key, struct btrfs_path *p, int
2687 ins_len, int cow)
2688 {
2689 struct extent_buffer *b;
2690 int slot;
2691 int ret;
2692 int err;
2693 int level;
2694 int lowest_unlock = 1;
2695 int root_lock;
2696 /* everything at write_lock_level or lower must be write locked */
2697 int write_lock_level = 0;
2698 u8 lowest_level = 0;
2699 int min_write_lock_level;
2700 int prev_cmp;
2701
2702 lowest_level = p->lowest_level;
2703 WARN_ON(lowest_level && ins_len > 0);
2704 WARN_ON(p->nodes[0] != NULL);
2705 BUG_ON(!cow && ins_len);
2706
2707 if (ins_len < 0) {
2708 lowest_unlock = 2;
2709
2710 /* when we are removing items, we might have to go up to level
2711 * two as we update tree pointers. Make sure we keep write locks
2712 * for those levels as well
2713 */
2714 write_lock_level = 2;
2715 } else if (ins_len > 0) {
2716 /*
2717 * for inserting items, make sure we have a write lock on
2718 * level 1 so we can update keys
2719 */
2720 write_lock_level = 1;
2721 }
2722
2723 if (!cow)
2724 write_lock_level = -1;
2725
2726 if (cow && (p->keep_locks || p->lowest_level))
2727 write_lock_level = BTRFS_MAX_LEVEL;
2728
2729 min_write_lock_level = write_lock_level;
2730
2731 again:
2732 prev_cmp = -1;
2733 /*
2734 * we try very hard to do read locks on the root
2735 */
2736 root_lock = BTRFS_READ_LOCK;
2737 level = 0;
2738 if (p->search_commit_root) {
2739 /*
2740 * the commit roots are read only
2741 * so we always do read locks
2742 */
2743 if (p->need_commit_sem)
2744 down_read(&root->fs_info->commit_root_sem);
2745 b = root->commit_root;
2746 extent_buffer_get(b);
2747 level = btrfs_header_level(b);
2748 if (p->need_commit_sem)
2749 up_read(&root->fs_info->commit_root_sem);
2750 if (!p->skip_locking)
2751 btrfs_tree_read_lock(b);
2752 } else {
2753 if (p->skip_locking) {
2754 b = btrfs_root_node(root);
2755 level = btrfs_header_level(b);
2756 } else {
2757 /* we don't know the level of the root node
2758 * until we actually have it read locked
2759 */
2760 b = btrfs_read_lock_root_node(root);
2761 level = btrfs_header_level(b);
2762 if (level <= write_lock_level) {
2763 /* whoops, must trade for write lock */
2764 btrfs_tree_read_unlock(b);
2765 free_extent_buffer(b);
2766 b = btrfs_lock_root_node(root);
2767 root_lock = BTRFS_WRITE_LOCK;
2768
2769 /* the level might have changed, check again */
2770 level = btrfs_header_level(b);
2771 }
2772 }
2773 }
2774 p->nodes[level] = b;
2775 if (!p->skip_locking)
2776 p->locks[level] = root_lock;
2777
2778 while (b) {
2779 level = btrfs_header_level(b);
2780
2781 /*
2782 * setup the path here so we can release it under lock
2783 * contention with the cow code
2784 */
2785 if (cow) {
2786 /*
2787 * if we don't really need to cow this block
2788 * then we don't want to set the path blocking,
2789 * so we test it here
2790 */
2791 if (!should_cow_block(trans, root, b)) {
2792 trans->dirty = true;
2793 goto cow_done;
2794 }
2795
2796 /*
2797 * must have write locks on this node and the
2798 * parent
2799 */
2800 if (level > write_lock_level ||
2801 (level + 1 > write_lock_level &&
2802 level + 1 < BTRFS_MAX_LEVEL &&
2803 p->nodes[level + 1])) {
2804 write_lock_level = level + 1;
2805 btrfs_release_path(p);
2806 goto again;
2807 }
2808
2809 btrfs_set_path_blocking(p);
2810 err = btrfs_cow_block(trans, root, b,
2811 p->nodes[level + 1],
2812 p->slots[level + 1], &b);
2813 if (err) {
2814 ret = err;
2815 goto done;
2816 }
2817 }
2818 cow_done:
2819 p->nodes[level] = b;
2820 btrfs_clear_path_blocking(p, NULL, 0);
2821
2822 /*
2823 * we have a lock on b and as long as we aren't changing
2824 * the tree, there is no way for the items in b to change.
2825 * It is safe to drop the lock on our parent before we
2826 * go through the expensive btree search on b.
2827 *
2828 * If we're inserting or deleting (ins_len != 0), then we might
2829 * be changing slot zero, which may require changing the parent.
2830 * So, we can't drop the lock until after we know which slot
2831 * we're operating on.
2832 */
2833 if (!ins_len && !p->keep_locks) {
2834 int u = level + 1;
2835
2836 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2837 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2838 p->locks[u] = 0;
2839 }
2840 }
2841
2842 ret = key_search(b, key, level, &prev_cmp, &slot);
2843 if (ret < 0)
2844 goto done;
2845
2846 if (level != 0) {
2847 int dec = 0;
2848 if (ret && slot > 0) {
2849 dec = 1;
2850 slot -= 1;
2851 }
2852 p->slots[level] = slot;
2853 err = setup_nodes_for_search(trans, root, p, b, level,
2854 ins_len, &write_lock_level);
2855 if (err == -EAGAIN)
2856 goto again;
2857 if (err) {
2858 ret = err;
2859 goto done;
2860 }
2861 b = p->nodes[level];
2862 slot = p->slots[level];
2863
2864 /*
2865 * slot 0 is special; if we change the key
2866 * we have to update the parent pointer
2867 * which means we must have a write lock
2868 * on the parent
2869 */
2870 if (slot == 0 && ins_len &&
2871 write_lock_level < level + 1) {
2872 write_lock_level = level + 1;
2873 btrfs_release_path(p);
2874 goto again;
2875 }
2876
2877 unlock_up(p, level, lowest_unlock,
2878 min_write_lock_level, &write_lock_level);
2879
2880 if (level == lowest_level) {
2881 if (dec)
2882 p->slots[level]++;
2883 goto done;
2884 }
2885
2886 err = read_block_for_search(trans, root, p,
2887 &b, level, slot, key, 0);
2888 if (err == -EAGAIN)
2889 goto again;
2890 if (err) {
2891 ret = err;
2892 goto done;
2893 }
2894
2895 if (!p->skip_locking) {
2896 level = btrfs_header_level(b);
2897 if (level <= write_lock_level) {
2898 err = btrfs_try_tree_write_lock(b);
2899 if (!err) {
2900 btrfs_set_path_blocking(p);
2901 btrfs_tree_lock(b);
2902 btrfs_clear_path_blocking(p, b,
2903 BTRFS_WRITE_LOCK);
2904 }
2905 p->locks[level] = BTRFS_WRITE_LOCK;
2906 } else {
2907 err = btrfs_tree_read_lock_atomic(b);
2908 if (!err) {
2909 btrfs_set_path_blocking(p);
2910 btrfs_tree_read_lock(b);
2911 btrfs_clear_path_blocking(p, b,
2912 BTRFS_READ_LOCK);
2913 }
2914 p->locks[level] = BTRFS_READ_LOCK;
2915 }
2916 p->nodes[level] = b;
2917 }
2918 } else {
2919 p->slots[level] = slot;
2920 if (ins_len > 0 &&
2921 btrfs_leaf_free_space(root, b) < ins_len) {
2922 if (write_lock_level < 1) {
2923 write_lock_level = 1;
2924 btrfs_release_path(p);
2925 goto again;
2926 }
2927
2928 btrfs_set_path_blocking(p);
2929 err = split_leaf(trans, root, key,
2930 p, ins_len, ret == 0);
2931 btrfs_clear_path_blocking(p, NULL, 0);
2932
2933 BUG_ON(err > 0);
2934 if (err) {
2935 ret = err;
2936 goto done;
2937 }
2938 }
2939 if (!p->search_for_split)
2940 unlock_up(p, level, lowest_unlock,
2941 min_write_lock_level, &write_lock_level);
2942 goto done;
2943 }
2944 }
2945 ret = 1;
2946 done:
2947 /*
2948 * we don't really know what they plan on doing with the path
2949 * from here on, so for now just mark it as blocking
2950 */
2951 if (!p->leave_spinning)
2952 btrfs_set_path_blocking(p);
2953 if (ret < 0 && !p->skip_release_on_error)
2954 btrfs_release_path(p);
2955 return ret;
2956 }
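
/*
 * Editor's aside: the contract documented above (0 for an exact hit, 1
 * with the path parked at the insertion position, negative on error)
 * gives callers the following shape. A hypothetical read-only lookup,
 * with no transaction, ins_len = 0 and cow = 0:
 */
static int toy_read_lookup(struct btrfs_root *root, struct btrfs_key *key)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret == 0) {
                /* found: item is at path->nodes[0], path->slots[0] */
        } else if (ret > 0) {
                /* not found: path points at the would-be insertion slot */
                ret = -ENOENT;
        }
        btrfs_free_path(path);
        return ret;
}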
2957
2958 /*
2959 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2960 * current state of the tree together with the operations recorded in the tree
2961 * modification log to search for the key in a previous version of this tree, as
2962 * denoted by the time_seq parameter.
2963 *
2964 * Naturally, there is no support for insert, delete or cow operations.
2965 *
2966 * The resulting path and return value will be set up as if we called
2967 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2968 */
2969 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2970 struct btrfs_path *p, u64 time_seq)
2971 {
2972 struct extent_buffer *b;
2973 int slot;
2974 int ret;
2975 int err;
2976 int level;
2977 int lowest_unlock = 1;
2978 u8 lowest_level = 0;
2979 int prev_cmp = -1;
2980
2981 lowest_level = p->lowest_level;
2982 WARN_ON(p->nodes[0] != NULL);
2983
2984 if (p->search_commit_root) {
2985 BUG_ON(time_seq);
2986 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2987 }
2988
2989 again:
2990 b = get_old_root(root, time_seq);
2991 level = btrfs_header_level(b);
2992 p->locks[level] = BTRFS_READ_LOCK;
2993
2994 while (b) {
2995 level = btrfs_header_level(b);
2996 p->nodes[level] = b;
2997 btrfs_clear_path_blocking(p, NULL, 0);
2998
2999 /*
3000 * we have a lock on b and as long as we aren't changing
3001 * the tree, there is no way for the items in b to change.
3002 * It is safe to drop the lock on our parent before we
3003 * go through the expensive btree search on b.
3004 */
3005 btrfs_unlock_up_safe(p, level + 1);
3006
3007 /*
3008 * Since we can unwind extent buffers we want to do a real search every
3009 * time.
3010 */
3011 prev_cmp = -1;
3012 ret = key_search(b, key, level, &prev_cmp, &slot);
3013
3014 if (level != 0) {
3015 int dec = 0;
3016 if (ret && slot > 0) {
3017 dec = 1;
3018 slot -= 1;
3019 }
3020 p->slots[level] = slot;
3021 unlock_up(p, level, lowest_unlock, 0, NULL);
3022
3023 if (level == lowest_level) {
3024 if (dec)
3025 p->slots[level]++;
3026 goto done;
3027 }
3028
3029 err = read_block_for_search(NULL, root, p, &b, level,
3030 slot, key, time_seq);
3031 if (err == -EAGAIN)
3032 goto again;
3033 if (err) {
3034 ret = err;
3035 goto done;
3036 }
3037
3038 level = btrfs_header_level(b);
3039 err = btrfs_tree_read_lock_atomic(b);
3040 if (!err) {
3041 btrfs_set_path_blocking(p);
3042 btrfs_tree_read_lock(b);
3043 btrfs_clear_path_blocking(p, b,
3044 BTRFS_READ_LOCK);
3045 }
3046 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3047 if (!b) {
3048 ret = -ENOMEM;
3049 goto done;
3050 }
3051 p->locks[level] = BTRFS_READ_LOCK;
3052 p->nodes[level] = b;
3053 } else {
3054 p->slots[level] = slot;
3055 unlock_up(p, level, lowest_unlock, 0, NULL);
3056 goto done;
3057 }
3058 }
3059 ret = 1;
3060 done:
3061 if (!p->leave_spinning)
3062 btrfs_set_path_blocking(p);
3063 if (ret < 0)
3064 btrfs_release_path(p);
3065
3066 return ret;
3067 }
3068
3069 /*
3070 * helper to use instead of search slot if no exact match is needed but
3071 * instead the next or previous item should be returned.
3072 * When find_higher is true, the next higher item is returned, the next lower
3073 * otherwise.
3074 * When return_any and find_higher are both true, and no higher item is found,
3075 * return the next lower instead.
3076 * When return_any is true and find_higher is false, and no lower item is found,
3077 * return the next higher instead.
3078 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3079 * < 0 on error
3080 */
3081 int btrfs_search_slot_for_read(struct btrfs_root *root,
3082 struct btrfs_key *key, struct btrfs_path *p,
3083 int find_higher, int return_any)
3084 {
3085 int ret;
3086 struct extent_buffer *leaf;
3087
3088 again:
3089 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3090 if (ret <= 0)
3091 return ret;
3092 /*
3093 * a return value of 1 means the path is at the position where the
3094 * item should be inserted. Normally this is the next bigger item,
3095 * but in case the previous item is the last in a leaf, path points
3096 * to the first free slot in the previous leaf, i.e. at an invalid
3097 * item.
3098 */
3099 leaf = p->nodes[0];
3100
3101 if (find_higher) {
3102 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3103 ret = btrfs_next_leaf(root, p);
3104 if (ret <= 0)
3105 return ret;
3106 if (!return_any)
3107 return 1;
3108 /*
3109 * no higher item found, return the next
3110 * lower instead
3111 */
3112 return_any = 0;
3113 find_higher = 0;
3114 btrfs_release_path(p);
3115 goto again;
3116 }
3117 } else {
3118 if (p->slots[0] == 0) {
3119 ret = btrfs_prev_leaf(root, p);
3120 if (ret < 0)
3121 return ret;
3122 if (!ret) {
3123 leaf = p->nodes[0];
3124 if (p->slots[0] == btrfs_header_nritems(leaf))
3125 p->slots[0]--;
3126 return 0;
3127 }
3128 if (!return_any)
3129 return 1;
3130 /*
3131 * no lower item found, return the next
3132 * higher instead
3133 */
3134 return_any = 0;
3135 find_higher = 1;
3136 btrfs_release_path(p);
3137 goto again;
3138 } else {
3139 --p->slots[0];
3140 }
3141 }
3142 return 0;
3143 }
3144
3145 /*
3146 * adjust the pointers going up the tree, starting at level
3147 * making sure the right key of each node points to 'key'.
3148 * This is used after shifting pointers to the left, so it stops
3149 * fixing up pointers when a given leaf/node is not in slot 0 of the
3150 * higher levels.
3151 *
3152 */
3153 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3154 struct btrfs_path *path,
3155 struct btrfs_disk_key *key, int level)
3156 {
3157 int i;
3158 struct extent_buffer *t;
3159
3160 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3161 int tslot = path->slots[i];
3162 if (!path->nodes[i])
3163 break;
3164 t = path->nodes[i];
3165 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3166 btrfs_set_node_key(t, key, tslot);
3167 btrfs_mark_buffer_dirty(path->nodes[i]);
3168 if (tslot != 0)
3169 break;
3170 }
3171 }
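
/*
 * Editor's aside: the walk above stops as soon as the fixed slot is
 * non-zero, because only a node's first key is mirrored in its parent.
 * The same invariant on toy per-level key arrays (illustrative names):
 */
static void toy_fixup_low_keys(long **keys, const int *slots, int levels,
                               long new_key, int level)
{
        int i;

        for (i = level; i < levels; i++) {
                keys[i][slots[i]] = new_key;    /* rewrite the separator */
                if (slots[i] != 0)
                        break;  /* not slot 0: parents are unaffected */
        }
}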
3172
3173 /*
3174 * update item key.
3175 *
3176 * This function isn't completely safe. It's the caller's responsibility
3177 * to ensure that the new key won't break the ordering.
3178 */
3179 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3180 struct btrfs_path *path,
3181 struct btrfs_key *new_key)
3182 {
3183 struct btrfs_disk_key disk_key;
3184 struct extent_buffer *eb;
3185 int slot;
3186
3187 eb = path->nodes[0];
3188 slot = path->slots[0];
3189 if (slot > 0) {
3190 btrfs_item_key(eb, &disk_key, slot - 1);
3191 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3192 }
3193 if (slot < btrfs_header_nritems(eb) - 1) {
3194 btrfs_item_key(eb, &disk_key, slot + 1);
3195 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3196 }
3197
3198 btrfs_cpu_key_to_disk(&disk_key, new_key);
3199 btrfs_set_item_key(eb, &disk_key, slot);
3200 btrfs_mark_buffer_dirty(eb);
3201 if (slot == 0)
3202 fixup_low_keys(fs_info, path, &disk_key, 1);
3203 }
3204
3205 /*
3206 * try to push data from one node into the next node left in the
3207 * tree.
3208 *
3209 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3210 * error, and > 0 if there was no room in the left hand block.
3211 */
3212 static int push_node_left(struct btrfs_trans_handle *trans,
3213 struct btrfs_root *root, struct extent_buffer *dst,
3214 struct extent_buffer *src, int empty)
3215 {
3216 int push_items = 0;
3217 int src_nritems;
3218 int dst_nritems;
3219 int ret = 0;
3220
3221 src_nritems = btrfs_header_nritems(src);
3222 dst_nritems = btrfs_header_nritems(dst);
3223 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3224 WARN_ON(btrfs_header_generation(src) != trans->transid);
3225 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3226
3227 if (!empty && src_nritems <= 8)
3228 return 1;
3229
3230 if (push_items <= 0)
3231 return 1;
3232
3233 if (empty) {
3234 push_items = min(src_nritems, push_items);
3235 if (push_items < src_nritems) {
3236 /* leave at least 8 pointers in the node if
3237 * we aren't going to empty it
3238 */
3239 if (src_nritems - push_items < 8) {
3240 if (push_items <= 8)
3241 return 1;
3242 push_items -= 8;
3243 }
3244 }
3245 } else
3246 push_items = min(src_nritems - 8, push_items);
3247
3248 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3249 push_items);
3250 if (ret) {
3251 btrfs_abort_transaction(trans, root, ret);
3252 return ret;
3253 }
3254 copy_extent_buffer(dst, src,
3255 btrfs_node_key_ptr_offset(dst_nritems),
3256 btrfs_node_key_ptr_offset(0),
3257 push_items * sizeof(struct btrfs_key_ptr));
3258
3259 if (push_items < src_nritems) {
3260 /*
3261 * don't call tree_mod_log_eb_move here, key removal was already
3262 * fully logged by tree_mod_log_eb_copy above.
3263 */
3264 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3265 btrfs_node_key_ptr_offset(push_items),
3266 (src_nritems - push_items) *
3267 sizeof(struct btrfs_key_ptr));
3268 }
3269 btrfs_set_header_nritems(src, src_nritems - push_items);
3270 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3271 btrfs_mark_buffer_dirty(src);
3272 btrfs_mark_buffer_dirty(dst);
3273
3274 return ret;
3275 }
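
/*
 * Editor's aside: minus the tree-mod logging and dirty tracking, the copy
 * above is an "append to dst, then compact src" over fixed-size records.
 * The same two moves in plain C (toy_push_left is an illustrative name):
 */
#include <stddef.h>
#include <string.h>

static void toy_push_left(void *dst, size_t dst_nr,
                          void *src, size_t src_nr,
                          size_t rec_size, size_t push_items)
{
        /* append the first push_items records of src after dst's last one */
        memcpy((char *)dst + dst_nr * rec_size, src, push_items * rec_size);
        /* close the gap at the front of src */
        memmove(src, (char *)src + push_items * rec_size,
                (src_nr - push_items) * rec_size);
}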
3276
3277 /*
3278 * try to push data from one node into the next node right in the
3279 * tree.
3280 *
3281 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3282 * error, and > 0 if there was no room in the right hand block.
3283 *
3284 * this will only push up to 1/2 the contents of the left node over
3285 */
3286 static int balance_node_right(struct btrfs_trans_handle *trans,
3287 struct btrfs_root *root,
3288 struct extent_buffer *dst,
3289 struct extent_buffer *src)
3290 {
3291 int push_items = 0;
3292 int max_push;
3293 int src_nritems;
3294 int dst_nritems;
3295 int ret = 0;
3296
3297 WARN_ON(btrfs_header_generation(src) != trans->transid);
3298 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3299
3300 src_nritems = btrfs_header_nritems(src);
3301 dst_nritems = btrfs_header_nritems(dst);
3302 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3303 if (push_items <= 0)
3304 return 1;
3305
3306 if (src_nritems < 4)
3307 return 1;
3308
3309 max_push = src_nritems / 2 + 1;
3310 /* don't try to empty the node */
3311 if (max_push >= src_nritems)
3312 return 1;
3313
3314 if (max_push < push_items)
3315 push_items = max_push;
3316
3317 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3318 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3319 btrfs_node_key_ptr_offset(0),
3320 (dst_nritems) *
3321 sizeof(struct btrfs_key_ptr));
3322
3323 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3324 src_nritems - push_items, push_items);
3325 if (ret) {
3326 btrfs_abort_transaction(trans, root, ret);
3327 return ret;
3328 }
3329 copy_extent_buffer(dst, src,
3330 btrfs_node_key_ptr_offset(0),
3331 btrfs_node_key_ptr_offset(src_nritems - push_items),
3332 push_items * sizeof(struct btrfs_key_ptr));
3333
3334 btrfs_set_header_nritems(src, src_nritems - push_items);
3335 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3336
3337 btrfs_mark_buffer_dirty(src);
3338 btrfs_mark_buffer_dirty(dst);
3339
3340 return ret;
3341 }
3342
3343 /*
3344 * helper function to insert a new root level in the tree.
3345 * A new node is allocated, and a single item is inserted to
3346 * point to the existing root
3347 *
3348 * returns zero on success or < 0 on failure.
3349 */
3350 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3351 struct btrfs_root *root,
3352 struct btrfs_path *path, int level)
3353 {
3354 u64 lower_gen;
3355 struct extent_buffer *lower;
3356 struct extent_buffer *c;
3357 struct extent_buffer *old;
3358 struct btrfs_disk_key lower_key;
3359
3360 BUG_ON(path->nodes[level]);
3361 BUG_ON(path->nodes[level-1] != root->node);
3362
3363 lower = path->nodes[level-1];
3364 if (level == 1)
3365 btrfs_item_key(lower, &lower_key, 0);
3366 else
3367 btrfs_node_key(lower, &lower_key, 0);
3368
3369 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3370 &lower_key, level, root->node->start, 0);
3371 if (IS_ERR(c))
3372 return PTR_ERR(c);
3373
3374 root_add_used(root, root->nodesize);
3375
3376 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3377 btrfs_set_header_nritems(c, 1);
3378 btrfs_set_header_level(c, level);
3379 btrfs_set_header_bytenr(c, c->start);
3380 btrfs_set_header_generation(c, trans->transid);
3381 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3382 btrfs_set_header_owner(c, root->root_key.objectid);
3383
3384 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3385 BTRFS_FSID_SIZE);
3386
3387 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3388 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3389
3390 btrfs_set_node_key(c, &lower_key, 0);
3391 btrfs_set_node_blockptr(c, 0, lower->start);
3392 lower_gen = btrfs_header_generation(lower);
3393 WARN_ON(lower_gen != trans->transid);
3394
3395 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3396
3397 btrfs_mark_buffer_dirty(c);
3398
3399 old = root->node;
3400 tree_mod_log_set_root_pointer(root, c, 0);
3401 rcu_assign_pointer(root->node, c);
3402
3403 /* the super has an extra ref to root->node */
3404 free_extent_buffer(old);
3405
3406 add_root_to_dirty_list(root);
3407 extent_buffer_get(c);
3408 path->nodes[level] = c;
3409 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3410 path->slots[level] = 0;
3411 return 0;
3412 }
3413
3414 /*
3415 * worker function to insert a single pointer in a node.
3416 * the node should have enough room for the pointer already
3417 *
3418 * slot and level indicate where you want the key to go, and
3419 * blocknr is the block the key points to.
3420 */
3421 static void insert_ptr(struct btrfs_trans_handle *trans,
3422 struct btrfs_root *root, struct btrfs_path *path,
3423 struct btrfs_disk_key *key, u64 bytenr,
3424 int slot, int level)
3425 {
3426 struct extent_buffer *lower;
3427 int nritems;
3428 int ret;
3429
3430 BUG_ON(!path->nodes[level]);
3431 btrfs_assert_tree_locked(path->nodes[level]);
3432 lower = path->nodes[level];
3433 nritems = btrfs_header_nritems(lower);
3434 BUG_ON(slot > nritems);
3435 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3436 if (slot != nritems) {
3437 if (level)
3438 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3439 slot, nritems - slot);
3440 memmove_extent_buffer(lower,
3441 btrfs_node_key_ptr_offset(slot + 1),
3442 btrfs_node_key_ptr_offset(slot),
3443 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3444 }
3445 if (level) {
3446 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3447 MOD_LOG_KEY_ADD, GFP_NOFS);
3448 BUG_ON(ret < 0);
3449 }
3450 btrfs_set_node_key(lower, key, slot);
3451 btrfs_set_node_blockptr(lower, slot, bytenr);
3452 WARN_ON(trans->transid == 0);
3453 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3454 btrfs_set_header_nritems(lower, nritems + 1);
3455 btrfs_mark_buffer_dirty(lower);
3456 }
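
/*
 * Editor's aside: making room for one pointer is a single memmove of the
 * tail, then a store into the freed slot. A toy version over a plain
 * array (illustrative names):
 */
#include <stddef.h>
#include <string.h>

static void toy_insert_ptr(long *keys, size_t nritems, size_t slot, long key)
{
        /* shift [slot, nritems) up by one to open the slot */
        memmove(&keys[slot + 1], &keys[slot],
                (nritems - slot) * sizeof(keys[0]));
        keys[slot] = key;
}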
3457
3458 /*
3459 * split the node at the specified level in path in two.
3460 * The path is corrected to point to the appropriate node after the split
3461 *
3462 * Before splitting this tries to make some room in the node by pushing
3463 * left and right, if either one works, it returns right away.
3464 *
3465 * returns 0 on success and < 0 on failure
3466 */
3467 static noinline int split_node(struct btrfs_trans_handle *trans,
3468 struct btrfs_root *root,
3469 struct btrfs_path *path, int level)
3470 {
3471 struct extent_buffer *c;
3472 struct extent_buffer *split;
3473 struct btrfs_disk_key disk_key;
3474 int mid;
3475 int ret;
3476 u32 c_nritems;
3477
3478 c = path->nodes[level];
3479 WARN_ON(btrfs_header_generation(c) != trans->transid);
3480 if (c == root->node) {
3481 /*
3482 * trying to split the root, let's make a new one
3483 *
3484 * tree mod log: We don't log removal of the old root in
3485 * insert_new_root, because that root buffer will be kept as a
3486 * normal node. We are going to log removal of half of the
3487 * elements below with tree_mod_log_eb_copy. We're holding a
3488 * tree lock on the buffer, which is why we cannot race with
3489 * other tree_mod_log users.
3490 */
3491 ret = insert_new_root(trans, root, path, level + 1);
3492 if (ret)
3493 return ret;
3494 } else {
3495 ret = push_nodes_for_insert(trans, root, path, level);
3496 c = path->nodes[level];
3497 if (!ret && btrfs_header_nritems(c) <
3498 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3499 return 0;
3500 if (ret < 0)
3501 return ret;
3502 }
3503
3504 c_nritems = btrfs_header_nritems(c);
3505 mid = (c_nritems + 1) / 2;
3506 btrfs_node_key(c, &disk_key, mid);
3507
3508 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3509 &disk_key, level, c->start, 0);
3510 if (IS_ERR(split))
3511 return PTR_ERR(split);
3512
3513 root_add_used(root, root->nodesize);
3514
3515 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3516 btrfs_set_header_level(split, btrfs_header_level(c));
3517 btrfs_set_header_bytenr(split, split->start);
3518 btrfs_set_header_generation(split, trans->transid);
3519 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3520 btrfs_set_header_owner(split, root->root_key.objectid);
3521 write_extent_buffer(split, root->fs_info->fsid,
3522 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3523 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3524 btrfs_header_chunk_tree_uuid(split),
3525 BTRFS_UUID_SIZE);
3526
3527 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3528 mid, c_nritems - mid);
3529 if (ret) {
3530 btrfs_abort_transaction(trans, root, ret);
3531 return ret;
3532 }
3533 copy_extent_buffer(split, c,
3534 btrfs_node_key_ptr_offset(0),
3535 btrfs_node_key_ptr_offset(mid),
3536 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3537 btrfs_set_header_nritems(split, c_nritems - mid);
3538 btrfs_set_header_nritems(c, mid);
3539 ret = 0;
3540
3541 btrfs_mark_buffer_dirty(c);
3542 btrfs_mark_buffer_dirty(split);
3543
3544 insert_ptr(trans, root, path, &disk_key, split->start,
3545 path->slots[level + 1] + 1, level + 1);
3546
3547 if (path->slots[level] >= mid) {
3548 path->slots[level] -= mid;
3549 btrfs_tree_unlock(c);
3550 free_extent_buffer(c);
3551 path->nodes[level] = split;
3552 path->slots[level + 1] += 1;
3553 } else {
3554 btrfs_tree_unlock(split);
3555 free_extent_buffer(split);
3556 }
3557 return ret;
3558 }
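
/*
 * Editor's aside: the split point is mid = (nritems + 1) / 2, so the
 * original node keeps the (possibly larger) first half and the new node
 * receives the remainder. As a toy array split (illustrative names):
 */
#include <stddef.h>

static size_t toy_split_node(const long *keys, size_t nritems, long *new_node)
{
        size_t mid = (nritems + 1) / 2;
        size_t i;

        for (i = mid; i < nritems; i++)
                new_node[i - mid] = keys[i];    /* move the upper half out */
        /* caller shrinks the old node to mid items, as split_node does */
        return mid;
}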
3559
3560 /*
3561 * how many bytes are required to store the items in a leaf. start
3562 * and nr indicate which items in the leaf to check. This totals up the
3563 * space used both by the item structs and the item data
3564 */
3565 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3566 {
3567 struct btrfs_item *start_item;
3568 struct btrfs_item *end_item;
3569 struct btrfs_map_token token;
3570 int data_len;
3571 int nritems = btrfs_header_nritems(l);
3572 int end = min(nritems, start + nr) - 1;
3573
3574 if (!nr)
3575 return 0;
3576 btrfs_init_map_token(&token);
3577 start_item = btrfs_item_nr(start);
3578 end_item = btrfs_item_nr(end);
3579 data_len = btrfs_token_item_offset(l, start_item, &token) +
3580 btrfs_token_item_size(l, start_item, &token);
3581 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3582 data_len += sizeof(struct btrfs_item) * nr;
3583 WARN_ON(data_len < 0);
3584 return data_len;
3585 }
3586
3587 /*
3588 * The space between the end of the leaf items and
3589 * the start of the leaf data. IOW, how much room
3590 * the leaf has left for both items and data
3591 */
3592 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3593 struct extent_buffer *leaf)
3594 {
3595 int nritems = btrfs_header_nritems(leaf);
3596 int ret;
3597 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3598 if (ret < 0) {
3599 btrfs_crit(root->fs_info,
3600 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3601 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3602 leaf_space_used(leaf, 0, nritems), nritems);
3603 }
3604 return ret;
3605 }
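
/*
 * Editor's aside: a leaf is a slotted page: item headers grow forward
 * from the header while item data grows backward from the end of the
 * block, so free space is whatever remains between the two. A toy model
 * (struct toy_item and the field names are illustrative):
 */
struct toy_item {
        unsigned int offset;    /* start of this item's data in the page */
        unsigned int size;      /* bytes of item data */
};

static int toy_leaf_free_space(unsigned int page_size, unsigned int hdr_size,
                               const struct toy_item *items,
                               unsigned int nritems)
{
        unsigned int items_end = hdr_size + nritems * sizeof(struct toy_item);
        /* data packs down from the end; the last item has the lowest offset */
        unsigned int data_start = nritems ? items[nritems - 1].offset
                                          : page_size;

        return (int)data_start - (int)items_end;
}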
3606
3607 /*
3608 * min_slot controls the lowest index we're willing to push to the
3609 * right. We'll push up to and including min_slot, but no lower.
3610 */
3611 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3612 struct btrfs_root *root,
3613 struct btrfs_path *path,
3614 int data_size, int empty,
3615 struct extent_buffer *right,
3616 int free_space, u32 left_nritems,
3617 u32 min_slot)
3618 {
3619 struct extent_buffer *left = path->nodes[0];
3620 struct extent_buffer *upper = path->nodes[1];
3621 struct btrfs_map_token token;
3622 struct btrfs_disk_key disk_key;
3623 int slot;
3624 u32 i;
3625 int push_space = 0;
3626 int push_items = 0;
3627 struct btrfs_item *item;
3628 u32 nr;
3629 u32 right_nritems;
3630 u32 data_end;
3631 u32 this_item_size;
3632
3633 btrfs_init_map_token(&token);
3634
3635 if (empty)
3636 nr = 0;
3637 else
3638 nr = max_t(u32, 1, min_slot);
3639
3640 if (path->slots[0] >= left_nritems)
3641 push_space += data_size;
3642
3643 slot = path->slots[1];
3644 i = left_nritems - 1;
3645 while (i >= nr) {
3646 item = btrfs_item_nr(i);
3647
3648 if (!empty && push_items > 0) {
3649 if (path->slots[0] > i)
3650 break;
3651 if (path->slots[0] == i) {
3652 int space = btrfs_leaf_free_space(root, left);
3653 if (space + push_space * 2 > free_space)
3654 break;
3655 }
3656 }
3657
3658 if (path->slots[0] == i)
3659 push_space += data_size;
3660
3661 this_item_size = btrfs_item_size(left, item);
3662 if (this_item_size + sizeof(*item) + push_space > free_space)
3663 break;
3664
3665 push_items++;
3666 push_space += this_item_size + sizeof(*item);
3667 if (i == 0)
3668 break;
3669 i--;
3670 }
3671
3672 if (push_items == 0)
3673 goto out_unlock;
3674
3675 WARN_ON(!empty && push_items == left_nritems);
3676
3677 /* push left to right */
3678 right_nritems = btrfs_header_nritems(right);
3679
3680 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3681 push_space -= leaf_data_end(root, left);
3682
3683 /* make room in the right data area */
3684 data_end = leaf_data_end(root, right);
3685 memmove_extent_buffer(right,
3686 btrfs_leaf_data(right) + data_end - push_space,
3687 btrfs_leaf_data(right) + data_end,
3688 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3689
3690 /* copy from the left data area */
3691 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3692 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3693 btrfs_leaf_data(left) + leaf_data_end(root, left),
3694 push_space);
3695
3696 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3697 btrfs_item_nr_offset(0),
3698 right_nritems * sizeof(struct btrfs_item));
3699
3700 /* copy the items from left to right */
3701 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3702 btrfs_item_nr_offset(left_nritems - push_items),
3703 push_items * sizeof(struct btrfs_item));
3704
3705 /* update the item pointers */
3706 right_nritems += push_items;
3707 btrfs_set_header_nritems(right, right_nritems);
3708 push_space = BTRFS_LEAF_DATA_SIZE(root);
3709 for (i = 0; i < right_nritems; i++) {
3710 item = btrfs_item_nr(i);
3711 push_space -= btrfs_token_item_size(right, item, &token);
3712 btrfs_set_token_item_offset(right, item, push_space, &token);
3713 }
3714
3715 left_nritems -= push_items;
3716 btrfs_set_header_nritems(left, left_nritems);
3717
3718 if (left_nritems)
3719 btrfs_mark_buffer_dirty(left);
3720 else
3721 clean_tree_block(trans, root->fs_info, left);
3722
3723 btrfs_mark_buffer_dirty(right);
3724
3725 btrfs_item_key(right, &disk_key, 0);
3726 btrfs_set_node_key(upper, &disk_key, slot + 1);
3727 btrfs_mark_buffer_dirty(upper);
3728
3729 /* then fixup the leaf pointer in the path */
3730 if (path->slots[0] >= left_nritems) {
3731 path->slots[0] -= left_nritems;
3732 if (btrfs_header_nritems(path->nodes[0]) == 0)
3733 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3734 btrfs_tree_unlock(path->nodes[0]);
3735 free_extent_buffer(path->nodes[0]);
3736 path->nodes[0] = right;
3737 path->slots[1] += 1;
3738 } else {
3739 btrfs_tree_unlock(right);
3740 free_extent_buffer(right);
3741 }
3742 return 0;
3743
3744 out_unlock:
3745 btrfs_tree_unlock(right);
3746 free_extent_buffer(right);
3747 return 1;
3748 }
3749
3750 /*
3751 * push some data in the path leaf to the right, trying to free up at
3752 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3753 *
3754 * returns 1 if the push failed because the other node didn't have enough
3755 * room, 0 if everything worked out and < 0 if there were major errors.
3756 *
3757 * this will push starting from min_slot to the end of the leaf. It won't
3758 * push any slot lower than min_slot
3759 */
3760 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3761 *root, struct btrfs_path *path,
3762 int min_data_size, int data_size,
3763 int empty, u32 min_slot)
3764 {
3765 struct extent_buffer *left = path->nodes[0];
3766 struct extent_buffer *right;
3767 struct extent_buffer *upper;
3768 int slot;
3769 int free_space;
3770 u32 left_nritems;
3771 int ret;
3772
3773 if (!path->nodes[1])
3774 return 1;
3775
3776 slot = path->slots[1];
3777 upper = path->nodes[1];
3778 if (slot >= btrfs_header_nritems(upper) - 1)
3779 return 1;
3780
3781 btrfs_assert_tree_locked(path->nodes[1]);
3782
3783 right = read_node_slot(root, upper, slot + 1);
3784 if (right == NULL)
3785 return 1;
3786
3787 btrfs_tree_lock(right);
3788 btrfs_set_lock_blocking(right);
3789
3790 free_space = btrfs_leaf_free_space(root, right);
3791 if (free_space < data_size)
3792 goto out_unlock;
3793
3794 /* cow and double check */
3795 ret = btrfs_cow_block(trans, root, right, upper,
3796 slot + 1, &right);
3797 if (ret)
3798 goto out_unlock;
3799
3800 free_space = btrfs_leaf_free_space(root, right);
3801 if (free_space < data_size)
3802 goto out_unlock;
3803
3804 left_nritems = btrfs_header_nritems(left);
3805 if (left_nritems == 0)
3806 goto out_unlock;
3807
3808 if (path->slots[0] == left_nritems && !empty) {
3809 /* Key greater than all keys in the leaf, right neighbor has
3810 * enough room for it and we're not emptying our leaf to delete
3811 * it, therefore use the right neighbor to insert the new item and
3812 * there's no need to touch/dirty our left leaf. */
3813 btrfs_tree_unlock(left);
3814 free_extent_buffer(left);
3815 path->nodes[0] = right;
3816 path->slots[0] = 0;
3817 path->slots[1]++;
3818 return 0;
3819 }
3820
3821 return __push_leaf_right(trans, root, path, min_data_size, empty,
3822 right, free_space, left_nritems, min_slot);
3823 out_unlock:
3824 btrfs_tree_unlock(right);
3825 free_extent_buffer(right);
3826 return 1;
3827 }
3828
3829 /*
3830 * push some data in the path leaf to the left, trying to free up at
3831 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3832 *
3833 * max_slot can put a limit on how far into the leaf we'll push items. The
3834 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3835 * items
3836 */
3837 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3838 struct btrfs_root *root,
3839 struct btrfs_path *path, int data_size,
3840 int empty, struct extent_buffer *left,
3841 int free_space, u32 right_nritems,
3842 u32 max_slot)
3843 {
3844 struct btrfs_disk_key disk_key;
3845 struct extent_buffer *right = path->nodes[0];
3846 int i;
3847 int push_space = 0;
3848 int push_items = 0;
3849 struct btrfs_item *item;
3850 u32 old_left_nritems;
3851 u32 nr;
3852 int ret = 0;
3853 u32 this_item_size;
3854 u32 old_left_item_size;
3855 struct btrfs_map_token token;
3856
3857 btrfs_init_map_token(&token);
3858
3859 if (empty)
3860 nr = min(right_nritems, max_slot);
3861 else
3862 nr = min(right_nritems - 1, max_slot);
3863
3864 for (i = 0; i < nr; i++) {
3865 item = btrfs_item_nr(i);
3866
3867 if (!empty && push_items > 0) {
3868 if (path->slots[0] < i)
3869 break;
3870 if (path->slots[0] == i) {
3871 int space = btrfs_leaf_free_space(root, right);
3872 if (space + push_space * 2 > free_space)
3873 break;
3874 }
3875 }
3876
3877 if (path->slots[0] == i)
3878 push_space += data_size;
3879
3880 this_item_size = btrfs_item_size(right, item);
3881 if (this_item_size + sizeof(*item) + push_space > free_space)
3882 break;
3883
3884 push_items++;
3885 push_space += this_item_size + sizeof(*item);
3886 }
3887
3888 if (push_items == 0) {
3889 ret = 1;
3890 goto out;
3891 }
3892 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3893
3894 /* push data from right to left */
3895 copy_extent_buffer(left, right,
3896 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3897 btrfs_item_nr_offset(0),
3898 push_items * sizeof(struct btrfs_item));
3899
3900 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3901 btrfs_item_offset_nr(right, push_items - 1);
3902
3903 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3904 leaf_data_end(root, left) - push_space,
3905 btrfs_leaf_data(right) +
3906 btrfs_item_offset_nr(right, push_items - 1),
3907 push_space);
3908 old_left_nritems = btrfs_header_nritems(left);
3909 BUG_ON(old_left_nritems <= 0);
3910
3911 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3912 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3913 u32 ioff;
3914
3915 item = btrfs_item_nr(i);
3916
3917 ioff = btrfs_token_item_offset(left, item, &token);
3918 btrfs_set_token_item_offset(left, item,
3919 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3920 &token);
3921 }
3922 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3923
3924 /* fixup right node */
3925 if (push_items > right_nritems)
3926 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3927 right_nritems);
3928
3929 if (push_items < right_nritems) {
3930 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3931 leaf_data_end(root, right);
3932 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3933 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3934 btrfs_leaf_data(right) +
3935 leaf_data_end(root, right), push_space);
3936
3937 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3938 btrfs_item_nr_offset(push_items),
3939 (btrfs_header_nritems(right) - push_items) *
3940 sizeof(struct btrfs_item));
3941 }
3942 right_nritems -= push_items;
3943 btrfs_set_header_nritems(right, right_nritems);
3944 push_space = BTRFS_LEAF_DATA_SIZE(root);
3945 for (i = 0; i < right_nritems; i++) {
3946 item = btrfs_item_nr(i);
3947
3948 push_space = push_space - btrfs_token_item_size(right,
3949 item, &token);
3950 btrfs_set_token_item_offset(right, item, push_space, &token);
3951 }
3952
3953 btrfs_mark_buffer_dirty(left);
3954 if (right_nritems)
3955 btrfs_mark_buffer_dirty(right);
3956 else
3957 clean_tree_block(trans, root->fs_info, right);
3958
3959 btrfs_item_key(right, &disk_key, 0);
3960 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3961
3962 /* then fixup the leaf pointer in the path */
3963 if (path->slots[0] < push_items) {
3964 path->slots[0] += old_left_nritems;
3965 btrfs_tree_unlock(path->nodes[0]);
3966 free_extent_buffer(path->nodes[0]);
3967 path->nodes[0] = left;
3968 path->slots[1] -= 1;
3969 } else {
3970 btrfs_tree_unlock(left);
3971 free_extent_buffer(left);
3972 path->slots[0] -= push_items;
3973 }
3974 BUG_ON(path->slots[0] < 0);
3975 return ret;
3976 out:
3977 btrfs_tree_unlock(left);
3978 free_extent_buffer(left);
3979 return ret;
3980 }
3981
3982 /*
3983 * push some data in the path leaf to the left, trying to free up at
3984 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3985 *
3986 * max_slot can put a limit on how far into the leaf we'll push items. The
3987 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3988 * items
3989 */
3990 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3991 *root, struct btrfs_path *path, int min_data_size,
3992 int data_size, int empty, u32 max_slot)
3993 {
3994 struct extent_buffer *right = path->nodes[0];
3995 struct extent_buffer *left;
3996 int slot;
3997 int free_space;
3998 u32 right_nritems;
3999 int ret = 0;
4000
4001 slot = path->slots[1];
4002 if (slot == 0)
4003 return 1;
4004 if (!path->nodes[1])
4005 return 1;
4006
4007 right_nritems = btrfs_header_nritems(right);
4008 if (right_nritems == 0)
4009 return 1;
4010
4011 btrfs_assert_tree_locked(path->nodes[1]);
4012
4013 left = read_node_slot(root, path->nodes[1], slot - 1);
4014 if (left == NULL)
4015 return 1;
4016
4017 btrfs_tree_lock(left);
4018 btrfs_set_lock_blocking(left);
4019
4020 free_space = btrfs_leaf_free_space(root, left);
4021 if (free_space < data_size) {
4022 ret = 1;
4023 goto out;
4024 }
4025
4026 /* cow and double check */
4027 ret = btrfs_cow_block(trans, root, left,
4028 path->nodes[1], slot - 1, &left);
4029 if (ret) {
4030 /* we hit -ENOSPC, but it isn't fatal here */
4031 if (ret == -ENOSPC)
4032 ret = 1;
4033 goto out;
4034 }
4035
4036 free_space = btrfs_leaf_free_space(root, left);
4037 if (free_space < data_size) {
4038 ret = 1;
4039 goto out;
4040 }
4041
4042 return __push_leaf_left(trans, root, path, min_data_size,
4043 empty, left, free_space, right_nritems,
4044 max_slot);
4045 out:
4046 btrfs_tree_unlock(left);
4047 free_extent_buffer(left);
4048 return ret;
4049 }
4050
4051 /*
4052 * split the path's leaf in two, making sure there is at least data_size
4053 * available for the resulting leaf level of the path.
4054 */
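/*
 * Note on the offset fixup below: leaf data grows down from the end of
 * the block, so after the raw copy every item offset in 'right' must be
 * rebased by rt_data_off for the payload to land at the tail of the new
 * leaf.
 */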
4055 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4056 struct btrfs_root *root,
4057 struct btrfs_path *path,
4058 struct extent_buffer *l,
4059 struct extent_buffer *right,
4060 int slot, int mid, int nritems)
4061 {
4062 int data_copy_size;
4063 int rt_data_off;
4064 int i;
4065 struct btrfs_disk_key disk_key;
4066 struct btrfs_map_token token;
4067
4068 btrfs_init_map_token(&token);
4069
4070 nritems = nritems - mid;
4071 btrfs_set_header_nritems(right, nritems);
4072 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4073
4074 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4075 btrfs_item_nr_offset(mid),
4076 nritems * sizeof(struct btrfs_item));
4077
4078 copy_extent_buffer(right, l,
4079 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4080 data_copy_size, btrfs_leaf_data(l) +
4081 leaf_data_end(root, l), data_copy_size);
4082
4083 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4084 btrfs_item_end_nr(l, mid);
4085
4086 for (i = 0; i < nritems; i++) {
4087 struct btrfs_item *item = btrfs_item_nr(i);
4088 u32 ioff;
4089
4090 ioff = btrfs_token_item_offset(right, item, &token);
4091 btrfs_set_token_item_offset(right, item,
4092 ioff + rt_data_off, &token);
4093 }
4094
4095 btrfs_set_header_nritems(l, mid);
4096 btrfs_item_key(right, &disk_key, 0);
4097 insert_ptr(trans, root, path, &disk_key, right->start,
4098 path->slots[1] + 1, 1);
4099
4100 btrfs_mark_buffer_dirty(right);
4101 btrfs_mark_buffer_dirty(l);
4102 BUG_ON(path->slots[0] != slot);
4103
4104 if (mid <= slot) {
4105 btrfs_tree_unlock(path->nodes[0]);
4106 free_extent_buffer(path->nodes[0]);
4107 path->nodes[0] = right;
4108 path->slots[0] -= mid;
4109 path->slots[1] += 1;
4110 } else {
4111 btrfs_tree_unlock(right);
4112 free_extent_buffer(right);
4113 }
4114
4115 BUG_ON(path->slots[0] < 0);
4116 }
4117
4118 /*
4119 * double splits happen when we need to insert a big item in the middle
4120 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4121 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4122 * A B C
4123 *
4124 * We avoid this by trying to push the items on either side of our target
4125 * into the adjacent leaves. If all goes well we can avoid the double split
4126 * completely.
4127 */
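/*
 * Returns 0 when at least one push made progress (our slot now sits at
 * a leaf boundary or the leaf has enough room), 1 when nothing could be
 * pushed, and < 0 on error.
 */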
4128 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4129 struct btrfs_root *root,
4130 struct btrfs_path *path,
4131 int data_size)
4132 {
4133 int ret;
4134 int progress = 0;
4135 int slot;
4136 u32 nritems;
4137 int space_needed = data_size;
4138
4139 slot = path->slots[0];
4140 if (slot < btrfs_header_nritems(path->nodes[0]))
4141 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4142
4143 /*
4144 * try to push all the items after our slot into the
4145 * right leaf
4146 */
4147 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4148 if (ret < 0)
4149 return ret;
4150
4151 if (ret == 0)
4152 progress++;
4153
4154 nritems = btrfs_header_nritems(path->nodes[0]);
4155 /*
4156 * our goal is to get our slot at the start or end of a leaf. If
4157 * we've done so we're done
4158 */
4159 if (path->slots[0] == 0 || path->slots[0] == nritems)
4160 return 0;
4161
4162 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4163 return 0;
4164
4165 /* try to push all the items before our slot into the left leaf */
4166 slot = path->slots[0];
4167 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4168 if (ret < 0)
4169 return ret;
4170
4171 if (ret == 0)
4172 progress++;
4173
4174 if (progress)
4175 return 0;
4176 return 1;
4177 }
4178
4179 /*
4180 * split the path's leaf in two, making sure there is at least data_size
4181 * available for the resulting leaf level of the path.
4182 *
4183 * returns 0 if all went well and < 0 on failure.
4184 */
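/*
 * The local 'split' flag encodes the strategy chosen below: 1 is a
 * normal split at 'mid', 0 means the new item will sit alone in the
 * fresh right leaf without moving any existing items, and 2 means the
 * half that keeps our slot is still too full, so we split once more.
 */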
4185 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *root,
4187 struct btrfs_key *ins_key,
4188 struct btrfs_path *path, int data_size,
4189 int extend)
4190 {
4191 struct btrfs_disk_key disk_key;
4192 struct extent_buffer *l;
4193 u32 nritems;
4194 int mid;
4195 int slot;
4196 struct extent_buffer *right;
4197 struct btrfs_fs_info *fs_info = root->fs_info;
4198 int ret = 0;
4199 int wret;
4200 int split;
4201 int num_doubles = 0;
4202 int tried_avoid_double = 0;
4203
4204 l = path->nodes[0];
4205 slot = path->slots[0];
4206 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4207 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4208 return -EOVERFLOW;
4209
4210 /* first try to make some room by pushing left and right */
4211 if (data_size && path->nodes[1]) {
4212 int space_needed = data_size;
4213
4214 if (slot < btrfs_header_nritems(l))
4215 space_needed -= btrfs_leaf_free_space(root, l);
4216
4217 wret = push_leaf_right(trans, root, path, space_needed,
4218 space_needed, 0, 0);
4219 if (wret < 0)
4220 return wret;
4221 if (wret) {
4222 wret = push_leaf_left(trans, root, path, space_needed,
4223 space_needed, 0, (u32)-1);
4224 if (wret < 0)
4225 return wret;
4226 }
4227 l = path->nodes[0];
4228
4229 /* did the pushes work? */
4230 if (btrfs_leaf_free_space(root, l) >= data_size)
4231 return 0;
4232 }
4233
4234 if (!path->nodes[1]) {
4235 ret = insert_new_root(trans, root, path, 1);
4236 if (ret)
4237 return ret;
4238 }
4239 again:
4240 split = 1;
4241 l = path->nodes[0];
4242 slot = path->slots[0];
4243 nritems = btrfs_header_nritems(l);
4244 mid = (nritems + 1) / 2;
4245
4246 if (mid <= slot) {
4247 if (nritems == 1 ||
4248 leaf_space_used(l, mid, nritems - mid) + data_size >
4249 BTRFS_LEAF_DATA_SIZE(root)) {
4250 if (slot >= nritems) {
4251 split = 0;
4252 } else {
4253 mid = slot;
4254 if (mid != nritems &&
4255 leaf_space_used(l, mid, nritems - mid) +
4256 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4257 if (data_size && !tried_avoid_double)
4258 goto push_for_double;
4259 split = 2;
4260 }
4261 }
4262 }
4263 } else {
4264 if (leaf_space_used(l, 0, mid) + data_size >
4265 BTRFS_LEAF_DATA_SIZE(root)) {
4266 if (!extend && data_size && slot == 0) {
4267 split = 0;
4268 } else if ((extend || !data_size) && slot == 0) {
4269 mid = 1;
4270 } else {
4271 mid = slot;
4272 if (mid != nritems &&
4273 leaf_space_used(l, mid, nritems - mid) +
4274 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4275 if (data_size && !tried_avoid_double)
4276 goto push_for_double;
4277 split = 2;
4278 }
4279 }
4280 }
4281 }
4282
4283 if (split == 0)
4284 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4285 else
4286 btrfs_item_key(l, &disk_key, mid);
4287
4288 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4289 &disk_key, 0, l->start, 0);
4290 if (IS_ERR(right))
4291 return PTR_ERR(right);
4292
4293 root_add_used(root, root->nodesize);
4294
4295 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4296 btrfs_set_header_bytenr(right, right->start);
4297 btrfs_set_header_generation(right, trans->transid);
4298 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4299 btrfs_set_header_owner(right, root->root_key.objectid);
4300 btrfs_set_header_level(right, 0);
4301 write_extent_buffer(right, fs_info->fsid,
4302 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4303
4304 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4305 btrfs_header_chunk_tree_uuid(right),
4306 BTRFS_UUID_SIZE);
4307
4308 if (split == 0) {
4309 if (mid <= slot) {
4310 btrfs_set_header_nritems(right, 0);
4311 insert_ptr(trans, root, path, &disk_key, right->start,
4312 path->slots[1] + 1, 1);
4313 btrfs_tree_unlock(path->nodes[0]);
4314 free_extent_buffer(path->nodes[0]);
4315 path->nodes[0] = right;
4316 path->slots[0] = 0;
4317 path->slots[1] += 1;
4318 } else {
4319 btrfs_set_header_nritems(right, 0);
4320 insert_ptr(trans, root, path, &disk_key, right->start,
4321 path->slots[1], 1);
4322 btrfs_tree_unlock(path->nodes[0]);
4323 free_extent_buffer(path->nodes[0]);
4324 path->nodes[0] = right;
4325 path->slots[0] = 0;
4326 if (path->slots[1] == 0)
4327 fixup_low_keys(fs_info, path, &disk_key, 1);
4328 }
4329 btrfs_mark_buffer_dirty(right);
4330 return ret;
4331 }
4332
4333 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4334
4335 if (split == 2) {
4336 BUG_ON(num_doubles != 0);
4337 num_doubles++;
4338 goto again;
4339 }
4340
4341 return 0;
4342
4343 push_for_double:
4344 push_for_double_split(trans, root, path, data_size);
4345 tried_avoid_double = 1;
4346 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4347 return 0;
4348 goto again;
4349 }
4350
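/*
 * Re-search for the item at path->slots[0] and split its leaf so that
 * ins_len more bytes fit. Only EXTENT_DATA and EXTENT_CSUM keys are
 * expected here. Returns 0 with the leaf write locked, -EAGAIN if the
 * item moved or changed while the path was released, or another
 * negative errno on error.
 */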
4351 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4352 struct btrfs_root *root,
4353 struct btrfs_path *path, int ins_len)
4354 {
4355 struct btrfs_key key;
4356 struct extent_buffer *leaf;
4357 struct btrfs_file_extent_item *fi;
4358 u64 extent_len = 0;
4359 u32 item_size;
4360 int ret;
4361
4362 leaf = path->nodes[0];
4363 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4364
4365 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4366 key.type != BTRFS_EXTENT_CSUM_KEY);
4367
4368 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4369 return 0;
4370
4371 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4372 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4373 fi = btrfs_item_ptr(leaf, path->slots[0],
4374 struct btrfs_file_extent_item);
4375 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4376 }
4377 btrfs_release_path(path);
4378
4379 path->keep_locks = 1;
4380 path->search_for_split = 1;
4381 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4382 path->search_for_split = 0;
4383 if (ret > 0)
4384 ret = -EAGAIN;
4385 if (ret < 0)
4386 goto err;
4387
4388 ret = -EAGAIN;
4389 leaf = path->nodes[0];
4390 /* if our item isn't there, return now */
4391 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4392 goto err;
4393
4394 /* the leaf has changed, it now has room. return now */
4395 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4396 goto err;
4397
4398 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4399 fi = btrfs_item_ptr(leaf, path->slots[0],
4400 struct btrfs_file_extent_item);
4401 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4402 goto err;
4403 }
4404
4405 btrfs_set_path_blocking(path);
4406 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4407 if (ret)
4408 goto err;
4409
4410 path->keep_locks = 0;
4411 btrfs_unlock_up_safe(path, 1);
4412 return 0;
4413 err:
4414 path->keep_locks = 0;
4415 return ret;
4416 }
4417
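/*
 * Worker for btrfs_split_item(). The leaf must already have room for
 * one more struct btrfs_item. The combined payload stays in the same
 * data region; it is rewritten so the original item keeps the first
 * split_offset bytes and the new item (under 'new_key') gets the rest.
 */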
4418 static noinline int split_item(struct btrfs_trans_handle *trans,
4419 struct btrfs_root *root,
4420 struct btrfs_path *path,
4421 struct btrfs_key *new_key,
4422 unsigned long split_offset)
4423 {
4424 struct extent_buffer *leaf;
4425 struct btrfs_item *item;
4426 struct btrfs_item *new_item;
4427 int slot;
4428 char *buf;
4429 u32 nritems;
4430 u32 item_size;
4431 u32 orig_offset;
4432 struct btrfs_disk_key disk_key;
4433
4434 leaf = path->nodes[0];
4435 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4436
4437 btrfs_set_path_blocking(path);
4438
4439 item = btrfs_item_nr(path->slots[0]);
4440 orig_offset = btrfs_item_offset(leaf, item);
4441 item_size = btrfs_item_size(leaf, item);
4442
4443 buf = kmalloc(item_size, GFP_NOFS);
4444 if (!buf)
4445 return -ENOMEM;
4446
4447 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4448 path->slots[0]), item_size);
4449
4450 slot = path->slots[0] + 1;
4451 nritems = btrfs_header_nritems(leaf);
4452 if (slot != nritems) {
4453 /* shift the items */
4454 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4455 btrfs_item_nr_offset(slot),
4456 (nritems - slot) * sizeof(struct btrfs_item));
4457 }
4458
4459 btrfs_cpu_key_to_disk(&disk_key, new_key);
4460 btrfs_set_item_key(leaf, &disk_key, slot);
4461
4462 new_item = btrfs_item_nr(slot);
4463
4464 btrfs_set_item_offset(leaf, new_item, orig_offset);
4465 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4466
4467 btrfs_set_item_offset(leaf, item,
4468 orig_offset + item_size - split_offset);
4469 btrfs_set_item_size(leaf, item, split_offset);
4470
4471 btrfs_set_header_nritems(leaf, nritems + 1);
4472
4473 /* write the data for the start of the original item */
4474 write_extent_buffer(leaf, buf,
4475 btrfs_item_ptr_offset(leaf, path->slots[0]),
4476 split_offset);
4477
4478 /* write the data for the new item */
4479 write_extent_buffer(leaf, buf + split_offset,
4480 btrfs_item_ptr_offset(leaf, slot),
4481 item_size - split_offset);
4482 btrfs_mark_buffer_dirty(leaf);
4483
4484 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4485 kfree(buf);
4486 return 0;
4487 }
4488
4489 /*
4490 * This function splits a single item into two items,
4491 * giving 'new_key' to the new item and splitting the
4492 * old one at split_offset (from the start of the item).
4493 *
4494 * The path may be released by this operation. After
4495 * the split, the path is pointing to the old item. The
4496 * new item is going to be in the same node as the old one.
4497 *
4498 * Note, the item being split must be small enough to live alone on
4499 * a tree block with room for one extra struct btrfs_item
4500 *
4501 * This allows us to split the item in place, keeping a lock on the
4502 * leaf the entire time.
4503 */
4504 int btrfs_split_item(struct btrfs_trans_handle *trans,
4505 struct btrfs_root *root,
4506 struct btrfs_path *path,
4507 struct btrfs_key *new_key,
4508 unsigned long split_offset)
4509 {
4510 int ret;
4511 ret = setup_leaf_for_split(trans, root, path,
4512 sizeof(struct btrfs_item));
4513 if (ret)
4514 return ret;
4515
4516 ret = split_item(trans, root, path, new_key, split_offset);
4517 return ret;
4518 }
4519
4520 /*
4521 * This function duplicates an item, giving 'new_key' to the new item.
4522 * It guarantees both items live in the same tree leaf and the new item
4523 * is contiguous with the original item.
4524 *
4525 * This allows us to split a file extent in place, keeping a lock on the
4526 * leaf the entire time.
4527 */
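/*
 * On return the path points at the new copy, which sits in the slot
 * right after the original; callers typically rewrite part of one copy
 * afterwards, e.g. when splitting a file extent.
 */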
4528 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4529 struct btrfs_root *root,
4530 struct btrfs_path *path,
4531 struct btrfs_key *new_key)
4532 {
4533 struct extent_buffer *leaf;
4534 int ret;
4535 u32 item_size;
4536
4537 leaf = path->nodes[0];
4538 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4539 ret = setup_leaf_for_split(trans, root, path,
4540 item_size + sizeof(struct btrfs_item));
4541 if (ret)
4542 return ret;
4543
4544 path->slots[0]++;
4545 setup_items_for_insert(root, path, new_key, &item_size,
4546 item_size, item_size +
4547 sizeof(struct btrfs_item), 1);
4548 leaf = path->nodes[0];
4549 memcpy_extent_buffer(leaf,
4550 btrfs_item_ptr_offset(leaf, path->slots[0]),
4551 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4552 item_size);
4553 return 0;
4554 }
4555
4556 /*
4557 * make the item pointed to by the path smaller. new_size indicates
4558 * how small to make it, and from_end tells us if we just chop bytes
4559 * off the end of the item or if we shift the item to chop bytes off
4560 * the front.
4561 */
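/*
 * When chopping bytes off the front, the tail of the payload stays in
 * place and the item key's offset is bumped by the bytes removed; an
 * inline file extent additionally has its fixed header moved up to the
 * new start of the item.
 */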
4562 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4563 u32 new_size, int from_end)
4564 {
4565 int slot;
4566 struct extent_buffer *leaf;
4567 struct btrfs_item *item;
4568 u32 nritems;
4569 unsigned int data_end;
4570 unsigned int old_data_start;
4571 unsigned int old_size;
4572 unsigned int size_diff;
4573 int i;
4574 struct btrfs_map_token token;
4575
4576 btrfs_init_map_token(&token);
4577
4578 leaf = path->nodes[0];
4579 slot = path->slots[0];
4580
4581 old_size = btrfs_item_size_nr(leaf, slot);
4582 if (old_size == new_size)
4583 return;
4584
4585 nritems = btrfs_header_nritems(leaf);
4586 data_end = leaf_data_end(root, leaf);
4587
4588 old_data_start = btrfs_item_offset_nr(leaf, slot);
4589
4590 size_diff = old_size - new_size;
4591
4592 BUG_ON(slot < 0);
4593 BUG_ON(slot >= nritems);
4594
4595 /*
4596 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4597 */
4598 /* first correct the data pointers */
4599 for (i = slot; i < nritems; i++) {
4600 u32 ioff;
4601 item = btrfs_item_nr(i);
4602
4603 ioff = btrfs_token_item_offset(leaf, item, &token);
4604 btrfs_set_token_item_offset(leaf, item,
4605 ioff + size_diff, &token);
4606 }
4607
4608 /* shift the data */
4609 if (from_end) {
4610 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4611 data_end + size_diff, btrfs_leaf_data(leaf) +
4612 data_end, old_data_start + new_size - data_end);
4613 } else {
4614 struct btrfs_disk_key disk_key;
4615 u64 offset;
4616
4617 btrfs_item_key(leaf, &disk_key, slot);
4618
4619 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4620 unsigned long ptr;
4621 struct btrfs_file_extent_item *fi;
4622
4623 fi = btrfs_item_ptr(leaf, slot,
4624 struct btrfs_file_extent_item);
4625 fi = (struct btrfs_file_extent_item *)(
4626 (unsigned long)fi - size_diff);
4627
4628 if (btrfs_file_extent_type(leaf, fi) ==
4629 BTRFS_FILE_EXTENT_INLINE) {
4630 ptr = btrfs_item_ptr_offset(leaf, slot);
4631 memmove_extent_buffer(leaf, ptr,
4632 (unsigned long)fi,
4633 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4634 }
4635 }
4636
4637 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4638 data_end + size_diff, btrfs_leaf_data(leaf) +
4639 data_end, old_data_start - data_end);
4640
4641 offset = btrfs_disk_key_offset(&disk_key);
4642 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4643 btrfs_set_item_key(leaf, &disk_key, slot);
4644 if (slot == 0)
4645 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4646 }
4647
4648 item = btrfs_item_nr(slot);
4649 btrfs_set_item_size(leaf, item, new_size);
4650 btrfs_mark_buffer_dirty(leaf);
4651
4652 if (btrfs_leaf_free_space(root, leaf) < 0) {
4653 btrfs_print_leaf(root, leaf);
4654 BUG();
4655 }
4656 }
4657
4658 /*
4659 * make the item pointed to by the path bigger, data_size is the added size.
4660 */
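/*
 * Growing the item shifts its payload (and that of every later item)
 * toward the front of the data area by data_size bytes; the new space
 * appears at the end of the item and holds stale bytes until the
 * caller overwrites it.
 */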
4661 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4662 u32 data_size)
4663 {
4664 int slot;
4665 struct extent_buffer *leaf;
4666 struct btrfs_item *item;
4667 u32 nritems;
4668 unsigned int data_end;
4669 unsigned int old_data;
4670 unsigned int old_size;
4671 int i;
4672 struct btrfs_map_token token;
4673
4674 btrfs_init_map_token(&token);
4675
4676 leaf = path->nodes[0];
4677
4678 nritems = btrfs_header_nritems(leaf);
4679 data_end = leaf_data_end(root, leaf);
4680
4681 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4682 btrfs_print_leaf(root, leaf);
4683 BUG();
4684 }
4685 slot = path->slots[0];
4686 old_data = btrfs_item_end_nr(leaf, slot);
4687
4688 BUG_ON(slot < 0);
4689 if (slot >= nritems) {
4690 btrfs_print_leaf(root, leaf);
4691 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4692 slot, nritems);
4693 BUG_ON(1);
4694 }
4695
4696 /*
4697 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4698 */
4699 /* first correct the data pointers */
4700 for (i = slot; i < nritems; i++) {
4701 u32 ioff;
4702 item = btrfs_item_nr(i);
4703
4704 ioff = btrfs_token_item_offset(leaf, item, &token);
4705 btrfs_set_token_item_offset(leaf, item,
4706 ioff - data_size, &token);
4707 }
4708
4709 /* shift the data */
4710 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4711 data_end - data_size, btrfs_leaf_data(leaf) +
4712 data_end, old_data - data_end);
4713
4714 data_end = old_data;
4715 old_size = btrfs_item_size_nr(leaf, slot);
4716 item = btrfs_item_nr(slot);
4717 btrfs_set_item_size(leaf, item, old_size + data_size);
4718 btrfs_mark_buffer_dirty(leaf);
4719
4720 if (btrfs_leaf_free_space(root, leaf) < 0) {
4721 btrfs_print_leaf(root, leaf);
4722 BUG();
4723 }
4724 }
4725
4726 /*
4727 * this is a helper for btrfs_insert_empty_items, the main goal here is
4728 * to save stack depth by doing the bulk of the work in a function
4729 * that doesn't call btrfs_search_slot
4730 */
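/*
 * data_size[] gives the payload size of each of the 'nr' new items;
 * total_data must be their sum, and total_size adds nr item headers on
 * top, matching the space the caller reserved in its search.
 */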
4731 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4732 struct btrfs_key *cpu_key, u32 *data_size,
4733 u32 total_data, u32 total_size, int nr)
4734 {
4735 struct btrfs_item *item;
4736 int i;
4737 u32 nritems;
4738 unsigned int data_end;
4739 struct btrfs_disk_key disk_key;
4740 struct extent_buffer *leaf;
4741 int slot;
4742 struct btrfs_map_token token;
4743
4744 if (path->slots[0] == 0) {
4745 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4746 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4747 }
4748 btrfs_unlock_up_safe(path, 1);
4749
4750 btrfs_init_map_token(&token);
4751
4752 leaf = path->nodes[0];
4753 slot = path->slots[0];
4754
4755 nritems = btrfs_header_nritems(leaf);
4756 data_end = leaf_data_end(root, leaf);
4757
4758 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4759 btrfs_print_leaf(root, leaf);
4760 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4761 total_size, btrfs_leaf_free_space(root, leaf));
4762 BUG();
4763 }
4764
4765 if (slot != nritems) {
4766 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4767
4768 if (old_data < data_end) {
4769 btrfs_print_leaf(root, leaf);
4770 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4771 slot, old_data, data_end);
4772 BUG_ON(1);
4773 }
4774 /*
4775 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4776 */
4777 /* first correct the data pointers */
4778 for (i = slot; i < nritems; i++) {
4779 u32 ioff;
4780
4781 item = btrfs_item_nr(i);
4782 ioff = btrfs_token_item_offset(leaf, item, &token);
4783 btrfs_set_token_item_offset(leaf, item,
4784 ioff - total_data, &token);
4785 }
4786 /* shift the items */
4787 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4788 btrfs_item_nr_offset(slot),
4789 (nritems - slot) * sizeof(struct btrfs_item));
4790
4791 /* shift the data */
4792 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4793 data_end - total_data, btrfs_leaf_data(leaf) +
4794 data_end, old_data - data_end);
4795 data_end = old_data;
4796 }
4797
4798 /* setup the item for the new data */
4799 for (i = 0; i < nr; i++) {
4800 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4801 btrfs_set_item_key(leaf, &disk_key, slot + i);
4802 item = btrfs_item_nr(slot + i);
4803 btrfs_set_token_item_offset(leaf, item,
4804 data_end - data_size[i], &token);
4805 data_end -= data_size[i];
4806 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4807 }
4808
4809 btrfs_set_header_nritems(leaf, nritems + nr);
4810 btrfs_mark_buffer_dirty(leaf);
4811
4812 if (btrfs_leaf_free_space(root, leaf) < 0) {
4813 btrfs_print_leaf(root, leaf);
4814 BUG();
4815 }
4816 }
4817
4818 /*
4819 * Given a key and some data, insert items into the tree.
4820 * This does all the path init required, making room in the tree if needed.
4821 */
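/*
 * Returns -EEXIST if an item with the first key already exists. On
 * success the path points at the first new slot and the caller is
 * responsible for writing the payloads.
 */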
4822 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4823 struct btrfs_root *root,
4824 struct btrfs_path *path,
4825 struct btrfs_key *cpu_key, u32 *data_size,
4826 int nr)
4827 {
4828 int ret = 0;
4829 int slot;
4830 int i;
4831 u32 total_size = 0;
4832 u32 total_data = 0;
4833
4834 for (i = 0; i < nr; i++)
4835 total_data += data_size[i];
4836
4837 total_size = total_data + (nr * sizeof(struct btrfs_item));
4838 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4839 if (ret == 0)
4840 return -EEXIST;
4841 if (ret < 0)
4842 return ret;
4843
4844 slot = path->slots[0];
4845 BUG_ON(slot < 0);
4846
4847 setup_items_for_insert(root, path, cpu_key, data_size,
4848 total_data, total_size, nr);
4849 return 0;
4850 }
4851
4852 /*
4853 * Given a key and some data, insert an item into the tree.
4854 * This does all the path init required, making room in the tree if needed.
4855 */
4856 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4857 *root, struct btrfs_key *cpu_key, void *data, u32
4858 data_size)
4859 {
4860 int ret = 0;
4861 struct btrfs_path *path;
4862 struct extent_buffer *leaf;
4863 unsigned long ptr;
4864
4865 path = btrfs_alloc_path();
4866 if (!path)
4867 return -ENOMEM;
4868 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4869 if (!ret) {
4870 leaf = path->nodes[0];
4871 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4872 write_extent_buffer(leaf, data, ptr, data_size);
4873 btrfs_mark_buffer_dirty(leaf);
4874 }
4875 btrfs_free_path(path);
4876 return ret;
4877 }
4878
4879 /*
4880 * delete the pointer from a given node.
4881 *
4882 * the tree should have been previously balanced so the deletion does not
4883 * empty a node.
4884 */
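/*
 * For internal nodes (level != 0) the change is also recorded in the
 * tree mod log so old versions of the tree can be replayed. Emptying
 * the root node collapses the root into a leaf.
 */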
4885 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4886 int level, int slot)
4887 {
4888 struct extent_buffer *parent = path->nodes[level];
4889 u32 nritems;
4890 int ret;
4891
4892 nritems = btrfs_header_nritems(parent);
4893 if (slot != nritems - 1) {
4894 if (level)
4895 tree_mod_log_eb_move(root->fs_info, parent, slot,
4896 slot + 1, nritems - slot - 1);
4897 memmove_extent_buffer(parent,
4898 btrfs_node_key_ptr_offset(slot),
4899 btrfs_node_key_ptr_offset(slot + 1),
4900 sizeof(struct btrfs_key_ptr) *
4901 (nritems - slot - 1));
4902 } else if (level) {
4903 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4904 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4905 BUG_ON(ret < 0);
4906 }
4907
4908 nritems--;
4909 btrfs_set_header_nritems(parent, nritems);
4910 if (nritems == 0 && parent == root->node) {
4911 BUG_ON(btrfs_header_level(root->node) != 1);
4912 /* just turn the root into a leaf and break */
4913 btrfs_set_header_level(root->node, 0);
4914 } else if (slot == 0) {
4915 struct btrfs_disk_key disk_key;
4916
4917 btrfs_node_key(parent, &disk_key, 0);
4918 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4919 }
4920 btrfs_mark_buffer_dirty(parent);
4921 }
4922
4923 /*
4924 * a helper function to delete the leaf pointed to by path->slots[1] and
4925 * path->nodes[1].
4926 *
4927 * This deletes the pointer in path->nodes[1] and frees the leaf
4928 * block extent.
4929 *
4930 * The path must have already been set up for deleting the leaf, including
4931 * all the proper balancing. path->nodes[1] must be locked.
4932 */
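/*
 * The extra reference taken below keeps the extent buffer alive across
 * btrfs_free_tree_block() so it can still be marked stale afterwards.
 */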
4933 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4934 struct btrfs_root *root,
4935 struct btrfs_path *path,
4936 struct extent_buffer *leaf)
4937 {
4938 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4939 del_ptr(root, path, 1, path->slots[1]);
4940
4941 /*
4942 * btrfs_free_extent is expensive, we want to make sure we
4943 * aren't holding any locks when we call it
4944 */
4945 btrfs_unlock_up_safe(path, 0);
4946
4947 root_sub_used(root, leaf->len);
4948
4949 extent_buffer_get(leaf);
4950 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4951 free_extent_buffer_stale(leaf);
4952 }
4953 /*
4954 * delete the item at the leaf level in path. If that empties
4955 * the leaf, remove it from the tree
4956 */
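/*
 * Deletion compacts the leaf in place: the surviving payload below the
 * hole is shifted toward the end of the block, later item headers slide
 * down, and a leaf left less than a third full is pushed into its
 * neighbors (and freed entirely once empty).
 */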
4957 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4958 struct btrfs_path *path, int slot, int nr)
4959 {
4960 struct extent_buffer *leaf;
4961 struct btrfs_item *item;
4962 u32 last_off;
4963 u32 dsize = 0;
4964 int ret = 0;
4965 int wret;
4966 int i;
4967 u32 nritems;
4968 struct btrfs_map_token token;
4969
4970 btrfs_init_map_token(&token);
4971
4972 leaf = path->nodes[0];
4973 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4974
4975 for (i = 0; i < nr; i++)
4976 dsize += btrfs_item_size_nr(leaf, slot + i);
4977
4978 nritems = btrfs_header_nritems(leaf);
4979
4980 if (slot + nr != nritems) {
4981 int data_end = leaf_data_end(root, leaf);
4982
4983 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4984 data_end + dsize,
4985 btrfs_leaf_data(leaf) + data_end,
4986 last_off - data_end);
4987
4988 for (i = slot + nr; i < nritems; i++) {
4989 u32 ioff;
4990
4991 item = btrfs_item_nr(i);
4992 ioff = btrfs_token_item_offset(leaf, item, &token);
4993 btrfs_set_token_item_offset(leaf, item,
4994 ioff + dsize, &token);
4995 }
4996
4997 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4998 btrfs_item_nr_offset(slot + nr),
4999 sizeof(struct btrfs_item) *
5000 (nritems - slot - nr));
5001 }
5002 btrfs_set_header_nritems(leaf, nritems - nr);
5003 nritems -= nr;
5004
5005 /* delete the leaf if we've emptied it */
5006 if (nritems == 0) {
5007 if (leaf == root->node) {
5008 btrfs_set_header_level(leaf, 0);
5009 } else {
5010 btrfs_set_path_blocking(path);
5011 clean_tree_block(trans, root->fs_info, leaf);
5012 btrfs_del_leaf(trans, root, path, leaf);
5013 }
5014 } else {
5015 int used = leaf_space_used(leaf, 0, nritems);
5016 if (slot == 0) {
5017 struct btrfs_disk_key disk_key;
5018
5019 btrfs_item_key(leaf, &disk_key, 0);
5020 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5021 }
5022
5023 /* delete the leaf if it is mostly empty */
5024 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5025 /* push_leaf_left fixes the path.
5026 * make sure the path still points to our leaf
5027 * for possible call to del_ptr below
5028 */
5029 slot = path->slots[1];
5030 extent_buffer_get(leaf);
5031
5032 btrfs_set_path_blocking(path);
5033 wret = push_leaf_left(trans, root, path, 1, 1,
5034 1, (u32)-1);
5035 if (wret < 0 && wret != -ENOSPC)
5036 ret = wret;
5037
5038 if (path->nodes[0] == leaf &&
5039 btrfs_header_nritems(leaf)) {
5040 wret = push_leaf_right(trans, root, path, 1,
5041 1, 1, 0);
5042 if (wret < 0 && wret != -ENOSPC)
5043 ret = wret;
5044 }
5045
5046 if (btrfs_header_nritems(leaf) == 0) {
5047 path->slots[1] = slot;
5048 btrfs_del_leaf(trans, root, path, leaf);
5049 free_extent_buffer(leaf);
5050 ret = 0;
5051 } else {
5052 /* if we're still in the path, make sure
5053 * we're dirty. Otherwise, one of the
5054 * push_leaf functions must have already
5055 * dirtied this buffer
5056 */
5057 if (path->nodes[0] == leaf)
5058 btrfs_mark_buffer_dirty(leaf);
5059 free_extent_buffer(leaf);
5060 }
5061 } else {
5062 btrfs_mark_buffer_dirty(leaf);
5063 }
5064 }
5065 return ret;
5066 }
5067
5068 /*
5069 * search the tree again to find a leaf with lesser keys
5070 * returns 0 if it found something or 1 if there are no lesser leaves.
5071 * returns < 0 on io errors.
5072 *
5073 * This may release the path, and so you may lose any locks held at the
5074 * time you call it.
5075 */
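/*
 * The previous leaf is reached by searching for the largest key that is
 * strictly smaller than the first key of the current leaf: decrement
 * offset, else type, else objectid, saturating the lower members to all
 * ones.
 */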
5076 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5077 {
5078 struct btrfs_key key;
5079 struct btrfs_disk_key found_key;
5080 int ret;
5081
5082 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5083
5084 if (key.offset > 0) {
5085 key.offset--;
5086 } else if (key.type > 0) {
5087 key.type--;
5088 key.offset = (u64)-1;
5089 } else if (key.objectid > 0) {
5090 key.objectid--;
5091 key.type = (u8)-1;
5092 key.offset = (u64)-1;
5093 } else {
5094 return 1;
5095 }
5096
5097 btrfs_release_path(path);
5098 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5099 if (ret < 0)
5100 return ret;
5101 btrfs_item_key(path->nodes[0], &found_key, 0);
5102 ret = comp_keys(&found_key, &key);
5103 /*
5104 * We might have had an item with the previous key in the tree right
5105 * before we released our path. And after we released our path, that
5106 * item might have been pushed to the first slot (0) of the leaf we
5107 * were holding due to a tree balance. Alternatively, an item with the
5108 * previous key can exist as the only element of a leaf (big fat item).
5109 * Therefore account for these 2 cases, so that our callers (like
5110 * btrfs_previous_item) don't miss an existing item with a key matching
5111 * the previous key we computed above.
5112 */
5113 if (ret <= 0)
5114 return 0;
5115 return 1;
5116 }
5117
5118 /*
5119 * A helper function to walk down the tree starting at min_key, and looking
5120 * for nodes or leaves that have a minimum transaction id.
5121 * This is used by the btree defrag code and tree logging.
5122 *
5123 * This does not cow, but it does stuff the starting key it finds back
5124 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5125 * key and get a writable path.
5126 *
5127 * This does lock as it descends, and path->keep_locks should be set
5128 * to 1 by the caller.
5129 *
5130 * This honors path->lowest_level to prevent descent past a given level
5131 * of the tree.
5132 *
5133 * min_trans indicates the oldest transaction that you are interested
5134 * in walking through. Any nodes or leaves older than min_trans are
5135 * skipped over (without reading them).
5136 *
5137 * returns zero if something useful was found, < 0 on error and 1 if there
5138 * was nothing in the tree that matched the search criteria.
5139 */
5140 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5141 struct btrfs_path *path,
5142 u64 min_trans)
5143 {
5144 struct extent_buffer *cur;
5145 struct btrfs_key found_key;
5146 int slot;
5147 int sret;
5148 u32 nritems;
5149 int level;
5150 int ret = 1;
5151 int keep_locks = path->keep_locks;
5152
5153 path->keep_locks = 1;
5154 again:
5155 cur = btrfs_read_lock_root_node(root);
5156 level = btrfs_header_level(cur);
5157 WARN_ON(path->nodes[level]);
5158 path->nodes[level] = cur;
5159 path->locks[level] = BTRFS_READ_LOCK;
5160
5161 if (btrfs_header_generation(cur) < min_trans) {
5162 ret = 1;
5163 goto out;
5164 }
5165 while (1) {
5166 nritems = btrfs_header_nritems(cur);
5167 level = btrfs_header_level(cur);
5168 sret = bin_search(cur, min_key, level, &slot);
5169
5170 /* at the lowest level, we're done, setup the path and exit */
5171 if (level == path->lowest_level) {
5172 if (slot >= nritems)
5173 goto find_next_key;
5174 ret = 0;
5175 path->slots[level] = slot;
5176 btrfs_item_key_to_cpu(cur, &found_key, slot);
5177 goto out;
5178 }
5179 if (sret && slot > 0)
5180 slot--;
5181 /*
5182 * check this node pointer against the min_trans parameter.
5183 * If it is too old, skip to the next one.
5184 */
5185 while (slot < nritems) {
5186 u64 gen;
5187
5188 gen = btrfs_node_ptr_generation(cur, slot);
5189 if (gen < min_trans) {
5190 slot++;
5191 continue;
5192 }
5193 break;
5194 }
5195 find_next_key:
5196 /*
5197 * we didn't find a candidate key in this node, walk forward
5198 * and find another one
5199 */
5200 if (slot >= nritems) {
5201 path->slots[level] = slot;
5202 btrfs_set_path_blocking(path);
5203 sret = btrfs_find_next_key(root, path, min_key, level,
5204 min_trans);
5205 if (sret == 0) {
5206 btrfs_release_path(path);
5207 goto again;
5208 } else {
5209 goto out;
5210 }
5211 }
5212 /* save our key for returning back */
5213 btrfs_node_key_to_cpu(cur, &found_key, slot);
5214 path->slots[level] = slot;
5215 if (level == path->lowest_level) {
5216 ret = 0;
5217 goto out;
5218 }
5219 btrfs_set_path_blocking(path);
5220 cur = read_node_slot(root, cur, slot);
5221 BUG_ON(!cur); /* -ENOMEM */
5222
5223 btrfs_tree_read_lock(cur);
5224
5225 path->locks[level - 1] = BTRFS_READ_LOCK;
5226 path->nodes[level - 1] = cur;
5227 unlock_up(path, level, 1, 0, NULL);
5228 btrfs_clear_path_blocking(path, NULL, 0);
5229 }
5230 out:
5231 path->keep_locks = keep_locks;
5232 if (ret == 0) {
5233 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5234 btrfs_set_path_blocking(path);
5235 memcpy(min_key, &found_key, sizeof(found_key));
5236 }
5237 return ret;
5238 }
5239
5240 static void tree_move_down(struct btrfs_root *root,
5241 struct btrfs_path *path,
5242 int *level, int root_level)
5243 {
5244 BUG_ON(*level == 0);
5245 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5246 path->slots[*level]);
5247 path->slots[*level - 1] = 0;
5248 (*level)--;
5249 }
5250
5251 static int tree_move_next_or_upnext(struct btrfs_root *root,
5252 struct btrfs_path *path,
5253 int *level, int root_level)
5254 {
5255 int ret = 0;
5256 int nritems;
5257 nritems = btrfs_header_nritems(path->nodes[*level]);
5258
5259 path->slots[*level]++;
5260
5261 while (path->slots[*level] >= nritems) {
5262 if (*level == root_level)
5263 return -1;
5264
5265 /* move up and to the next slot */
5266 path->slots[*level] = 0;
5267 free_extent_buffer(path->nodes[*level]);
5268 path->nodes[*level] = NULL;
5269 (*level)++;
5270 path->slots[*level]++;
5271
5272 nritems = btrfs_header_nritems(path->nodes[*level]);
5273 ret = 1;
5274 }
5275 return ret;
5276 }
5277
5278 /*
5279 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5280 * or down.
5281 */
5282 static int tree_advance(struct btrfs_root *root,
5283 struct btrfs_path *path,
5284 int *level, int root_level,
5285 int allow_down,
5286 struct btrfs_key *key)
5287 {
5288 int ret;
5289
5290 if (*level == 0 || !allow_down) {
5291 ret = tree_move_next_or_upnext(root, path, level, root_level);
5292 } else {
5293 tree_move_down(root, path, level, root_level);
5294 ret = 0;
5295 }
5296 if (ret >= 0) {
5297 if (*level == 0)
5298 btrfs_item_key_to_cpu(path->nodes[*level], key,
5299 path->slots[*level]);
5300 else
5301 btrfs_node_key_to_cpu(path->nodes[*level], key,
5302 path->slots[*level]);
5303 }
5304 return ret;
5305 }
5306
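/*
 * Byte-compare the items both paths point at; tmp_buf must hold at
 * least nodesize bytes. Returns 0 if the payloads are identical and 1
 * otherwise.
 */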
5307 static int tree_compare_item(struct btrfs_root *left_root,
5308 struct btrfs_path *left_path,
5309 struct btrfs_path *right_path,
5310 char *tmp_buf)
5311 {
5312 int cmp;
5313 int len1, len2;
5314 unsigned long off1, off2;
5315
5316 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5317 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5318 if (len1 != len2)
5319 return 1;
5320
5321 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5322 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5323 right_path->slots[0]);
5324
5325 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5326
5327 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5328 if (cmp)
5329 return 1;
5330 return 0;
5331 }
5332
5333 #define ADVANCE 1
5334 #define ADVANCE_ONLY_NEXT -1
5335
5336 /*
5337 * This function compares two trees and calls the provided callback for
5338 * every changed/new/deleted item it finds.
5339 * If shared tree blocks are encountered, whole subtrees are skipped, making
5340 * the compare pretty fast on snapshotted subvolumes.
5341 *
5342 * This currently works on commit roots only. As commit roots are read only,
5343 * we don't do any locking. The commit roots are protected with transactions.
5344 * Transactions are ended and rejoined when a commit is tried in between.
5345 *
5346 * This function checks for modifications done to the trees while comparing.
5347 * If it detects a change, it aborts immediately.
5348 */
5349 int btrfs_compare_trees(struct btrfs_root *left_root,
5350 struct btrfs_root *right_root,
5351 btrfs_changed_cb_t changed_cb, void *ctx)
5352 {
5353 int ret;
5354 int cmp;
5355 struct btrfs_path *left_path = NULL;
5356 struct btrfs_path *right_path = NULL;
5357 struct btrfs_key left_key;
5358 struct btrfs_key right_key;
5359 char *tmp_buf = NULL;
5360 int left_root_level;
5361 int right_root_level;
5362 int left_level;
5363 int right_level;
5364 int left_end_reached;
5365 int right_end_reached;
5366 int advance_left;
5367 int advance_right;
5368 u64 left_blockptr;
5369 u64 right_blockptr;
5370 u64 left_gen;
5371 u64 right_gen;
5372
5373 left_path = btrfs_alloc_path();
5374 if (!left_path) {
5375 ret = -ENOMEM;
5376 goto out;
5377 }
5378 right_path = btrfs_alloc_path();
5379 if (!right_path) {
5380 ret = -ENOMEM;
5381 goto out;
5382 }
5383
5384 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5385 if (!tmp_buf) {
5386 tmp_buf = vmalloc(left_root->nodesize);
5387 if (!tmp_buf) {
5388 ret = -ENOMEM;
5389 goto out;
5390 }
5391 }
5392
5393 left_path->search_commit_root = 1;
5394 left_path->skip_locking = 1;
5395 right_path->search_commit_root = 1;
5396 right_path->skip_locking = 1;
5397
5398 /*
5399 * Strategy: Go to the first items of both trees. Then do
5400 *
5401 * If both trees are at level 0
5402 * Compare keys of current items
5403 * If left < right treat left item as new, advance left tree
5404 * and repeat
5405 * If left > right treat right item as deleted, advance right tree
5406 * and repeat
5407 * If left == right do deep compare of items, treat as changed if
5408 * needed, advance both trees and repeat
5409 * If both trees are at the same level but not at level 0
5410 * Compare keys of current nodes/leaves
5411 * If left < right advance left tree and repeat
5412 * If left > right advance right tree and repeat
5413 * If left == right compare blockptrs of the next nodes/leafs
5414 * If they match advance both trees but stay at the same level
5415 * and repeat
5416 * If they don't match advance both trees while allowing to go
5417 * deeper and repeat
5418 * If tree levels are different
5419 * Advance the tree that needs it and repeat
5420 *
5421 * Advancing a tree means:
5422 * If we are at level 0, try to go to the next slot. If that's not
5423 * possible, go one level up and repeat. Stop when we find a level
5424 * where we could go to the next slot. We may at this point be on a
5425 * node or a leaf.
5426 *
5427 * If we are not at level 0 and not on shared tree blocks, go one
5428 * level deeper.
5429 *
5430 * If we are not at level 0 and on shared tree blocks, go one slot to
5431 * the right if possible or go up and right.
5432 */
5433
5434 down_read(&left_root->fs_info->commit_root_sem);
5435 left_level = btrfs_header_level(left_root->commit_root);
5436 left_root_level = left_level;
5437 left_path->nodes[left_level] = left_root->commit_root;
5438 extent_buffer_get(left_path->nodes[left_level]);
5439
5440 right_level = btrfs_header_level(right_root->commit_root);
5441 right_root_level = right_level;
5442 right_path->nodes[right_level] = right_root->commit_root;
5443 extent_buffer_get(right_path->nodes[right_level]);
5444 up_read(&left_root->fs_info->commit_root_sem);
5445
5446 if (left_level == 0)
5447 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5448 &left_key, left_path->slots[left_level]);
5449 else
5450 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5451 &left_key, left_path->slots[left_level]);
5452 if (right_level == 0)
5453 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5454 &right_key, right_path->slots[right_level]);
5455 else
5456 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5457 &right_key, right_path->slots[right_level]);
5458
5459 left_end_reached = right_end_reached = 0;
5460 advance_left = advance_right = 0;
5461
5462 while (1) {
5463 if (advance_left && !left_end_reached) {
5464 ret = tree_advance(left_root, left_path, &left_level,
5465 left_root_level,
5466 advance_left != ADVANCE_ONLY_NEXT,
5467 &left_key);
5468 if (ret < 0)
5469 left_end_reached = ADVANCE;
5470 advance_left = 0;
5471 }
5472 if (advance_right && !right_end_reached) {
5473 ret = tree_advance(right_root, right_path, &right_level,
5474 right_root_level,
5475 advance_right != ADVANCE_ONLY_NEXT,
5476 &right_key);
5477 if (ret < 0)
5478 right_end_reached = ADVANCE;
5479 advance_right = 0;
5480 }
5481
5482 if (left_end_reached && right_end_reached) {
5483 ret = 0;
5484 goto out;
5485 } else if (left_end_reached) {
5486 if (right_level == 0) {
5487 ret = changed_cb(left_root, right_root,
5488 left_path, right_path,
5489 &right_key,
5490 BTRFS_COMPARE_TREE_DELETED,
5491 ctx);
5492 if (ret < 0)
5493 goto out;
5494 }
5495 advance_right = ADVANCE;
5496 continue;
5497 } else if (right_end_reached) {
5498 if (left_level == 0) {
5499 ret = changed_cb(left_root, right_root,
5500 left_path, right_path,
5501 &left_key,
5502 BTRFS_COMPARE_TREE_NEW,
5503 ctx);
5504 if (ret < 0)
5505 goto out;
5506 }
5507 advance_left = ADVANCE;
5508 continue;
5509 }
5510
5511 if (left_level == 0 && right_level == 0) {
5512 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5513 if (cmp < 0) {
5514 ret = changed_cb(left_root, right_root,
5515 left_path, right_path,
5516 &left_key,
5517 BTRFS_COMPARE_TREE_NEW,
5518 ctx);
5519 if (ret < 0)
5520 goto out;
5521 advance_left = ADVANCE;
5522 } else if (cmp > 0) {
5523 ret = changed_cb(left_root, right_root,
5524 left_path, right_path,
5525 &right_key,
5526 BTRFS_COMPARE_TREE_DELETED,
5527 ctx);
5528 if (ret < 0)
5529 goto out;
5530 advance_right = ADVANCE;
5531 } else {
5532 enum btrfs_compare_tree_result result;
5533
5534 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5535 ret = tree_compare_item(left_root, left_path,
5536 right_path, tmp_buf);
5537 if (ret)
5538 result = BTRFS_COMPARE_TREE_CHANGED;
5539 else
5540 result = BTRFS_COMPARE_TREE_SAME;
5541 ret = changed_cb(left_root, right_root,
5542 left_path, right_path,
5543 &left_key, result, ctx);
5544 if (ret < 0)
5545 goto out;
5546 advance_left = ADVANCE;
5547 advance_right = ADVANCE;
5548 }
5549 } else if (left_level == right_level) {
5550 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5551 if (cmp < 0) {
5552 advance_left = ADVANCE;
5553 } else if (cmp > 0) {
5554 advance_right = ADVANCE;
5555 } else {
5556 left_blockptr = btrfs_node_blockptr(
5557 left_path->nodes[left_level],
5558 left_path->slots[left_level]);
5559 right_blockptr = btrfs_node_blockptr(
5560 right_path->nodes[right_level],
5561 right_path->slots[right_level]);
5562 left_gen = btrfs_node_ptr_generation(
5563 left_path->nodes[left_level],
5564 left_path->slots[left_level]);
5565 right_gen = btrfs_node_ptr_generation(
5566 right_path->nodes[right_level],
5567 right_path->slots[right_level]);
5568 if (left_blockptr == right_blockptr &&
5569 left_gen == right_gen) {
5570 /*
5571 * As we're on a shared block, don't
5572 * allow to go deeper.
5573 */
5574 advance_left = ADVANCE_ONLY_NEXT;
5575 advance_right = ADVANCE_ONLY_NEXT;
5576 } else {
5577 advance_left = ADVANCE;
5578 advance_right = ADVANCE;
5579 }
5580 }
5581 } else if (left_level < right_level) {
5582 advance_right = ADVANCE;
5583 } else {
5584 advance_left = ADVANCE;
5585 }
5586 }
5587
5588 out:
5589 btrfs_free_path(left_path);
5590 btrfs_free_path(right_path);
5591 kvfree(tmp_buf);
5592 return ret;
5593 }
5594
5595 /*
5596 * this is similar to btrfs_next_leaf, but does not try to preserve
5597 * and fixup the path. It looks for and returns the next key in the
5598 * tree based on the current path and the min_trans parameters.
5599 *
5600 * 0 is returned if another key is found, < 0 if there are any errors
5601 * and 1 is returned if there are no higher keys in the tree
5602 *
5603 * path->keep_locks should be set to 1 on the search made before
5604 * calling this function.
5605 */
5606 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5607 struct btrfs_key *key, int level, u64 min_trans)
5608 {
5609 int slot;
5610 struct extent_buffer *c;
5611
5612 WARN_ON(!path->keep_locks);
5613 while (level < BTRFS_MAX_LEVEL) {
5614 if (!path->nodes[level])
5615 return 1;
5616
5617 slot = path->slots[level] + 1;
5618 c = path->nodes[level];
5619 next:
5620 if (slot >= btrfs_header_nritems(c)) {
5621 int ret;
5622 int orig_lowest;
5623 struct btrfs_key cur_key;
5624 if (level + 1 >= BTRFS_MAX_LEVEL ||
5625 !path->nodes[level + 1])
5626 return 1;
5627
5628 if (path->locks[level + 1]) {
5629 level++;
5630 continue;
5631 }
5632
5633 slot = btrfs_header_nritems(c) - 1;
5634 if (level == 0)
5635 btrfs_item_key_to_cpu(c, &cur_key, slot);
5636 else
5637 btrfs_node_key_to_cpu(c, &cur_key, slot);
5638
5639 orig_lowest = path->lowest_level;
5640 btrfs_release_path(path);
5641 path->lowest_level = level;
5642 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5643 0, 0);
5644 path->lowest_level = orig_lowest;
5645 if (ret < 0)
5646 return ret;
5647
5648 c = path->nodes[level];
5649 slot = path->slots[level];
5650 if (ret == 0)
5651 slot++;
5652 goto next;
5653 }
5654
5655 if (level == 0)
5656 btrfs_item_key_to_cpu(c, key, slot);
5657 else {
5658 u64 gen = btrfs_node_ptr_generation(c, slot);
5659
5660 if (gen < min_trans) {
5661 slot++;
5662 goto next;
5663 }
5664 btrfs_node_key_to_cpu(c, key, slot);
5665 }
5666 return 0;
5667 }
5668 return 1;
5669 }
5670
5671 /*
5672 * search the tree again to find a leaf with greater keys
5673 * returns 0 if it found something or 1 if there are no greater leaves.
5674 * returns < 0 on io errors.
5675 */
5676 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5677 {
5678 return btrfs_next_old_leaf(root, path, 0);
5679 }
5680
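/*
 * Same as btrfs_next_leaf(), except that a non-zero time_seq makes the
 * search replay the tree modification log (btrfs_search_old_slot()) so
 * iteration sees the tree as it was at that sequence number.
 */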
5681 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5682 u64 time_seq)
5683 {
5684 int slot;
5685 int level;
5686 struct extent_buffer *c;
5687 struct extent_buffer *next;
5688 struct btrfs_key key;
5689 u32 nritems;
5690 int ret;
5691 int old_spinning = path->leave_spinning;
5692 int next_rw_lock = 0;
5693
5694 nritems = btrfs_header_nritems(path->nodes[0]);
5695 if (nritems == 0)
5696 return 1;
5697
5698 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5699 again:
5700 level = 1;
5701 next = NULL;
5702 next_rw_lock = 0;
5703 btrfs_release_path(path);
5704
5705 path->keep_locks = 1;
5706 path->leave_spinning = 1;
5707
5708 if (time_seq)
5709 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5710 else
5711 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5712 path->keep_locks = 0;
5713
5714 if (ret < 0)
5715 return ret;
5716
5717 nritems = btrfs_header_nritems(path->nodes[0]);
5718 /*
5719 * by releasing the path above we dropped all our locks. A balance
5720 * could have added more items next to the key that used to be
5721 * at the very end of the block. So, check again here and
5722 * advance the path if there are now more items available.
5723 */
5724 if (nritems > 0 && path->slots[0] < nritems - 1) {
5725 if (ret == 0)
5726 path->slots[0]++;
5727 ret = 0;
5728 goto done;
5729 }
5730 /*
5731 * So the above check misses one case:
5732 * - after releasing the path above, someone has removed the item that
5733 * used to be at the very end of the block, and balance between leaves
5734 * gets another one with a bigger key.offset to replace it.
5735 *
5736 * This one should be returned as well, or we can get leaf corruption
5737 * later (esp. in __btrfs_drop_extents()).
5738 *
5739 * A bit more explanation of this check:
5740 * with ret > 0, the key isn't found, the path points to the slot
5741 * where it should be inserted, so the path->slots[0] item must be the
5742 * bigger one.
5743 */
5744 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5745 ret = 0;
5746 goto done;
5747 }
5748
5749 while (level < BTRFS_MAX_LEVEL) {
5750 if (!path->nodes[level]) {
5751 ret = 1;
5752 goto done;
5753 }
5754
5755 slot = path->slots[level] + 1;
5756 c = path->nodes[level];
5757 if (slot >= btrfs_header_nritems(c)) {
5758 level++;
5759 if (level == BTRFS_MAX_LEVEL) {
5760 ret = 1;
5761 goto done;
5762 }
5763 continue;
5764 }
5765
5766 if (next) {
5767 btrfs_tree_unlock_rw(next, next_rw_lock);
5768 free_extent_buffer(next);
5769 }
5770
5771 next = c;
5772 next_rw_lock = path->locks[level];
5773 ret = read_block_for_search(NULL, root, path, &next, level,
5774 slot, &key, 0);
5775 if (ret == -EAGAIN)
5776 goto again;
5777
5778 if (ret < 0) {
5779 btrfs_release_path(path);
5780 goto done;
5781 }
5782
5783 if (!path->skip_locking) {
5784 ret = btrfs_try_tree_read_lock(next);
5785 if (!ret && time_seq) {
5786 /*
5787 * If we don't get the lock, we may be racing
5788 * with push_leaf_left, which holds that lock while
5789 * itself waiting for the leaf we currently have
5790 * locked. To solve this situation, we give up
5791 * on our lock and cycle.
5792 */
5793 free_extent_buffer(next);
5794 btrfs_release_path(path);
5795 cond_resched();
5796 goto again;
5797 }
5798 if (!ret) {
5799 btrfs_set_path_blocking(path);
5800 btrfs_tree_read_lock(next);
5801 btrfs_clear_path_blocking(path, next,
5802 BTRFS_READ_LOCK);
5803 }
5804 next_rw_lock = BTRFS_READ_LOCK;
5805 }
5806 break;
5807 }
5808 path->slots[level] = slot;
5809 while (1) {
5810 level--;
5811 c = path->nodes[level];
5812 if (path->locks[level])
5813 btrfs_tree_unlock_rw(c, path->locks[level]);
5814
5815 free_extent_buffer(c);
5816 path->nodes[level] = next;
5817 path->slots[level] = 0;
5818 if (!path->skip_locking)
5819 path->locks[level] = next_rw_lock;
5820 if (!level)
5821 break;
5822
5823 ret = read_block_for_search(NULL, root, path, &next, level,
5824 0, &key, 0);
5825 if (ret == -EAGAIN)
5826 goto again;
5827
5828 if (ret < 0) {
5829 btrfs_release_path(path);
5830 goto done;
5831 }
5832
5833 if (!path->skip_locking) {
5834 ret = btrfs_try_tree_read_lock(next);
5835 if (!ret) {
5836 btrfs_set_path_blocking(path);
5837 btrfs_tree_read_lock(next);
5838 btrfs_clear_path_blocking(path, next,
5839 BTRFS_READ_LOCK);
5840 }
5841 next_rw_lock = BTRFS_READ_LOCK;
5842 }
5843 }
5844 ret = 0;
5845 done:
5846 unlock_up(path, 0, 1, 0, NULL);
5847 path->leave_spinning = old_spinning;
5848 if (!old_spinning)
5849 btrfs_set_path_blocking(path);
5850
5851 return ret;
5852 }
5853
5854 /*
5855 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5856 * searching until it gets past min_objectid or finds an item of 'type'
5857 *
5858 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5859 */
5860 int btrfs_previous_item(struct btrfs_root *root,
5861 struct btrfs_path *path, u64 min_objectid,
5862 int type)
5863 {
5864 struct btrfs_key found_key;
5865 struct extent_buffer *leaf;
5866 u32 nritems;
5867 int ret;
5868
5869 while (1) {
5870 if (path->slots[0] == 0) {
5871 btrfs_set_path_blocking(path);
5872 ret = btrfs_prev_leaf(root, path);
5873 if (ret != 0)
5874 return ret;
5875 } else {
5876 path->slots[0]--;
5877 }
5878 leaf = path->nodes[0];
5879 nritems = btrfs_header_nritems(leaf);
5880 if (nritems == 0)
5881 return 1;
5882 if (path->slots[0] == nritems)
5883 path->slots[0]--;
5884
5885 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5886 if (found_key.objectid < min_objectid)
5887 break;
5888 if (found_key.type == type)
5889 return 0;
5890 if (found_key.objectid == min_objectid &&
5891 found_key.type < type)
5892 break;
5893 }
5894 return 1;
5895 }
5896
5897 /*
5898 * search in extent tree to find a previous Metadata/Data extent item with
5899 * min objectid.
5900 *
5901 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5902 */
5903 int btrfs_previous_extent_item(struct btrfs_root *root,
5904 struct btrfs_path *path, u64 min_objectid)
5905 {
5906 struct btrfs_key found_key;
5907 struct extent_buffer *leaf;
5908 u32 nritems;
5909 int ret;
5910
5911 while (1) {
5912 if (path->slots[0] == 0) {
5913 btrfs_set_path_blocking(path);
5914 ret = btrfs_prev_leaf(root, path);
5915 if (ret != 0)
5916 return ret;
5917 } else {
5918 path->slots[0]--;
5919 }
5920 leaf = path->nodes[0];
5921 nritems = btrfs_header_nritems(leaf);
5922 if (nritems == 0)
5923 return 1;
5924 if (path->slots[0] == nritems)
5925 path->slots[0]--;
5926
5927 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5928 if (found_key.objectid < min_objectid)
5929 break;
5930 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5931 found_key.type == BTRFS_METADATA_ITEM_KEY)
5932 return 0;
5933 if (found_key.objectid == min_objectid &&
5934 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5935 break;
5936 }
5937 return 1;
5938 }