/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

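/*
 * Tree modification log.  The structures below record individual changes
 * (key add/remove/replace, key moves and root replacements) made to tree
 * blocks while at least one tree mod sequence number is held, so that an
 * earlier state of a block can be looked up again later.
 */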
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	struct seq_list elem;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void
__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
{
	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}

void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	elem->flags = 1;
	spin_lock(&fs_info->tree_mod_seq_lock);
	__get_tree_mod_seq(fs_info, elem);
	spin_unlock(&fs_info->tree_mod_seq_lock);
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	BUG_ON(!(elem->flags & 1));
	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				goto out;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->elem.seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		list_del(&tm->elem.list);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
out:
	spin_unlock(&fs_info->tree_mod_seq_lock);
}

/*
 * key order of the log:
 * index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;
	int ret = 0;

	BUG_ON(!tm || !tm->elem.seq);

	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->elem.seq < tm->elem.seq)
			new = &((*new)->rb_left);
		else if (cur->elem.seq > tm->elem.seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			ret = -EEXIST;
			goto unlock;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
unlock:
	write_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}

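/*
 * Allocate a tree mod log element and assign it a fresh sequence number.
 * Returns 0 when the log is not in use (no tree mod sequence numbers are
 * held), the new sequence number (> 0) on success, or -ENOMEM.
 */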
int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
		   struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;
	u64 seq = 0;

	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return 0;

	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	__get_tree_mod_seq(fs_info, &tm->elem);
	seq = tm->elem.seq;
	tm->elem.flags = 0;

	return seq;
}

static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}

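/*
 * Log a move of nr_items key pointers within eb from src_slot to dst_slot.
 * The slots between dst_slot and src_slot that the move will overwrite are
 * logged as individual removals first.
 */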
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	return __tree_mod_log_insert(fs_info, tm);
}

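/*
 * Log a root replacement: the new root's logical address is used as the
 * index, while the old root's location, level and generation are recorded
 * so the previous root can be found again.
 */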
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	return __tree_mod_log_insert(fs_info, tm);
}

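/*
 * Search the tree mod log for the block at 'start'.  Only elements with a
 * sequence number of at least min_seq are considered; 'smallest' selects
 * between the oldest and the most recent matching element (see the two
 * wrappers below).
 */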
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->elem.seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->elem.seq > cur->elem.seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->elem.seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->elem.seq < cur->elem.seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

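/*
 * Log a bulk copy of nr_items key pointers from src to dst as per-slot
 * remove/add pairs.  Nothing is logged while the mod log is unused or when
 * both buffers are leaves.
 */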
static inline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	/* speed this up by single seq for all operations? */
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

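/*
 * Log the replacement of the key pointer in 'slot' of 'eb'.  With 'atomic'
 * set the allocation uses GFP_ATOMIC instead of GFP_NOFS.
 */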
static inline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

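/*
 * Log the freeing of a node by recording a removal for each of its key
 * pointers, from the last slot down to slot 0.  Leaves are not logged.
 */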
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	int i;
	int ret;
	u32 nritems;

	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}

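/*
 * Log the replacement of the tree root: first log the freeing of the old
 * root node's pointers, then insert a MOD_LOG_ROOT_REPLACE element pointing
 * from the new root back to the old one.
 */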
static inline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	tree_mod_log_free_eb(root->fs_info, root->node);
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

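/*
 * Update backrefs when 'buf' is about to be replaced by its COW copy 'cow'.
 * *last_ref is set to 1 when this COW drops the last reference to 'buf'.
 * See the "Backrefs update rules" comment in the body.
 */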
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
						      blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}


/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
					sizeof(struct btrfs_disk_key),
					&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
	return -1;
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

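/* adjust the bytes-used accounting kept in the root item */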
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
			       btrfs_level_size(root, level - 1),
			       btrfs_node_ptr_generation(parent, slot));
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(trans, root, path, level + 1, pslot, 1);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

d352ac68
CM
1562/* Node balancing for insertion. Here we only split or push nodes around
1563 * when they are completely full. This is also done top down, so we
1564 * have to be pessimistic.
1565 */
d397712b 1566static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
98ed5174
CM
1567 struct btrfs_root *root,
1568 struct btrfs_path *path, int level)
e66f709b 1569{
5f39d397
CM
1570 struct extent_buffer *right = NULL;
1571 struct extent_buffer *mid;
1572 struct extent_buffer *left = NULL;
1573 struct extent_buffer *parent = NULL;
e66f709b
CM
1574 int ret = 0;
1575 int wret;
1576 int pslot;
1577 int orig_slot = path->slots[level];
e66f709b
CM
1578
1579 if (level == 0)
1580 return 1;
1581
5f39d397 1582 mid = path->nodes[level];
7bb86316 1583 WARN_ON(btrfs_header_generation(mid) != trans->transid);
e66f709b 1584
a05a9bb1 1585 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 1586 parent = path->nodes[level + 1];
a05a9bb1
LZ
1587 pslot = path->slots[level + 1];
1588 }
e66f709b 1589
5f39d397 1590 if (!parent)
e66f709b 1591 return 1;
e66f709b 1592
5f39d397 1593 left = read_node_slot(root, parent, pslot - 1);
e66f709b
CM
1594
1595 /* first, try to make some room in the middle buffer */
5f39d397 1596 if (left) {
e66f709b 1597 u32 left_nr;
925baedd
CM
1598
1599 btrfs_tree_lock(left);
b4ce94de
CM
1600 btrfs_set_lock_blocking(left);
1601
5f39d397 1602 left_nr = btrfs_header_nritems(left);
33ade1f8
CM
1603 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1604 wret = 1;
1605 } else {
5f39d397 1606 ret = btrfs_cow_block(trans, root, left, parent,
9fa8cfe7 1607 pslot - 1, &left);
54aa1f4d
CM
1608 if (ret)
1609 wret = 1;
1610 else {
54aa1f4d 1611 wret = push_node_left(trans, root,
971a1f66 1612 left, mid, 0);
54aa1f4d 1613 }
33ade1f8 1614 }
e66f709b
CM
1615 if (wret < 0)
1616 ret = wret;
1617 if (wret == 0) {
5f39d397 1618 struct btrfs_disk_key disk_key;
e66f709b 1619 orig_slot += left_nr;
5f39d397 1620 btrfs_node_key(mid, &disk_key, 0);
f230475e
JS
1621 tree_mod_log_set_node_key(root->fs_info, parent,
1622 &disk_key, pslot, 0);
5f39d397
CM
1623 btrfs_set_node_key(parent, &disk_key, pslot);
1624 btrfs_mark_buffer_dirty(parent);
1625 if (btrfs_header_nritems(left) > orig_slot) {
1626 path->nodes[level] = left;
e66f709b
CM
1627 path->slots[level + 1] -= 1;
1628 path->slots[level] = orig_slot;
925baedd 1629 btrfs_tree_unlock(mid);
5f39d397 1630 free_extent_buffer(mid);
e66f709b
CM
1631 } else {
1632 orig_slot -=
5f39d397 1633 btrfs_header_nritems(left);
e66f709b 1634 path->slots[level] = orig_slot;
925baedd 1635 btrfs_tree_unlock(left);
5f39d397 1636 free_extent_buffer(left);
e66f709b 1637 }
e66f709b
CM
1638 return 0;
1639 }
925baedd 1640 btrfs_tree_unlock(left);
5f39d397 1641 free_extent_buffer(left);
e66f709b 1642 }
925baedd 1643 right = read_node_slot(root, parent, pslot + 1);
e66f709b
CM
1644
1645 /*
1646 * then try to empty the right most buffer into the middle
1647 */
5f39d397 1648 if (right) {
33ade1f8 1649 u32 right_nr;
b4ce94de 1650
925baedd 1651 btrfs_tree_lock(right);
b4ce94de
CM
1652 btrfs_set_lock_blocking(right);
1653
5f39d397 1654 right_nr = btrfs_header_nritems(right);
33ade1f8
CM
1655 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1656 wret = 1;
1657 } else {
5f39d397
CM
1658 ret = btrfs_cow_block(trans, root, right,
1659 parent, pslot + 1,
9fa8cfe7 1660 &right);
54aa1f4d
CM
1661 if (ret)
1662 wret = 1;
1663 else {
54aa1f4d 1664 wret = balance_node_right(trans, root,
5f39d397 1665 right, mid);
54aa1f4d 1666 }
33ade1f8 1667 }
e66f709b
CM
1668 if (wret < 0)
1669 ret = wret;
1670 if (wret == 0) {
5f39d397
CM
1671 struct btrfs_disk_key disk_key;
1672
1673 btrfs_node_key(right, &disk_key, 0);
f230475e
JS
1674 tree_mod_log_set_node_key(root->fs_info, parent,
1675 &disk_key, pslot + 1, 0);
5f39d397
CM
1676 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1677 btrfs_mark_buffer_dirty(parent);
1678
1679 if (btrfs_header_nritems(mid) <= orig_slot) {
1680 path->nodes[level] = right;
e66f709b
CM
1681 path->slots[level + 1] += 1;
1682 path->slots[level] = orig_slot -
5f39d397 1683 btrfs_header_nritems(mid);
925baedd 1684 btrfs_tree_unlock(mid);
5f39d397 1685 free_extent_buffer(mid);
e66f709b 1686 } else {
925baedd 1687 btrfs_tree_unlock(right);
5f39d397 1688 free_extent_buffer(right);
e66f709b 1689 }
e66f709b
CM
1690 return 0;
1691 }
925baedd 1692 btrfs_tree_unlock(right);
5f39d397 1693 free_extent_buffer(right);
e66f709b 1694 }
e66f709b
CM
1695 return 1;
1696}
1697
3c69faec 1698/*
d352ac68
CM
1699 * readahead one full node of leaves, finding things that are close
1700 * to the block in 'slot', and triggering ra on them.
3c69faec 1701 */
c8c42864
CM
1702static void reada_for_search(struct btrfs_root *root,
1703 struct btrfs_path *path,
1704 int level, int slot, u64 objectid)
3c69faec 1705{
5f39d397 1706 struct extent_buffer *node;
01f46658 1707 struct btrfs_disk_key disk_key;
3c69faec 1708 u32 nritems;
3c69faec 1709 u64 search;
a7175319 1710 u64 target;
6b80053d 1711 u64 nread = 0;
cb25c2ea 1712 u64 gen;
3c69faec 1713 int direction = path->reada;
5f39d397 1714 struct extent_buffer *eb;
6b80053d
CM
1715 u32 nr;
1716 u32 blocksize;
1717 u32 nscan = 0;
db94535d 1718
a6b6e75e 1719 if (level != 1)
6702ed49
CM
1720 return;
1721
1722 if (!path->nodes[level])
3c69faec
CM
1723 return;
1724
5f39d397 1725 node = path->nodes[level];
925baedd 1726
3c69faec 1727 search = btrfs_node_blockptr(node, slot);
6b80053d
CM
1728 blocksize = btrfs_level_size(root, level - 1);
1729 eb = btrfs_find_tree_block(root, search, blocksize);
5f39d397
CM
1730 if (eb) {
1731 free_extent_buffer(eb);
3c69faec
CM
1732 return;
1733 }
1734
a7175319 1735 target = search;
6b80053d 1736
5f39d397 1737 nritems = btrfs_header_nritems(node);
6b80053d 1738 nr = slot;
25b8b936 1739
d397712b 1740 while (1) {
6b80053d
CM
1741 if (direction < 0) {
1742 if (nr == 0)
1743 break;
1744 nr--;
1745 } else if (direction > 0) {
1746 nr++;
1747 if (nr >= nritems)
1748 break;
3c69faec 1749 }
01f46658
CM
1750 if (path->reada < 0 && objectid) {
1751 btrfs_node_key(node, &disk_key, nr);
1752 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1753 break;
1754 }
6b80053d 1755 search = btrfs_node_blockptr(node, nr);
a7175319
CM
1756 if ((search <= target && target - search <= 65536) ||
1757 (search > target && search - target <= 65536)) {
cb25c2ea 1758 gen = btrfs_node_ptr_generation(node, nr);
cb25c2ea 1759 readahead_tree_block(root, search, blocksize, gen);
6b80053d
CM
1760 nread += blocksize;
1761 }
1762 nscan++;
a7175319 1763 if ((nread > 65536 || nscan > 32))
6b80053d 1764 break;
3c69faec
CM
1765 }
1766}
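
/*
 * A rough worked example of the window above, assuming 4KiB tree blocks:
 * starting from the block pointer in 'slot', the loop walks the
 * neighbouring slots (up or down depending on path->reada) and only issues
 * readahead for pointers that sit within 64KiB of the target on disk, i.e.
 * at most sixteen 4KiB blocks to either side.  It stops once 64KiB worth of
 * blocks have been queued or 32 slots have been scanned, whichever comes
 * first, so a badly fragmented node costs very little extra work here.
 */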
925baedd 1767
b4ce94de
CM
1768/*
1769 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1770 * cache
1771 */
1772static noinline int reada_for_balance(struct btrfs_root *root,
1773 struct btrfs_path *path, int level)
1774{
1775 int slot;
1776 int nritems;
1777 struct extent_buffer *parent;
1778 struct extent_buffer *eb;
1779 u64 gen;
1780 u64 block1 = 0;
1781 u64 block2 = 0;
1782 int ret = 0;
1783 int blocksize;
1784
8c594ea8 1785 parent = path->nodes[level + 1];
b4ce94de
CM
1786 if (!parent)
1787 return 0;
1788
1789 nritems = btrfs_header_nritems(parent);
8c594ea8 1790 slot = path->slots[level + 1];
b4ce94de
CM
1791 blocksize = btrfs_level_size(root, level);
1792
1793 if (slot > 0) {
1794 block1 = btrfs_node_blockptr(parent, slot - 1);
1795 gen = btrfs_node_ptr_generation(parent, slot - 1);
1796 eb = btrfs_find_tree_block(root, block1, blocksize);
b9fab919
CM
1797 /*
1798 * if we get -EAGAIN from btrfs_buffer_uptodate, we
1799 * don't want to return -EAGAIN here. That will loop
1800 * forever
1801 */
1802 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
1803 block1 = 0;
1804 free_extent_buffer(eb);
1805 }
8c594ea8 1806 if (slot + 1 < nritems) {
b4ce94de
CM
1807 block2 = btrfs_node_blockptr(parent, slot + 1);
1808 gen = btrfs_node_ptr_generation(parent, slot + 1);
1809 eb = btrfs_find_tree_block(root, block2, blocksize);
b9fab919 1810 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
1811 block2 = 0;
1812 free_extent_buffer(eb);
1813 }
1814 if (block1 || block2) {
1815 ret = -EAGAIN;
8c594ea8
CM
1816
1817 /* release the whole path */
b3b4aa74 1818 btrfs_release_path(path);
8c594ea8
CM
1819
1820 /* read the blocks */
b4ce94de
CM
1821 if (block1)
1822 readahead_tree_block(root, block1, blocksize, 0);
1823 if (block2)
1824 readahead_tree_block(root, block2, blocksize, 0);
1825
1826 if (block1) {
1827 eb = read_tree_block(root, block1, blocksize, 0);
1828 free_extent_buffer(eb);
1829 }
8c594ea8 1830 if (block2) {
b4ce94de
CM
1831 eb = read_tree_block(root, block2, blocksize, 0);
1832 free_extent_buffer(eb);
1833 }
1834 }
1835 return ret;
1836}
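
/*
 * Note the calling convention this implies: a non-zero return means the
 * path was released while the sibling blocks were read in, so callers retry
 * the whole descent rather than continuing, roughly
 *
 *	sret = reada_for_balance(root, p, level);
 *	if (sret)
 *		goto again;
 *
 * which is exactly what setup_nodes_for_search() below does.
 */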
1837
1838
d352ac68 1839/*
d397712b
CM
1840 * when we walk down the tree, it is usually safe to unlock the higher layers
1841 * in the tree. The exceptions are when our path goes through slot 0, because
1842 * operations on the tree might require changing key pointers higher up in the
1843 * tree.
d352ac68 1844 *
d397712b
CM
1845 * callers might also have set path->keep_locks, which tells this code to keep
1846 * the lock if the path points to the last slot in the block. This is part of
1847 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 1848 *
d397712b
CM
1849 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1850 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 1851 */
e02119d5 1852static noinline void unlock_up(struct btrfs_path *path, int level,
f7c79f30
CM
1853 int lowest_unlock, int min_write_lock_level,
1854 int *write_lock_level)
925baedd
CM
1855{
1856 int i;
1857 int skip_level = level;
051e1b9f 1858 int no_skips = 0;
925baedd
CM
1859 struct extent_buffer *t;
1860
1861 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1862 if (!path->nodes[i])
1863 break;
1864 if (!path->locks[i])
1865 break;
051e1b9f 1866 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
1867 skip_level = i + 1;
1868 continue;
1869 }
051e1b9f 1870 if (!no_skips && path->keep_locks) {
925baedd
CM
1871 u32 nritems;
1872 t = path->nodes[i];
1873 nritems = btrfs_header_nritems(t);
051e1b9f 1874 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
1875 skip_level = i + 1;
1876 continue;
1877 }
1878 }
051e1b9f
CM
1879 if (skip_level < i && i >= lowest_unlock)
1880 no_skips = 1;
1881
925baedd
CM
1882 t = path->nodes[i];
1883 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
bd681513 1884 btrfs_tree_unlock_rw(t, path->locks[i]);
925baedd 1885 path->locks[i] = 0;
f7c79f30
CM
1886 if (write_lock_level &&
1887 i > min_write_lock_level &&
1888 i <= *write_lock_level) {
1889 *write_lock_level = i - 1;
1890 }
925baedd
CM
1891 }
1892 }
1893}
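
/*
 * A short illustration of the slot 0 rule above: when the path goes through
 * slot 0 of a node, a key change further down can bubble up into that
 * node's lowest key and from there into its parent's pointer key, so
 * unlock_up() bumps skip_level and keeps those locks.  Through any other
 * slot no such ripple is possible, and every level at or above
 * lowest_unlock that skip_level doesn't protect is unlocked on the spot.
 */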
1894
b4ce94de
CM
1895/*
1896 * This releases any locks held in the path starting at level and
1897 * going all the way up to the root.
1898 *
1899 * btrfs_search_slot will keep the lock held on higher nodes in a few
1900 * corner cases, such as COW of the block at slot zero in the node. This
1901 * ignores those rules, and it should only be called when there are no
1902 * more updates to be done higher up in the tree.
1903 */
1904noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1905{
1906 int i;
1907
5d4f98a2 1908 if (path->keep_locks)
b4ce94de
CM
1909 return;
1910
1911 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1912 if (!path->nodes[i])
12f4dacc 1913 continue;
b4ce94de 1914 if (!path->locks[i])
12f4dacc 1915 continue;
bd681513 1916 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
b4ce94de
CM
1917 path->locks[i] = 0;
1918 }
1919}
1920
c8c42864
CM
1921/*
1922 * helper function for btrfs_search_slot. The goal is to find a block
1923 * in cache without setting the path to blocking. If we find the block
1924 * we return zero and the path is unchanged.
1925 *
1926 * If we can't find the block, we set the path blocking and do some
1927 * reada. -EAGAIN is returned and the search must be repeated.
1928 */
1929static int
1930read_block_for_search(struct btrfs_trans_handle *trans,
1931 struct btrfs_root *root, struct btrfs_path *p,
1932 struct extent_buffer **eb_ret, int level, int slot,
1933 struct btrfs_key *key)
1934{
1935 u64 blocknr;
1936 u64 gen;
1937 u32 blocksize;
1938 struct extent_buffer *b = *eb_ret;
1939 struct extent_buffer *tmp;
76a05b35 1940 int ret;
c8c42864
CM
1941
1942 blocknr = btrfs_node_blockptr(b, slot);
1943 gen = btrfs_node_ptr_generation(b, slot);
1944 blocksize = btrfs_level_size(root, level - 1);
1945
1946 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
cb44921a 1947 if (tmp) {
b9fab919
CM
1948 /* first we do an atomic uptodate check */
1949 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
1950 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
cb44921a
CM
1951 /*
1952 * we found an up to date block without
1953 * sleeping, return
1954 * right away
1955 */
1956 *eb_ret = tmp;
1957 return 0;
1958 }
1959 /* the pages were up to date, but we failed
1960 * the generation number check. Do a full
1961 * read for the generation number that is correct.
1962 * We must do this without dropping locks so
1963 * we can trust our generation number
1964 */
1965 free_extent_buffer(tmp);
bd681513
CM
1966 btrfs_set_path_blocking(p);
1967
b9fab919 1968 /* now we're allowed to do a blocking uptodate check */
cb44921a 1969 tmp = read_tree_block(root, blocknr, blocksize, gen);
b9fab919 1970 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
cb44921a
CM
1971 *eb_ret = tmp;
1972 return 0;
1973 }
1974 free_extent_buffer(tmp);
b3b4aa74 1975 btrfs_release_path(p);
cb44921a
CM
1976 return -EIO;
1977 }
c8c42864
CM
1978 }
1979
1980 /*
1981 * reduce lock contention at high levels
1982 * of the btree by dropping locks before
76a05b35
CM
1983 * we read. Don't release the lock on the current
1984 * level because we need to walk this node to figure
1985 * out which blocks to read.
c8c42864 1986 */
8c594ea8
CM
1987 btrfs_unlock_up_safe(p, level + 1);
1988 btrfs_set_path_blocking(p);
1989
cb44921a 1990 free_extent_buffer(tmp);
c8c42864
CM
1991 if (p->reada)
1992 reada_for_search(root, p, level, slot, key->objectid);
1993
b3b4aa74 1994 btrfs_release_path(p);
76a05b35
CM
1995
1996 ret = -EAGAIN;
5bdd3536 1997 tmp = read_tree_block(root, blocknr, blocksize, 0);
76a05b35
CM
1998 if (tmp) {
1999 /*
2000 * If the read above didn't mark this buffer up to date,
2001 * it will never end up being up to date. Set ret to EIO now
2002 * and give up so that our caller doesn't loop forever
2003 * on our EAGAINs.
2004 */
b9fab919 2005 if (!btrfs_buffer_uptodate(tmp, 0, 0))
76a05b35 2006 ret = -EIO;
c8c42864 2007 free_extent_buffer(tmp);
76a05b35
CM
2008 }
2009 return ret;
c8c42864
CM
2010}
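
/*
 * To recap the contract of the helper above: 0 means *eb_ret now points to
 * an up to date copy of the child and the path was kept, -EAGAIN means the
 * path was released while the block was read (or queued for readahead) and
 * the whole search has to start over, and -EIO means the block could never
 * be brought up to date.  btrfs_search_slot() below handles all three with
 * its "goto again" / "goto done" pattern.
 */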
2011
2012/*
2013 * helper function for btrfs_search_slot. This does all of the checks
2014 * for node-level blocks and does any balancing required based on
2015 * the ins_len.
2016 *
2017 * If no extra work was required, zero is returned. If we had to
2018 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2019 * start over
2020 */
2021static int
2022setup_nodes_for_search(struct btrfs_trans_handle *trans,
2023 struct btrfs_root *root, struct btrfs_path *p,
bd681513
CM
2024 struct extent_buffer *b, int level, int ins_len,
2025 int *write_lock_level)
c8c42864
CM
2026{
2027 int ret;
2028 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2029 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2030 int sret;
2031
bd681513
CM
2032 if (*write_lock_level < level + 1) {
2033 *write_lock_level = level + 1;
2034 btrfs_release_path(p);
2035 goto again;
2036 }
2037
c8c42864
CM
2038 sret = reada_for_balance(root, p, level);
2039 if (sret)
2040 goto again;
2041
2042 btrfs_set_path_blocking(p);
2043 sret = split_node(trans, root, p, level);
bd681513 2044 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2045
2046 BUG_ON(sret > 0);
2047 if (sret) {
2048 ret = sret;
2049 goto done;
2050 }
2051 b = p->nodes[level];
2052 } else if (ins_len < 0 && btrfs_header_nritems(b) <
cfbb9308 2053 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
c8c42864
CM
2054 int sret;
2055
bd681513
CM
2056 if (*write_lock_level < level + 1) {
2057 *write_lock_level = level + 1;
2058 btrfs_release_path(p);
2059 goto again;
2060 }
2061
c8c42864
CM
2062 sret = reada_for_balance(root, p, level);
2063 if (sret)
2064 goto again;
2065
2066 btrfs_set_path_blocking(p);
2067 sret = balance_level(trans, root, p, level);
bd681513 2068 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2069
2070 if (sret) {
2071 ret = sret;
2072 goto done;
2073 }
2074 b = p->nodes[level];
2075 if (!b) {
b3b4aa74 2076 btrfs_release_path(p);
c8c42864
CM
2077 goto again;
2078 }
2079 BUG_ON(btrfs_header_nritems(b) == 1);
2080 }
2081 return 0;
2082
2083again:
2084 ret = -EAGAIN;
2085done:
2086 return ret;
2087}
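
/*
 * The two thresholds above in concrete terms, assuming the common 4KiB
 * nodesize where a node holds roughly 121 key pointers: an insertion that
 * walks through a node with ~118 or more pointers splits it pre-emptively,
 * and a deletion that walks through a node with fewer than ~60 pointers
 * rebalances it against its siblings.  Handling this on the way down is
 * what lets btrfs_search_slot() avoid walking back up the tree afterwards.
 * (The exact numbers scale with the nodesize; these are only meant as an
 * illustration.)
 */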
2088
74123bd7
CM
2089/*
2090 * look for key in the tree. path is filled in with nodes along the way
2091 * if key is found, we return zero and you can find the item in the leaf
2092 * level of the path (level 0)
2093 *
2094 * If the key isn't found, the path points to the slot where it should
aa5d6bed
CM
2095 * be inserted, and 1 is returned. If there are other errors during the
2096 * search a negative error number is returned.
97571fd0
CM
2097 *
2098 * if ins_len > 0, nodes and leaves will be split as we walk down the
2099 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2100 * possible)
74123bd7 2101 */
e089f05c
CM
2102int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2103 *root, struct btrfs_key *key, struct btrfs_path *p, int
2104 ins_len, int cow)
be0e5c09 2105{
5f39d397 2106 struct extent_buffer *b;
be0e5c09
CM
2107 int slot;
2108 int ret;
33c66f43 2109 int err;
be0e5c09 2110 int level;
925baedd 2111 int lowest_unlock = 1;
bd681513
CM
2112 int root_lock;
2113 /* everything at write_lock_level or lower must be write locked */
2114 int write_lock_level = 0;
9f3a7427 2115 u8 lowest_level = 0;
f7c79f30 2116 int min_write_lock_level;
9f3a7427 2117
6702ed49 2118 lowest_level = p->lowest_level;
323ac95b 2119 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 2120 WARN_ON(p->nodes[0] != NULL);
25179201 2121
bd681513 2122 if (ins_len < 0) {
925baedd 2123 lowest_unlock = 2;
65b51a00 2124
bd681513
CM
2125 /* when we are removing items, we might have to go up to level
2126 * two as we update tree pointers. Make sure we keep write locks
2127 * on those levels as well
2128 */
2129 write_lock_level = 2;
2130 } else if (ins_len > 0) {
2131 /*
2132 * for inserting items, make sure we have a write lock on
2133 * level 1 so we can update keys
2134 */
2135 write_lock_level = 1;
2136 }
2137
2138 if (!cow)
2139 write_lock_level = -1;
2140
2141 if (cow && (p->keep_locks || p->lowest_level))
2142 write_lock_level = BTRFS_MAX_LEVEL;
2143
f7c79f30
CM
2144 min_write_lock_level = write_lock_level;
2145
bb803951 2146again:
bd681513
CM
2147 /*
2148 * we try very hard to do read locks on the root
2149 */
2150 root_lock = BTRFS_READ_LOCK;
2151 level = 0;
5d4f98a2 2152 if (p->search_commit_root) {
bd681513
CM
2153 /*
2154 * the commit roots are read only
2155 * so we always do read locks
2156 */
5d4f98a2
YZ
2157 b = root->commit_root;
2158 extent_buffer_get(b);
bd681513 2159 level = btrfs_header_level(b);
5d4f98a2 2160 if (!p->skip_locking)
bd681513 2161 btrfs_tree_read_lock(b);
5d4f98a2 2162 } else {
bd681513 2163 if (p->skip_locking) {
5d4f98a2 2164 b = btrfs_root_node(root);
bd681513
CM
2165 level = btrfs_header_level(b);
2166 } else {
2167 /* we don't know the level of the root node
2168 * until we actually have it read locked
2169 */
2170 b = btrfs_read_lock_root_node(root);
2171 level = btrfs_header_level(b);
2172 if (level <= write_lock_level) {
2173 /* whoops, must trade for write lock */
2174 btrfs_tree_read_unlock(b);
2175 free_extent_buffer(b);
2176 b = btrfs_lock_root_node(root);
2177 root_lock = BTRFS_WRITE_LOCK;
2178
2179 /* the level might have changed, check again */
2180 level = btrfs_header_level(b);
2181 }
2182 }
5d4f98a2 2183 }
bd681513
CM
2184 p->nodes[level] = b;
2185 if (!p->skip_locking)
2186 p->locks[level] = root_lock;
925baedd 2187
eb60ceac 2188 while (b) {
5f39d397 2189 level = btrfs_header_level(b);
65b51a00
CM
2190
2191 /*
2192 * setup the path here so we can release it under lock
2193 * contention with the cow code
2194 */
02217ed2 2195 if (cow) {
c8c42864
CM
2196 /*
2197 * if we don't really need to cow this block
2198 * then we don't want to set the path blocking,
2199 * so we test it here
2200 */
5d4f98a2 2201 if (!should_cow_block(trans, root, b))
65b51a00 2202 goto cow_done;
5d4f98a2 2203
b4ce94de
CM
2204 btrfs_set_path_blocking(p);
2205
bd681513
CM
2206 /*
2207 * must have write locks on this node and the
2208 * parent
2209 */
2210 if (level + 1 > write_lock_level) {
2211 write_lock_level = level + 1;
2212 btrfs_release_path(p);
2213 goto again;
2214 }
2215
33c66f43
YZ
2216 err = btrfs_cow_block(trans, root, b,
2217 p->nodes[level + 1],
2218 p->slots[level + 1], &b);
2219 if (err) {
33c66f43 2220 ret = err;
65b51a00 2221 goto done;
54aa1f4d 2222 }
02217ed2 2223 }
65b51a00 2224cow_done:
02217ed2 2225 BUG_ON(!cow && ins_len);
65b51a00 2226
eb60ceac 2227 p->nodes[level] = b;
bd681513 2228 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de
CM
2229
2230 /*
2231 * we have a lock on b and as long as we aren't changing
2232 * the tree, there is no way for the items in b to change.
2233 * It is safe to drop the lock on our parent before we
2234 * go through the expensive btree search on b.
2235 *
2236 * If cow is true, then we might be changing slot zero,
2237 * which may require changing the parent. So, we can't
2238 * drop the lock until after we know which slot we're
2239 * operating on.
2240 */
2241 if (!cow)
2242 btrfs_unlock_up_safe(p, level + 1);
2243
5f39d397 2244 ret = bin_search(b, key, level, &slot);
b4ce94de 2245
5f39d397 2246 if (level != 0) {
33c66f43
YZ
2247 int dec = 0;
2248 if (ret && slot > 0) {
2249 dec = 1;
be0e5c09 2250 slot -= 1;
33c66f43 2251 }
be0e5c09 2252 p->slots[level] = slot;
33c66f43 2253 err = setup_nodes_for_search(trans, root, p, b, level,
bd681513 2254 ins_len, &write_lock_level);
33c66f43 2255 if (err == -EAGAIN)
c8c42864 2256 goto again;
33c66f43
YZ
2257 if (err) {
2258 ret = err;
c8c42864 2259 goto done;
33c66f43 2260 }
c8c42864
CM
2261 b = p->nodes[level];
2262 slot = p->slots[level];
b4ce94de 2263
bd681513
CM
2264 /*
2265 * slot 0 is special, if we change the key
2266 * we have to update the parent pointer
2267 * which means we must have a write lock
2268 * on the parent
2269 */
2270 if (slot == 0 && cow &&
2271 write_lock_level < level + 1) {
2272 write_lock_level = level + 1;
2273 btrfs_release_path(p);
2274 goto again;
2275 }
2276
f7c79f30
CM
2277 unlock_up(p, level, lowest_unlock,
2278 min_write_lock_level, &write_lock_level);
f9efa9c7 2279
925baedd 2280 if (level == lowest_level) {
33c66f43
YZ
2281 if (dec)
2282 p->slots[level]++;
5b21f2ed 2283 goto done;
925baedd 2284 }
ca7a79ad 2285
33c66f43 2286 err = read_block_for_search(trans, root, p,
c8c42864 2287 &b, level, slot, key);
33c66f43 2288 if (err == -EAGAIN)
c8c42864 2289 goto again;
33c66f43
YZ
2290 if (err) {
2291 ret = err;
76a05b35 2292 goto done;
33c66f43 2293 }
76a05b35 2294
b4ce94de 2295 if (!p->skip_locking) {
bd681513
CM
2296 level = btrfs_header_level(b);
2297 if (level <= write_lock_level) {
2298 err = btrfs_try_tree_write_lock(b);
2299 if (!err) {
2300 btrfs_set_path_blocking(p);
2301 btrfs_tree_lock(b);
2302 btrfs_clear_path_blocking(p, b,
2303 BTRFS_WRITE_LOCK);
2304 }
2305 p->locks[level] = BTRFS_WRITE_LOCK;
2306 } else {
2307 err = btrfs_try_tree_read_lock(b);
2308 if (!err) {
2309 btrfs_set_path_blocking(p);
2310 btrfs_tree_read_lock(b);
2311 btrfs_clear_path_blocking(p, b,
2312 BTRFS_READ_LOCK);
2313 }
2314 p->locks[level] = BTRFS_READ_LOCK;
b4ce94de 2315 }
bd681513 2316 p->nodes[level] = b;
b4ce94de 2317 }
be0e5c09
CM
2318 } else {
2319 p->slots[level] = slot;
87b29b20
YZ
2320 if (ins_len > 0 &&
2321 btrfs_leaf_free_space(root, b) < ins_len) {
bd681513
CM
2322 if (write_lock_level < 1) {
2323 write_lock_level = 1;
2324 btrfs_release_path(p);
2325 goto again;
2326 }
2327
b4ce94de 2328 btrfs_set_path_blocking(p);
33c66f43
YZ
2329 err = split_leaf(trans, root, key,
2330 p, ins_len, ret == 0);
bd681513 2331 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de 2332
33c66f43
YZ
2333 BUG_ON(err > 0);
2334 if (err) {
2335 ret = err;
65b51a00
CM
2336 goto done;
2337 }
5c680ed6 2338 }
459931ec 2339 if (!p->search_for_split)
f7c79f30
CM
2340 unlock_up(p, level, lowest_unlock,
2341 min_write_lock_level, &write_lock_level);
65b51a00 2342 goto done;
be0e5c09
CM
2343 }
2344 }
65b51a00
CM
2345 ret = 1;
2346done:
b4ce94de
CM
2347 /*
2348 * we don't really know what they plan on doing with the path
2349 * from here on, so for now just mark it as blocking
2350 */
b9473439
CM
2351 if (!p->leave_spinning)
2352 btrfs_set_path_blocking(p);
76a05b35 2353 if (ret < 0)
b3b4aa74 2354 btrfs_release_path(p);
65b51a00 2355 return ret;
be0e5c09
CM
2356}
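
/*
 * A minimal sketch of how the search interface above is normally used for a
 * read-only lookup; a NULL trans handle together with ins_len == 0 and
 * cow == 0 guarantees nothing in the tree is modified:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		item found at path->nodes[0], slot path->slots[0]
 *	else if (ret == 1)
 *		key absent, the slot is where it would be inserted
 *	btrfs_free_path(path);
 *
 * btrfs_free_path() drops any locks still held by the path before freeing
 * it, so it is always safe to call after a search.
 */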
2357
74123bd7
CM
2358/*
2359 * adjust the pointers going up the tree, starting at level
2360 * making sure the right key of each node points to 'key'.
2361 * This is used after shifting pointers to the left, so it stops
2362 * fixing up pointers when a given leaf/node is not in slot 0 of the
2363 * higher levels
aa5d6bed 2364 *
74123bd7 2365 */
143bede5
JM
2366static void fixup_low_keys(struct btrfs_trans_handle *trans,
2367 struct btrfs_root *root, struct btrfs_path *path,
2368 struct btrfs_disk_key *key, int level)
be0e5c09
CM
2369{
2370 int i;
5f39d397
CM
2371 struct extent_buffer *t;
2372
234b63a0 2373 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 2374 int tslot = path->slots[i];
eb60ceac 2375 if (!path->nodes[i])
be0e5c09 2376 break;
5f39d397 2377 t = path->nodes[i];
f230475e 2378 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
5f39d397 2379 btrfs_set_node_key(t, key, tslot);
d6025579 2380 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
2381 if (tslot != 0)
2382 break;
2383 }
2384}
2385
31840ae1
ZY
2386/*
2387 * update item key.
2388 *
2389 * This function isn't completely safe. It's the caller's responsibility
2390 * to ensure that the new key won't break the ordering.
2391 */
143bede5
JM
2392void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2393 struct btrfs_root *root, struct btrfs_path *path,
2394 struct btrfs_key *new_key)
31840ae1
ZY
2395{
2396 struct btrfs_disk_key disk_key;
2397 struct extent_buffer *eb;
2398 int slot;
2399
2400 eb = path->nodes[0];
2401 slot = path->slots[0];
2402 if (slot > 0) {
2403 btrfs_item_key(eb, &disk_key, slot - 1);
143bede5 2404 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
31840ae1
ZY
2405 }
2406 if (slot < btrfs_header_nritems(eb) - 1) {
2407 btrfs_item_key(eb, &disk_key, slot + 1);
143bede5 2408 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
31840ae1
ZY
2409 }
2410
2411 btrfs_cpu_key_to_disk(&disk_key, new_key);
2412 btrfs_set_item_key(eb, &disk_key, slot);
2413 btrfs_mark_buffer_dirty(eb);
2414 if (slot == 0)
2415 fixup_low_keys(trans, root, path, &disk_key, 1);
31840ae1
ZY
2416}
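
/*
 * For example, an item keyed (258 EXTENT_DATA 4096) that sits between
 * (258 EXTENT_DATA 0) and (258 EXTENT_DATA 8192) may safely be re-keyed to
 * any offset strictly between its neighbours; the BUG_ON()s above enforce
 * exactly that ordering, and when the item is in slot 0 the new key is also
 * pushed up into the parent via fixup_low_keys().
 */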
2417
74123bd7
CM
2418/*
2419 * try to push data from one node into the next node left in the
79f95c82 2420 * tree.
aa5d6bed
CM
2421 *
2422 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2423 * error, and > 0 if there was no room in the left hand block.
74123bd7 2424 */
98ed5174
CM
2425static int push_node_left(struct btrfs_trans_handle *trans,
2426 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 2427 struct extent_buffer *src, int empty)
be0e5c09 2428{
be0e5c09 2429 int push_items = 0;
bb803951
CM
2430 int src_nritems;
2431 int dst_nritems;
aa5d6bed 2432 int ret = 0;
be0e5c09 2433
5f39d397
CM
2434 src_nritems = btrfs_header_nritems(src);
2435 dst_nritems = btrfs_header_nritems(dst);
123abc88 2436 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
2437 WARN_ON(btrfs_header_generation(src) != trans->transid);
2438 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 2439
bce4eae9 2440 if (!empty && src_nritems <= 8)
971a1f66
CM
2441 return 1;
2442
d397712b 2443 if (push_items <= 0)
be0e5c09
CM
2444 return 1;
2445
bce4eae9 2446 if (empty) {
971a1f66 2447 push_items = min(src_nritems, push_items);
bce4eae9
CM
2448 if (push_items < src_nritems) {
2449 /* leave at least 8 pointers in the node if
2450 * we aren't going to empty it
2451 */
2452 if (src_nritems - push_items < 8) {
2453 if (push_items <= 8)
2454 return 1;
2455 push_items -= 8;
2456 }
2457 }
2458 } else
2459 push_items = min(src_nritems - 8, push_items);
79f95c82 2460
f230475e
JS
2461 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2462 push_items);
5f39d397
CM
2463 copy_extent_buffer(dst, src,
2464 btrfs_node_key_ptr_offset(dst_nritems),
2465 btrfs_node_key_ptr_offset(0),
d397712b 2466 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 2467
bb803951 2468 if (push_items < src_nritems) {
f230475e
JS
2469 tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2470 src_nritems - push_items);
5f39d397
CM
2471 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2472 btrfs_node_key_ptr_offset(push_items),
2473 (src_nritems - push_items) *
2474 sizeof(struct btrfs_key_ptr));
2475 }
2476 btrfs_set_header_nritems(src, src_nritems - push_items);
2477 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2478 btrfs_mark_buffer_dirty(src);
2479 btrfs_mark_buffer_dirty(dst);
31840ae1 2480
79f95c82
CM
2481 return ret;
2482}
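
/*
 * The magic number 8 above is only a heuristic: unless the caller asked for
 * the source node to be emptied, at least 8 pointers are left behind so a
 * node that was barely under the balance threshold doesn't end up almost
 * empty and immediately in need of another rebalance.  For instance,
 * pushing left from a 10-pointer node into a half-full neighbour moves at
 * most min(10 - 8, free slots) = 2 pointers.
 */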
2483
2484/*
2485 * try to push data from one node into the next node right in the
2486 * tree.
2487 *
2488 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2489 * error, and > 0 if there was no room in the right hand block.
2490 *
2491 * this will only push up to 1/2 the contents of the left node over
2492 */
5f39d397
CM
2493static int balance_node_right(struct btrfs_trans_handle *trans,
2494 struct btrfs_root *root,
2495 struct extent_buffer *dst,
2496 struct extent_buffer *src)
79f95c82 2497{
79f95c82
CM
2498 int push_items = 0;
2499 int max_push;
2500 int src_nritems;
2501 int dst_nritems;
2502 int ret = 0;
79f95c82 2503
7bb86316
CM
2504 WARN_ON(btrfs_header_generation(src) != trans->transid);
2505 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2506
5f39d397
CM
2507 src_nritems = btrfs_header_nritems(src);
2508 dst_nritems = btrfs_header_nritems(dst);
123abc88 2509 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 2510 if (push_items <= 0)
79f95c82 2511 return 1;
bce4eae9 2512
d397712b 2513 if (src_nritems < 4)
bce4eae9 2514 return 1;
79f95c82
CM
2515
2516 max_push = src_nritems / 2 + 1;
2517 /* don't try to empty the node */
d397712b 2518 if (max_push >= src_nritems)
79f95c82 2519 return 1;
252c38f0 2520
79f95c82
CM
2521 if (max_push < push_items)
2522 push_items = max_push;
2523
f230475e 2524 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
5f39d397
CM
2525 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2526 btrfs_node_key_ptr_offset(0),
2527 (dst_nritems) *
2528 sizeof(struct btrfs_key_ptr));
d6025579 2529
f230475e
JS
2530 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2531 src_nritems - push_items, push_items);
5f39d397
CM
2532 copy_extent_buffer(dst, src,
2533 btrfs_node_key_ptr_offset(0),
2534 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 2535 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 2536
5f39d397
CM
2537 btrfs_set_header_nritems(src, src_nritems - push_items);
2538 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 2539
5f39d397
CM
2540 btrfs_mark_buffer_dirty(src);
2541 btrfs_mark_buffer_dirty(dst);
31840ae1 2542
aa5d6bed 2543 return ret;
be0e5c09
CM
2544}
2545
97571fd0
CM
2546/*
2547 * helper function to insert a new root level in the tree.
2548 * A new node is allocated, and a single item is inserted to
2549 * point to the existing root
aa5d6bed
CM
2550 *
2551 * returns zero on success or < 0 on failure.
97571fd0 2552 */
d397712b 2553static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397
CM
2554 struct btrfs_root *root,
2555 struct btrfs_path *path, int level)
5c680ed6 2556{
7bb86316 2557 u64 lower_gen;
5f39d397
CM
2558 struct extent_buffer *lower;
2559 struct extent_buffer *c;
925baedd 2560 struct extent_buffer *old;
5f39d397 2561 struct btrfs_disk_key lower_key;
5c680ed6
CM
2562
2563 BUG_ON(path->nodes[level]);
2564 BUG_ON(path->nodes[level-1] != root->node);
2565
7bb86316
CM
2566 lower = path->nodes[level-1];
2567 if (level == 1)
2568 btrfs_item_key(lower, &lower_key, 0);
2569 else
2570 btrfs_node_key(lower, &lower_key, 0);
2571
31840ae1 2572 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 2573 root->root_key.objectid, &lower_key,
5581a51a 2574 level, root->node->start, 0);
5f39d397
CM
2575 if (IS_ERR(c))
2576 return PTR_ERR(c);
925baedd 2577
f0486c68
YZ
2578 root_add_used(root, root->nodesize);
2579
5d4f98a2 2580 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
2581 btrfs_set_header_nritems(c, 1);
2582 btrfs_set_header_level(c, level);
db94535d 2583 btrfs_set_header_bytenr(c, c->start);
5f39d397 2584 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 2585 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 2586 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
2587
2588 write_extent_buffer(c, root->fs_info->fsid,
2589 (unsigned long)btrfs_header_fsid(c),
2590 BTRFS_FSID_SIZE);
e17cade2
CM
2591
2592 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2593 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2594 BTRFS_UUID_SIZE);
2595
5f39d397 2596 btrfs_set_node_key(c, &lower_key, 0);
db94535d 2597 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 2598 lower_gen = btrfs_header_generation(lower);
31840ae1 2599 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
2600
2601 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 2602
5f39d397 2603 btrfs_mark_buffer_dirty(c);
d5719762 2604
925baedd 2605 old = root->node;
f230475e 2606 tree_mod_log_set_root_pointer(root, c);
240f62c8 2607 rcu_assign_pointer(root->node, c);
925baedd
CM
2608
2609 /* the super has an extra ref to root->node */
2610 free_extent_buffer(old);
2611
0b86a832 2612 add_root_to_dirty_list(root);
5f39d397
CM
2613 extent_buffer_get(c);
2614 path->nodes[level] = c;
bd681513 2615 path->locks[level] = BTRFS_WRITE_LOCK;
5c680ed6
CM
2616 path->slots[level] = 0;
2617 return 0;
2618}
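
/*
 * This is the only place a btrfs tree grows taller: a new node one level up
 * becomes the root, holding a single pointer (keyed by the old root's first
 * key) back to the old root, after which the caller is free to split the
 * old root like any other node.  The reverse, collapsing a one-pointer root
 * away, happens in balance_level().
 */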
2619
74123bd7
CM
2620/*
2621 * worker function to insert a single pointer in a node.
2622 * the node should have enough room for the pointer already
97571fd0 2623 *
74123bd7
CM
2624 * slot and level indicate where you want the key to go, and
2625 * blocknr is the block the key points to.
2626 */
143bede5
JM
2627static void insert_ptr(struct btrfs_trans_handle *trans,
2628 struct btrfs_root *root, struct btrfs_path *path,
2629 struct btrfs_disk_key *key, u64 bytenr,
f3ea38da 2630 int slot, int level, int tree_mod_log)
74123bd7 2631{
5f39d397 2632 struct extent_buffer *lower;
74123bd7 2633 int nritems;
f3ea38da 2634 int ret;
5c680ed6
CM
2635
2636 BUG_ON(!path->nodes[level]);
f0486c68 2637 btrfs_assert_tree_locked(path->nodes[level]);
5f39d397
CM
2638 lower = path->nodes[level];
2639 nritems = btrfs_header_nritems(lower);
c293498b 2640 BUG_ON(slot > nritems);
143bede5 2641 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
74123bd7 2642 if (slot != nritems) {
f3ea38da
JS
2643 if (tree_mod_log && level)
2644 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
2645 slot, nritems - slot);
5f39d397
CM
2646 memmove_extent_buffer(lower,
2647 btrfs_node_key_ptr_offset(slot + 1),
2648 btrfs_node_key_ptr_offset(slot),
d6025579 2649 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 2650 }
f3ea38da
JS
2651 if (tree_mod_log && level) {
2652 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
2653 MOD_LOG_KEY_ADD);
2654 BUG_ON(ret < 0);
2655 }
5f39d397 2656 btrfs_set_node_key(lower, key, slot);
db94535d 2657 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
2658 WARN_ON(trans->transid == 0);
2659 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
2660 btrfs_set_header_nritems(lower, nritems + 1);
2661 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
2662}
2663
97571fd0
CM
2664/*
2665 * split the node at the specified level in path in two.
2666 * The path is corrected to point to the appropriate node after the split
2667 *
2668 * Before splitting this tries to make some room in the node by pushing
2669 * left and right, if either one works, it returns right away.
aa5d6bed
CM
2670 *
2671 * returns 0 on success and < 0 on failure
97571fd0 2672 */
e02119d5
CM
2673static noinline int split_node(struct btrfs_trans_handle *trans,
2674 struct btrfs_root *root,
2675 struct btrfs_path *path, int level)
be0e5c09 2676{
5f39d397
CM
2677 struct extent_buffer *c;
2678 struct extent_buffer *split;
2679 struct btrfs_disk_key disk_key;
be0e5c09 2680 int mid;
5c680ed6 2681 int ret;
7518a238 2682 u32 c_nritems;
eb60ceac 2683
5f39d397 2684 c = path->nodes[level];
7bb86316 2685 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 2686 if (c == root->node) {
5c680ed6 2687 /* trying to split the root, lets make a new one */
e089f05c 2688 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
2689 if (ret)
2690 return ret;
b3612421 2691 } else {
e66f709b 2692 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
2693 c = path->nodes[level];
2694 if (!ret && btrfs_header_nritems(c) <
c448acf0 2695 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
e66f709b 2696 return 0;
54aa1f4d
CM
2697 if (ret < 0)
2698 return ret;
be0e5c09 2699 }
e66f709b 2700
5f39d397 2701 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
2702 mid = (c_nritems + 1) / 2;
2703 btrfs_node_key(c, &disk_key, mid);
7bb86316 2704
5d4f98a2 2705 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
31840ae1 2706 root->root_key.objectid,
5581a51a 2707 &disk_key, level, c->start, 0);
5f39d397
CM
2708 if (IS_ERR(split))
2709 return PTR_ERR(split);
2710
f0486c68
YZ
2711 root_add_used(root, root->nodesize);
2712
5d4f98a2 2713 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
5f39d397 2714 btrfs_set_header_level(split, btrfs_header_level(c));
db94535d 2715 btrfs_set_header_bytenr(split, split->start);
5f39d397 2716 btrfs_set_header_generation(split, trans->transid);
5d4f98a2 2717 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
2718 btrfs_set_header_owner(split, root->root_key.objectid);
2719 write_extent_buffer(split, root->fs_info->fsid,
2720 (unsigned long)btrfs_header_fsid(split),
2721 BTRFS_FSID_SIZE);
e17cade2
CM
2722 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2723 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2724 BTRFS_UUID_SIZE);
54aa1f4d 2725
f230475e 2726 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
5f39d397
CM
2727 copy_extent_buffer(split, c,
2728 btrfs_node_key_ptr_offset(0),
2729 btrfs_node_key_ptr_offset(mid),
2730 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2731 btrfs_set_header_nritems(split, c_nritems - mid);
2732 btrfs_set_header_nritems(c, mid);
aa5d6bed
CM
2733 ret = 0;
2734
5f39d397
CM
2735 btrfs_mark_buffer_dirty(c);
2736 btrfs_mark_buffer_dirty(split);
2737
143bede5 2738 insert_ptr(trans, root, path, &disk_key, split->start,
f3ea38da 2739 path->slots[level + 1] + 1, level + 1, 1);
aa5d6bed 2740
5de08d7d 2741 if (path->slots[level] >= mid) {
5c680ed6 2742 path->slots[level] -= mid;
925baedd 2743 btrfs_tree_unlock(c);
5f39d397
CM
2744 free_extent_buffer(c);
2745 path->nodes[level] = split;
5c680ed6
CM
2746 path->slots[level + 1] += 1;
2747 } else {
925baedd 2748 btrfs_tree_unlock(split);
5f39d397 2749 free_extent_buffer(split);
be0e5c09 2750 }
aa5d6bed 2751 return ret;
be0e5c09
CM
2752}
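
/*
 * A quick example of the split above: a node holding 121 pointers is cut at
 * mid = (121 + 1) / 2 = 61, so 61 pointers stay in the original buffer and
 * the remaining 60 are copied into 'split', whose first key is inserted
 * into the parent one slot to the right of the original node.  The path is
 * then pointed at whichever half the caller's slot ended up in.
 */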
2753
74123bd7
CM
2754/*
2755 * how many bytes are required to store the items in a leaf. start
2756 * and nr indicate which items in the leaf to check. This totals up the
2757 * space used both by the item structs and the item data
2758 */
5f39d397 2759static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09
CM
2760{
2761 int data_len;
5f39d397 2762 int nritems = btrfs_header_nritems(l);
d4dbff95 2763 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
2764
2765 if (!nr)
2766 return 0;
5f39d397
CM
2767 data_len = btrfs_item_end_nr(l, start);
2768 data_len = data_len - btrfs_item_offset_nr(l, end);
0783fcfc 2769 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 2770 WARN_ON(data_len < 0);
be0e5c09
CM
2771 return data_len;
2772}
2773
d4dbff95
CM
2774/*
2775 * The space between the end of the leaf items and
2776 * the start of the leaf data. IOW, how much room
2777 * the leaf has left for both items and data
2778 */
d397712b 2779noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 2780 struct extent_buffer *leaf)
d4dbff95 2781{
5f39d397
CM
2782 int nritems = btrfs_header_nritems(leaf);
2783 int ret;
2784 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2785 if (ret < 0) {
d397712b
CM
2786 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2787 "used %d nritems %d\n",
ae2f5411 2788 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
2789 leaf_space_used(leaf, 0, nritems), nritems);
2790 }
2791 return ret;
d4dbff95
CM
2792}
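
/*
 * A worked example for the two helpers above, assuming a 4KiB leaf with
 * roughly 3995 bytes of data area after the header: three items carrying
 * 160 bytes of data each consume 3 * 160 = 480 bytes of data plus
 * 3 * sizeof(struct btrfs_item) = 75 bytes of item headers, so
 * leaf_space_used() reports 555 and btrfs_leaf_free_space() about 3440
 * bytes.  Item data is packed from the end of the block downwards, which is
 * why data_len is the end of the first item minus the offset of the last.
 */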
2793
99d8f83c
CM
2794/*
2795 * min slot controls the lowest index we're willing to push to the
2796 * right. We'll push up to and including min_slot, but no lower
2797 */
44871b1b
CM
2798static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2799 struct btrfs_root *root,
2800 struct btrfs_path *path,
2801 int data_size, int empty,
2802 struct extent_buffer *right,
99d8f83c
CM
2803 int free_space, u32 left_nritems,
2804 u32 min_slot)
00ec4c51 2805{
5f39d397 2806 struct extent_buffer *left = path->nodes[0];
44871b1b 2807 struct extent_buffer *upper = path->nodes[1];
cfed81a0 2808 struct btrfs_map_token token;
5f39d397 2809 struct btrfs_disk_key disk_key;
00ec4c51 2810 int slot;
34a38218 2811 u32 i;
00ec4c51
CM
2812 int push_space = 0;
2813 int push_items = 0;
0783fcfc 2814 struct btrfs_item *item;
34a38218 2815 u32 nr;
7518a238 2816 u32 right_nritems;
5f39d397 2817 u32 data_end;
db94535d 2818 u32 this_item_size;
00ec4c51 2819
cfed81a0
CM
2820 btrfs_init_map_token(&token);
2821
34a38218
CM
2822 if (empty)
2823 nr = 0;
2824 else
99d8f83c 2825 nr = max_t(u32, 1, min_slot);
34a38218 2826
31840ae1 2827 if (path->slots[0] >= left_nritems)
87b29b20 2828 push_space += data_size;
31840ae1 2829
44871b1b 2830 slot = path->slots[1];
34a38218
CM
2831 i = left_nritems - 1;
2832 while (i >= nr) {
5f39d397 2833 item = btrfs_item_nr(left, i);
db94535d 2834
31840ae1
ZY
2835 if (!empty && push_items > 0) {
2836 if (path->slots[0] > i)
2837 break;
2838 if (path->slots[0] == i) {
2839 int space = btrfs_leaf_free_space(root, left);
2840 if (space + push_space * 2 > free_space)
2841 break;
2842 }
2843 }
2844
00ec4c51 2845 if (path->slots[0] == i)
87b29b20 2846 push_space += data_size;
db94535d 2847
db94535d
CM
2848 this_item_size = btrfs_item_size(left, item);
2849 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 2850 break;
31840ae1 2851
00ec4c51 2852 push_items++;
db94535d 2853 push_space += this_item_size + sizeof(*item);
34a38218
CM
2854 if (i == 0)
2855 break;
2856 i--;
db94535d 2857 }
5f39d397 2858
925baedd
CM
2859 if (push_items == 0)
2860 goto out_unlock;
5f39d397 2861
34a38218 2862 if (!empty && push_items == left_nritems)
a429e513 2863 WARN_ON(1);
5f39d397 2864
00ec4c51 2865 /* push left to right */
5f39d397 2866 right_nritems = btrfs_header_nritems(right);
34a38218 2867
5f39d397 2868 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
123abc88 2869 push_space -= leaf_data_end(root, left);
5f39d397 2870
00ec4c51 2871 /* make room in the right data area */
5f39d397
CM
2872 data_end = leaf_data_end(root, right);
2873 memmove_extent_buffer(right,
2874 btrfs_leaf_data(right) + data_end - push_space,
2875 btrfs_leaf_data(right) + data_end,
2876 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2877
00ec4c51 2878 /* copy from the left data area */
5f39d397 2879 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
d6025579
CM
2880 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2881 btrfs_leaf_data(left) + leaf_data_end(root, left),
2882 push_space);
5f39d397
CM
2883
2884 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2885 btrfs_item_nr_offset(0),
2886 right_nritems * sizeof(struct btrfs_item));
2887
00ec4c51 2888 /* copy the items from left to right */
5f39d397
CM
2889 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2890 btrfs_item_nr_offset(left_nritems - push_items),
2891 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
2892
2893 /* update the item pointers */
7518a238 2894 right_nritems += push_items;
5f39d397 2895 btrfs_set_header_nritems(right, right_nritems);
123abc88 2896 push_space = BTRFS_LEAF_DATA_SIZE(root);
7518a238 2897 for (i = 0; i < right_nritems; i++) {
5f39d397 2898 item = btrfs_item_nr(right, i);
cfed81a0
CM
2899 push_space -= btrfs_token_item_size(right, item, &token);
2900 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d
CM
2901 }
2902
7518a238 2903 left_nritems -= push_items;
5f39d397 2904 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 2905
34a38218
CM
2906 if (left_nritems)
2907 btrfs_mark_buffer_dirty(left);
f0486c68
YZ
2908 else
2909 clean_tree_block(trans, root, left);
2910
5f39d397 2911 btrfs_mark_buffer_dirty(right);
a429e513 2912
5f39d397
CM
2913 btrfs_item_key(right, &disk_key, 0);
2914 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 2915 btrfs_mark_buffer_dirty(upper);
02217ed2 2916
00ec4c51 2917 /* then fixup the leaf pointer in the path */
7518a238
CM
2918 if (path->slots[0] >= left_nritems) {
2919 path->slots[0] -= left_nritems;
925baedd
CM
2920 if (btrfs_header_nritems(path->nodes[0]) == 0)
2921 clean_tree_block(trans, root, path->nodes[0]);
2922 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2923 free_extent_buffer(path->nodes[0]);
2924 path->nodes[0] = right;
00ec4c51
CM
2925 path->slots[1] += 1;
2926 } else {
925baedd 2927 btrfs_tree_unlock(right);
5f39d397 2928 free_extent_buffer(right);
00ec4c51
CM
2929 }
2930 return 0;
925baedd
CM
2931
2932out_unlock:
2933 btrfs_tree_unlock(right);
2934 free_extent_buffer(right);
2935 return 1;
00ec4c51 2936}
925baedd 2937
44871b1b
CM
2938/*
2939 * push some data in the path leaf to the right, trying to free up at
2940 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2941 *
2942 * returns 1 if the push failed because the other node didn't have enough
2943 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
2944 *
2945 * this will push starting from min_slot to the end of the leaf. It won't
2946 * push any slot lower than min_slot
44871b1b
CM
2947 */
2948static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
2949 *root, struct btrfs_path *path,
2950 int min_data_size, int data_size,
2951 int empty, u32 min_slot)
44871b1b
CM
2952{
2953 struct extent_buffer *left = path->nodes[0];
2954 struct extent_buffer *right;
2955 struct extent_buffer *upper;
2956 int slot;
2957 int free_space;
2958 u32 left_nritems;
2959 int ret;
2960
2961 if (!path->nodes[1])
2962 return 1;
2963
2964 slot = path->slots[1];
2965 upper = path->nodes[1];
2966 if (slot >= btrfs_header_nritems(upper) - 1)
2967 return 1;
2968
2969 btrfs_assert_tree_locked(path->nodes[1]);
2970
2971 right = read_node_slot(root, upper, slot + 1);
91ca338d
TI
2972 if (right == NULL)
2973 return 1;
2974
44871b1b
CM
2975 btrfs_tree_lock(right);
2976 btrfs_set_lock_blocking(right);
2977
2978 free_space = btrfs_leaf_free_space(root, right);
2979 if (free_space < data_size)
2980 goto out_unlock;
2981
2982 /* cow and double check */
2983 ret = btrfs_cow_block(trans, root, right, upper,
2984 slot + 1, &right);
2985 if (ret)
2986 goto out_unlock;
2987
2988 free_space = btrfs_leaf_free_space(root, right);
2989 if (free_space < data_size)
2990 goto out_unlock;
2991
2992 left_nritems = btrfs_header_nritems(left);
2993 if (left_nritems == 0)
2994 goto out_unlock;
2995
99d8f83c
CM
2996 return __push_leaf_right(trans, root, path, min_data_size, empty,
2997 right, free_space, left_nritems, min_slot);
44871b1b
CM
2998out_unlock:
2999 btrfs_tree_unlock(right);
3000 free_extent_buffer(right);
3001 return 1;
3002}
3003
74123bd7
CM
3004/*
3005 * push some data in the path leaf to the left, trying to free up at
3006 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3007 *
3008 * max_slot can put a limit on how far into the leaf we'll push items. The
3009 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3010 * items
74123bd7 3011 */
44871b1b
CM
3012static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3013 struct btrfs_root *root,
3014 struct btrfs_path *path, int data_size,
3015 int empty, struct extent_buffer *left,
99d8f83c
CM
3016 int free_space, u32 right_nritems,
3017 u32 max_slot)
be0e5c09 3018{
5f39d397
CM
3019 struct btrfs_disk_key disk_key;
3020 struct extent_buffer *right = path->nodes[0];
be0e5c09 3021 int i;
be0e5c09
CM
3022 int push_space = 0;
3023 int push_items = 0;
0783fcfc 3024 struct btrfs_item *item;
7518a238 3025 u32 old_left_nritems;
34a38218 3026 u32 nr;
aa5d6bed 3027 int ret = 0;
db94535d
CM
3028 u32 this_item_size;
3029 u32 old_left_item_size;
cfed81a0
CM
3030 struct btrfs_map_token token;
3031
3032 btrfs_init_map_token(&token);
be0e5c09 3033
34a38218 3034 if (empty)
99d8f83c 3035 nr = min(right_nritems, max_slot);
34a38218 3036 else
99d8f83c 3037 nr = min(right_nritems - 1, max_slot);
34a38218
CM
3038
3039 for (i = 0; i < nr; i++) {
5f39d397 3040 item = btrfs_item_nr(right, i);
db94535d 3041
31840ae1
ZY
3042 if (!empty && push_items > 0) {
3043 if (path->slots[0] < i)
3044 break;
3045 if (path->slots[0] == i) {
3046 int space = btrfs_leaf_free_space(root, right);
3047 if (space + push_space * 2 > free_space)
3048 break;
3049 }
3050 }
3051
be0e5c09 3052 if (path->slots[0] == i)
87b29b20 3053 push_space += data_size;
db94535d
CM
3054
3055 this_item_size = btrfs_item_size(right, item);
3056 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 3057 break;
db94535d 3058
be0e5c09 3059 push_items++;
db94535d
CM
3060 push_space += this_item_size + sizeof(*item);
3061 }
3062
be0e5c09 3063 if (push_items == 0) {
925baedd
CM
3064 ret = 1;
3065 goto out;
be0e5c09 3066 }
34a38218 3067 if (!empty && push_items == btrfs_header_nritems(right))
a429e513 3068 WARN_ON(1);
5f39d397 3069
be0e5c09 3070 /* push data from right to left */
5f39d397
CM
3071 copy_extent_buffer(left, right,
3072 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3073 btrfs_item_nr_offset(0),
3074 push_items * sizeof(struct btrfs_item));
3075
123abc88 3076 push_space = BTRFS_LEAF_DATA_SIZE(root) -
d397712b 3077 btrfs_item_offset_nr(right, push_items - 1);
5f39d397
CM
3078
3079 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
d6025579
CM
3080 leaf_data_end(root, left) - push_space,
3081 btrfs_leaf_data(right) +
5f39d397 3082 btrfs_item_offset_nr(right, push_items - 1),
d6025579 3083 push_space);
5f39d397 3084 old_left_nritems = btrfs_header_nritems(left);
87b29b20 3085 BUG_ON(old_left_nritems <= 0);
eb60ceac 3086
db94535d 3087 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 3088 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 3089 u32 ioff;
db94535d 3090
5f39d397 3091 item = btrfs_item_nr(left, i);
db94535d 3092
cfed81a0
CM
3093 ioff = btrfs_token_item_offset(left, item, &token);
3094 btrfs_set_token_item_offset(left, item,
3095 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3096 &token);
be0e5c09 3097 }
5f39d397 3098 btrfs_set_header_nritems(left, old_left_nritems + push_items);
be0e5c09
CM
3099
3100 /* fixup right node */
34a38218 3101 if (push_items > right_nritems) {
d397712b
CM
3102 printk(KERN_CRIT "push items %d nr %u\n", push_items,
3103 right_nritems);
34a38218
CM
3104 WARN_ON(1);
3105 }
3106
3107 if (push_items < right_nritems) {
3108 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3109 leaf_data_end(root, right);
3110 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3111 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3112 btrfs_leaf_data(right) +
3113 leaf_data_end(root, right), push_space);
3114
3115 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
3116 btrfs_item_nr_offset(push_items),
3117 (btrfs_header_nritems(right) - push_items) *
3118 sizeof(struct btrfs_item));
34a38218 3119 }
eef1c494
Y
3120 right_nritems -= push_items;
3121 btrfs_set_header_nritems(right, right_nritems);
123abc88 3122 push_space = BTRFS_LEAF_DATA_SIZE(root);
5f39d397
CM
3123 for (i = 0; i < right_nritems; i++) {
3124 item = btrfs_item_nr(right, i);
db94535d 3125
cfed81a0
CM
3126 push_space = push_space - btrfs_token_item_size(right,
3127 item, &token);
3128 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d 3129 }
eb60ceac 3130
5f39d397 3131 btrfs_mark_buffer_dirty(left);
34a38218
CM
3132 if (right_nritems)
3133 btrfs_mark_buffer_dirty(right);
f0486c68
YZ
3134 else
3135 clean_tree_block(trans, root, right);
098f59c2 3136
5f39d397 3137 btrfs_item_key(right, &disk_key, 0);
143bede5 3138 fixup_low_keys(trans, root, path, &disk_key, 1);
be0e5c09
CM
3139
3140 /* then fixup the leaf pointer in the path */
3141 if (path->slots[0] < push_items) {
3142 path->slots[0] += old_left_nritems;
925baedd 3143 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3144 free_extent_buffer(path->nodes[0]);
3145 path->nodes[0] = left;
be0e5c09
CM
3146 path->slots[1] -= 1;
3147 } else {
925baedd 3148 btrfs_tree_unlock(left);
5f39d397 3149 free_extent_buffer(left);
be0e5c09
CM
3150 path->slots[0] -= push_items;
3151 }
eb60ceac 3152 BUG_ON(path->slots[0] < 0);
aa5d6bed 3153 return ret;
925baedd
CM
3154out:
3155 btrfs_tree_unlock(left);
3156 free_extent_buffer(left);
3157 return ret;
be0e5c09
CM
3158}
3159
44871b1b
CM
3160/*
3161 * push some data in the path leaf to the left, trying to free up at
3162 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3163 *
3164 * max_slot can put a limit on how far into the leaf we'll push items. The
3165 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3166 * items
44871b1b
CM
3167 */
3168static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3169 *root, struct btrfs_path *path, int min_data_size,
3170 int data_size, int empty, u32 max_slot)
44871b1b
CM
3171{
3172 struct extent_buffer *right = path->nodes[0];
3173 struct extent_buffer *left;
3174 int slot;
3175 int free_space;
3176 u32 right_nritems;
3177 int ret = 0;
3178
3179 slot = path->slots[1];
3180 if (slot == 0)
3181 return 1;
3182 if (!path->nodes[1])
3183 return 1;
3184
3185 right_nritems = btrfs_header_nritems(right);
3186 if (right_nritems == 0)
3187 return 1;
3188
3189 btrfs_assert_tree_locked(path->nodes[1]);
3190
3191 left = read_node_slot(root, path->nodes[1], slot - 1);
91ca338d
TI
3192 if (left == NULL)
3193 return 1;
3194
44871b1b
CM
3195 btrfs_tree_lock(left);
3196 btrfs_set_lock_blocking(left);
3197
3198 free_space = btrfs_leaf_free_space(root, left);
3199 if (free_space < data_size) {
3200 ret = 1;
3201 goto out;
3202 }
3203
3204 /* cow and double check */
3205 ret = btrfs_cow_block(trans, root, left,
3206 path->nodes[1], slot - 1, &left);
3207 if (ret) {
3208 /* we hit -ENOSPC, but it isn't fatal here */
79787eaa
JM
3209 if (ret == -ENOSPC)
3210 ret = 1;
44871b1b
CM
3211 goto out;
3212 }
3213
3214 free_space = btrfs_leaf_free_space(root, left);
3215 if (free_space < data_size) {
3216 ret = 1;
3217 goto out;
3218 }
3219
99d8f83c
CM
3220 return __push_leaf_left(trans, root, path, min_data_size,
3221 empty, left, free_space, right_nritems,
3222 max_slot);
44871b1b
CM
3223out:
3224 btrfs_tree_unlock(left);
3225 free_extent_buffer(left);
3226 return ret;
3227}
3228
3229/*
3230 * split the path's leaf in two, making sure there is at least data_size
3231 * available for the resulting leaf level of the path.
44871b1b 3232 */
143bede5
JM
3233static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3234 struct btrfs_root *root,
3235 struct btrfs_path *path,
3236 struct extent_buffer *l,
3237 struct extent_buffer *right,
3238 int slot, int mid, int nritems)
44871b1b
CM
3239{
3240 int data_copy_size;
3241 int rt_data_off;
3242 int i;
44871b1b 3243 struct btrfs_disk_key disk_key;
cfed81a0
CM
3244 struct btrfs_map_token token;
3245
3246 btrfs_init_map_token(&token);
44871b1b
CM
3247
3248 nritems = nritems - mid;
3249 btrfs_set_header_nritems(right, nritems);
3250 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3251
3252 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3253 btrfs_item_nr_offset(mid),
3254 nritems * sizeof(struct btrfs_item));
3255
3256 copy_extent_buffer(right, l,
3257 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3258 data_copy_size, btrfs_leaf_data(l) +
3259 leaf_data_end(root, l), data_copy_size);
3260
3261 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3262 btrfs_item_end_nr(l, mid);
3263
3264 for (i = 0; i < nritems; i++) {
3265 struct btrfs_item *item = btrfs_item_nr(right, i);
3266 u32 ioff;
3267
cfed81a0
CM
3268 ioff = btrfs_token_item_offset(right, item, &token);
3269 btrfs_set_token_item_offset(right, item,
3270 ioff + rt_data_off, &token);
44871b1b
CM
3271 }
3272
44871b1b 3273 btrfs_set_header_nritems(l, mid);
44871b1b 3274 btrfs_item_key(right, &disk_key, 0);
143bede5 3275 insert_ptr(trans, root, path, &disk_key, right->start,
f3ea38da 3276 path->slots[1] + 1, 1, 0);
44871b1b
CM
3277
3278 btrfs_mark_buffer_dirty(right);
3279 btrfs_mark_buffer_dirty(l);
3280 BUG_ON(path->slots[0] != slot);
3281
44871b1b
CM
3282 if (mid <= slot) {
3283 btrfs_tree_unlock(path->nodes[0]);
3284 free_extent_buffer(path->nodes[0]);
3285 path->nodes[0] = right;
3286 path->slots[0] -= mid;
3287 path->slots[1] += 1;
3288 } else {
3289 btrfs_tree_unlock(right);
3290 free_extent_buffer(right);
3291 }
3292
3293 BUG_ON(path->slots[0] < 0);
44871b1b
CM
3294}
3295
99d8f83c
CM
3296/*
3297 * double splits happen when we need to insert a big item in the middle
3298 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3299 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3300 * A B C
3301 *
3302 * We avoid this by trying to push the items on either side of our target
3303 * into the adjacent leaves. If all goes well we can avoid the double split
3304 * completely.
3305 */
3306static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3307 struct btrfs_root *root,
3308 struct btrfs_path *path,
3309 int data_size)
3310{
3311 int ret;
3312 int progress = 0;
3313 int slot;
3314 u32 nritems;
3315
3316 slot = path->slots[0];
3317
3318 /*
3319 * try to push all the items after our slot into the
3320 * right leaf
3321 */
3322 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3323 if (ret < 0)
3324 return ret;
3325
3326 if (ret == 0)
3327 progress++;
3328
3329 nritems = btrfs_header_nritems(path->nodes[0]);
3330 /*
3331 * our goal is to get our slot at the start or end of a leaf. If
3332 * we've done so we're done
3333 */
3334 if (path->slots[0] == 0 || path->slots[0] == nritems)
3335 return 0;
3336
3337 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3338 return 0;
3339
3340 /* try to push all the items before our slot into the next leaf */
3341 slot = path->slots[0];
3342 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3343 if (ret < 0)
3344 return ret;
3345
3346 if (ret == 0)
3347 progress++;
3348
3349 if (progress)
3350 return 0;
3351 return 1;
3352}
3353
74123bd7
CM
3354/*
3355 * split the path's leaf in two, making sure there is at least data_size
3356 * available for the resulting leaf level of the path.
aa5d6bed
CM
3357 *
3358 * returns 0 if all went well and < 0 on failure.
74123bd7 3359 */
e02119d5
CM
3360static noinline int split_leaf(struct btrfs_trans_handle *trans,
3361 struct btrfs_root *root,
3362 struct btrfs_key *ins_key,
3363 struct btrfs_path *path, int data_size,
3364 int extend)
be0e5c09 3365{
5d4f98a2 3366 struct btrfs_disk_key disk_key;
5f39d397 3367 struct extent_buffer *l;
7518a238 3368 u32 nritems;
eb60ceac
CM
3369 int mid;
3370 int slot;
5f39d397 3371 struct extent_buffer *right;
d4dbff95 3372 int ret = 0;
aa5d6bed 3373 int wret;
5d4f98a2 3374 int split;
cc0c5538 3375 int num_doubles = 0;
99d8f83c 3376 int tried_avoid_double = 0;
aa5d6bed 3377
a5719521
YZ
3378 l = path->nodes[0];
3379 slot = path->slots[0];
3380 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3381 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3382 return -EOVERFLOW;
3383
40689478 3384 /* first try to make some room by pushing left and right */
99d8f83c
CM
3385 if (data_size) {
3386 wret = push_leaf_right(trans, root, path, data_size,
3387 data_size, 0, 0);
d397712b 3388 if (wret < 0)
eaee50e8 3389 return wret;
3685f791 3390 if (wret) {
99d8f83c
CM
3391 wret = push_leaf_left(trans, root, path, data_size,
3392 data_size, 0, (u32)-1);
3685f791
CM
3393 if (wret < 0)
3394 return wret;
3395 }
3396 l = path->nodes[0];
aa5d6bed 3397
3685f791 3398 /* did the pushes work? */
87b29b20 3399 if (btrfs_leaf_free_space(root, l) >= data_size)
3685f791 3400 return 0;
3326d1b0 3401 }
aa5d6bed 3402
5c680ed6 3403 if (!path->nodes[1]) {
e089f05c 3404 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
3405 if (ret)
3406 return ret;
3407 }
cc0c5538 3408again:
5d4f98a2 3409 split = 1;
cc0c5538 3410 l = path->nodes[0];
eb60ceac 3411 slot = path->slots[0];
5f39d397 3412 nritems = btrfs_header_nritems(l);
d397712b 3413 mid = (nritems + 1) / 2;
54aa1f4d 3414
5d4f98a2
YZ
3415 if (mid <= slot) {
3416 if (nritems == 1 ||
3417 leaf_space_used(l, mid, nritems - mid) + data_size >
3418 BTRFS_LEAF_DATA_SIZE(root)) {
3419 if (slot >= nritems) {
3420 split = 0;
3421 } else {
3422 mid = slot;
3423 if (mid != nritems &&
3424 leaf_space_used(l, mid, nritems - mid) +
3425 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
3426 if (data_size && !tried_avoid_double)
3427 goto push_for_double;
5d4f98a2
YZ
3428 split = 2;
3429 }
3430 }
3431 }
3432 } else {
3433 if (leaf_space_used(l, 0, mid) + data_size >
3434 BTRFS_LEAF_DATA_SIZE(root)) {
3435 if (!extend && data_size && slot == 0) {
3436 split = 0;
3437 } else if ((extend || !data_size) && slot == 0) {
3438 mid = 1;
3439 } else {
3440 mid = slot;
3441 if (mid != nritems &&
3442 leaf_space_used(l, mid, nritems - mid) +
3443 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
3444 if (data_size && !tried_avoid_double)
3445 goto push_for_double;
5d4f98a2
YZ
3446 split = 2;
3447 }
3448 }
3449 }
3450 }
3451
3452 if (split == 0)
3453 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3454 else
3455 btrfs_item_key(l, &disk_key, mid);
3456
3457 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
31840ae1 3458 root->root_key.objectid,
5581a51a 3459 &disk_key, 0, l->start, 0);
f0486c68 3460 if (IS_ERR(right))
5f39d397 3461 return PTR_ERR(right);
f0486c68
YZ
3462
3463 root_add_used(root, root->leafsize);
5f39d397
CM
3464
3465 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
db94535d 3466 btrfs_set_header_bytenr(right, right->start);
5f39d397 3467 btrfs_set_header_generation(right, trans->transid);
5d4f98a2 3468 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
3469 btrfs_set_header_owner(right, root->root_key.objectid);
3470 btrfs_set_header_level(right, 0);
3471 write_extent_buffer(right, root->fs_info->fsid,
3472 (unsigned long)btrfs_header_fsid(right),
3473 BTRFS_FSID_SIZE);
e17cade2
CM
3474
3475 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3476 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3477 BTRFS_UUID_SIZE);
44871b1b 3478
5d4f98a2
YZ
3479 if (split == 0) {
3480 if (mid <= slot) {
3481 btrfs_set_header_nritems(right, 0);
143bede5 3482 insert_ptr(trans, root, path, &disk_key, right->start,
f3ea38da 3483 path->slots[1] + 1, 1, 0);
5d4f98a2
YZ
3484 btrfs_tree_unlock(path->nodes[0]);
3485 free_extent_buffer(path->nodes[0]);
3486 path->nodes[0] = right;
3487 path->slots[0] = 0;
3488 path->slots[1] += 1;
3489 } else {
3490 btrfs_set_header_nritems(right, 0);
143bede5 3491 insert_ptr(trans, root, path, &disk_key, right->start,
f3ea38da 3492 path->slots[1], 1, 0);
5d4f98a2
YZ
3493 btrfs_tree_unlock(path->nodes[0]);
3494 free_extent_buffer(path->nodes[0]);
3495 path->nodes[0] = right;
3496 path->slots[0] = 0;
143bede5
JM
3497 if (path->slots[1] == 0)
3498 fixup_low_keys(trans, root, path,
3499 &disk_key, 1);
d4dbff95 3500 }
5d4f98a2
YZ
3501 btrfs_mark_buffer_dirty(right);
3502 return ret;
d4dbff95 3503 }
74123bd7 3504
143bede5 3505 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
31840ae1 3506
5d4f98a2 3507 if (split == 2) {
cc0c5538
CM
3508 BUG_ON(num_doubles != 0);
3509 num_doubles++;
3510 goto again;
a429e513 3511 }
44871b1b 3512
143bede5 3513 return 0;
99d8f83c
CM
3514
3515push_for_double:
3516 push_for_double_split(trans, root, path, data_size);
3517 tried_avoid_double = 1;
3518 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3519 return 0;
3520 goto again;
be0e5c09
CM
3521}
3522
ad48fd75
YZ
3523static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3524 struct btrfs_root *root,
3525 struct btrfs_path *path, int ins_len)
459931ec 3526{
ad48fd75 3527 struct btrfs_key key;
459931ec 3528 struct extent_buffer *leaf;
ad48fd75
YZ
3529 struct btrfs_file_extent_item *fi;
3530 u64 extent_len = 0;
3531 u32 item_size;
3532 int ret;
459931ec
CM
3533
3534 leaf = path->nodes[0];
ad48fd75
YZ
3535 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3536
3537 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3538 key.type != BTRFS_EXTENT_CSUM_KEY);
3539
3540 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3541 return 0;
459931ec
CM
3542
3543 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ad48fd75
YZ
3544 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3545 fi = btrfs_item_ptr(leaf, path->slots[0],
3546 struct btrfs_file_extent_item);
3547 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3548 }
b3b4aa74 3549 btrfs_release_path(path);
459931ec 3550
459931ec 3551 path->keep_locks = 1;
ad48fd75
YZ
3552 path->search_for_split = 1;
3553 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
459931ec 3554 path->search_for_split = 0;
ad48fd75
YZ
3555 if (ret < 0)
3556 goto err;
459931ec 3557
ad48fd75
YZ
3558 ret = -EAGAIN;
3559 leaf = path->nodes[0];
459931ec 3560 /* if our item isn't there or got smaller, return now */
ad48fd75
YZ
3561 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3562 goto err;
3563
109f6aef
CM
3564 /* the leaf has changed, it now has room. return now */
3565 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3566 goto err;
3567
ad48fd75
YZ
3568 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3569 fi = btrfs_item_ptr(leaf, path->slots[0],
3570 struct btrfs_file_extent_item);
3571 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3572 goto err;
459931ec
CM
3573 }
3574
b9473439 3575 btrfs_set_path_blocking(path);
ad48fd75 3576 ret = split_leaf(trans, root, &key, path, ins_len, 1);
f0486c68
YZ
3577 if (ret)
3578 goto err;
459931ec 3579
ad48fd75 3580 path->keep_locks = 0;
b9473439 3581 btrfs_unlock_up_safe(path, 1);
ad48fd75
YZ
3582 return 0;
3583err:
3584 path->keep_locks = 0;
3585 return ret;
3586}
3587
3588static noinline int split_item(struct btrfs_trans_handle *trans,
3589 struct btrfs_root *root,
3590 struct btrfs_path *path,
3591 struct btrfs_key *new_key,
3592 unsigned long split_offset)
3593{
3594 struct extent_buffer *leaf;
3595 struct btrfs_item *item;
3596 struct btrfs_item *new_item;
3597 int slot;
3598 char *buf;
3599 u32 nritems;
3600 u32 item_size;
3601 u32 orig_offset;
3602 struct btrfs_disk_key disk_key;
3603
b9473439
CM
3604 leaf = path->nodes[0];
3605 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3606
b4ce94de
CM
3607 btrfs_set_path_blocking(path);
3608
459931ec
CM
3609 item = btrfs_item_nr(leaf, path->slots[0]);
3610 orig_offset = btrfs_item_offset(leaf, item);
3611 item_size = btrfs_item_size(leaf, item);
3612
459931ec 3613 buf = kmalloc(item_size, GFP_NOFS);
ad48fd75
YZ
3614 if (!buf)
3615 return -ENOMEM;
3616
459931ec
CM
3617 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3618 path->slots[0]), item_size);
459931ec 3619
ad48fd75 3620 slot = path->slots[0] + 1;
459931ec 3621 nritems = btrfs_header_nritems(leaf);
459931ec
CM
3622 if (slot != nritems) {
3623 /* shift the items */
3624 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
ad48fd75
YZ
3625 btrfs_item_nr_offset(slot),
3626 (nritems - slot) * sizeof(struct btrfs_item));
459931ec
CM
3627 }
3628
3629 btrfs_cpu_key_to_disk(&disk_key, new_key);
3630 btrfs_set_item_key(leaf, &disk_key, slot);
3631
3632 new_item = btrfs_item_nr(leaf, slot);
3633
3634 btrfs_set_item_offset(leaf, new_item, orig_offset);
3635 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3636
3637 btrfs_set_item_offset(leaf, item,
3638 orig_offset + item_size - split_offset);
3639 btrfs_set_item_size(leaf, item, split_offset);
3640
3641 btrfs_set_header_nritems(leaf, nritems + 1);
3642
3643 /* write the data for the start of the original item */
3644 write_extent_buffer(leaf, buf,
3645 btrfs_item_ptr_offset(leaf, path->slots[0]),
3646 split_offset);
3647
3648 /* write the data for the new item */
3649 write_extent_buffer(leaf, buf + split_offset,
3650 btrfs_item_ptr_offset(leaf, slot),
3651 item_size - split_offset);
3652 btrfs_mark_buffer_dirty(leaf);
3653
ad48fd75 3654 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
459931ec 3655 kfree(buf);
ad48fd75
YZ
3656 return 0;
3657}
3658
3659/*
3660 * This function splits a single item into two items,
3661 * giving 'new_key' to the new item and splitting the
3662 * old one at split_offset (from the start of the item).
3663 *
3664 * The path may be released by this operation. After
3665 * the split, the path is pointing to the old item. The
3666 * new item is going to be in the same node as the old one.
3667 *
3668 * Note, the item being split must be small enough to live alone on
3669 * a tree block with room for one extra struct btrfs_item
3670 *
3671 * This allows us to split the item in place, keeping a lock on the
3672 * leaf the entire time.
3673 */
3674int btrfs_split_item(struct btrfs_trans_handle *trans,
3675 struct btrfs_root *root,
3676 struct btrfs_path *path,
3677 struct btrfs_key *new_key,
3678 unsigned long split_offset)
3679{
3680 int ret;
3681 ret = setup_leaf_for_split(trans, root, path,
3682 sizeof(struct btrfs_item));
3683 if (ret)
3684 return ret;
3685
3686 ret = split_item(trans, root, path, new_key, split_offset);
459931ec
CM
3687 return ret;
3688}
3689
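/*
 * Illustrative sketch (not part of ctree.c): one way a caller might drive
 * btrfs_split_item().  Splitting the item in half and bumping key.offset is
 * made up for the example; only the btrfs_split_item() signature and the
 * requirement that the path already point at the item come from the
 * function above.
 */
static int example_split_item_in_half(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path)
{
        struct btrfs_key new_key;
        u32 item_size;

        btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
        item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

        /* hypothetical key for the second half of the data */
        new_key.offset += item_size / 2;

        /* on success the path still points at the (now shorter) old item */
        return btrfs_split_item(trans, root, path, &new_key, item_size / 2);
}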
ad48fd75
YZ
3690/*
3691 * This function duplicates an item, giving 'new_key' to the new item.
3692 * It guarantees both items live in the same tree leaf and the new item
3693 * is contiguous with the original item.
3694 *
3695 * This allows us to split a file extent in place, keeping a lock on the
3696 * leaf the entire time.
3697 */
3698int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3699 struct btrfs_root *root,
3700 struct btrfs_path *path,
3701 struct btrfs_key *new_key)
3702{
3703 struct extent_buffer *leaf;
3704 int ret;
3705 u32 item_size;
3706
3707 leaf = path->nodes[0];
3708 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3709 ret = setup_leaf_for_split(trans, root, path,
3710 item_size + sizeof(struct btrfs_item));
3711 if (ret)
3712 return ret;
3713
3714 path->slots[0]++;
143bede5
JM
3715 setup_items_for_insert(trans, root, path, new_key, &item_size,
3716 item_size, item_size +
3717 sizeof(struct btrfs_item), 1);
ad48fd75
YZ
3718 leaf = path->nodes[0];
3719 memcpy_extent_buffer(leaf,
3720 btrfs_item_ptr_offset(leaf, path->slots[0]),
3721 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3722 item_size);
3723 return 0;
3724}
3725
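/*
 * Illustrative sketch (not part of ctree.c): duplicating the item the path
 * points at, roughly the way a file extent split would use it.  split_pos
 * and the key layout are hypothetical; the call matches
 * btrfs_duplicate_item() above, which leaves path->slots[0] on the freshly
 * inserted copy.
 */
static int example_duplicate_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path, u64 split_pos)
{
        struct btrfs_key new_key;

        btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
        new_key.offset = split_pos;     /* second half begins here */

        return btrfs_duplicate_item(trans, root, path, &new_key);
}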
d352ac68
CM
3726/*
3727 * make the item pointed to by the path smaller. new_size indicates
3728 * how small to make it, and from_end tells us if we just chop bytes
3729 * off the end of the item or if we shift the item to chop bytes off
3730 * the front.
3731 */
143bede5
JM
3732void btrfs_truncate_item(struct btrfs_trans_handle *trans,
3733 struct btrfs_root *root,
3734 struct btrfs_path *path,
3735 u32 new_size, int from_end)
b18c6685 3736{
b18c6685 3737 int slot;
5f39d397
CM
3738 struct extent_buffer *leaf;
3739 struct btrfs_item *item;
b18c6685
CM
3740 u32 nritems;
3741 unsigned int data_end;
3742 unsigned int old_data_start;
3743 unsigned int old_size;
3744 unsigned int size_diff;
3745 int i;
cfed81a0
CM
3746 struct btrfs_map_token token;
3747
3748 btrfs_init_map_token(&token);
b18c6685 3749
5f39d397 3750 leaf = path->nodes[0];
179e29e4
CM
3751 slot = path->slots[0];
3752
3753 old_size = btrfs_item_size_nr(leaf, slot);
3754 if (old_size == new_size)
143bede5 3755 return;
b18c6685 3756
5f39d397 3757 nritems = btrfs_header_nritems(leaf);
b18c6685
CM
3758 data_end = leaf_data_end(root, leaf);
3759
5f39d397 3760 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 3761
b18c6685
CM
3762 size_diff = old_size - new_size;
3763
3764 BUG_ON(slot < 0);
3765 BUG_ON(slot >= nritems);
3766
3767 /*
3768 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3769 */
3770 /* first correct the data pointers */
3771 for (i = slot; i < nritems; i++) {
5f39d397
CM
3772 u32 ioff;
3773 item = btrfs_item_nr(leaf, i);
db94535d 3774
cfed81a0
CM
3775 ioff = btrfs_token_item_offset(leaf, item, &token);
3776 btrfs_set_token_item_offset(leaf, item,
3777 ioff + size_diff, &token);
b18c6685 3778 }
db94535d 3779
b18c6685 3780 /* shift the data */
179e29e4
CM
3781 if (from_end) {
3782 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3783 data_end + size_diff, btrfs_leaf_data(leaf) +
3784 data_end, old_data_start + new_size - data_end);
3785 } else {
3786 struct btrfs_disk_key disk_key;
3787 u64 offset;
3788
3789 btrfs_item_key(leaf, &disk_key, slot);
3790
3791 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3792 unsigned long ptr;
3793 struct btrfs_file_extent_item *fi;
3794
3795 fi = btrfs_item_ptr(leaf, slot,
3796 struct btrfs_file_extent_item);
3797 fi = (struct btrfs_file_extent_item *)(
3798 (unsigned long)fi - size_diff);
3799
3800 if (btrfs_file_extent_type(leaf, fi) ==
3801 BTRFS_FILE_EXTENT_INLINE) {
3802 ptr = btrfs_item_ptr_offset(leaf, slot);
3803 memmove_extent_buffer(leaf, ptr,
d397712b
CM
3804 (unsigned long)fi,
3805 offsetof(struct btrfs_file_extent_item,
179e29e4
CM
3806 disk_bytenr));
3807 }
3808 }
3809
3810 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3811 data_end + size_diff, btrfs_leaf_data(leaf) +
3812 data_end, old_data_start - data_end);
3813
3814 offset = btrfs_disk_key_offset(&disk_key);
3815 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3816 btrfs_set_item_key(leaf, &disk_key, slot);
3817 if (slot == 0)
3818 fixup_low_keys(trans, root, path, &disk_key, 1);
3819 }
5f39d397
CM
3820
3821 item = btrfs_item_nr(leaf, slot);
3822 btrfs_set_item_size(leaf, item, new_size);
3823 btrfs_mark_buffer_dirty(leaf);
b18c6685 3824
5f39d397
CM
3825 if (btrfs_leaf_free_space(root, leaf) < 0) {
3826 btrfs_print_leaf(root, leaf);
b18c6685 3827 BUG();
5f39d397 3828 }
b18c6685
CM
3829}
3830
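/*
 * Illustrative sketch (not part of ctree.c): shrinking the item at
 * path->slots[0] by dropping its last 8 bytes.  The size chosen is
 * arbitrary; from_end = 1 chops from the tail, from_end = 0 would shift the
 * data and chop from the front, fixing up the key offset as above.
 */
static void example_shrink_item(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path)
{
        u32 size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

        if (size > 8)
                btrfs_truncate_item(trans, root, path, size - 8, 1);
}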
d352ac68
CM
3831/*
3832 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
3833 */
143bede5
JM
3834void btrfs_extend_item(struct btrfs_trans_handle *trans,
3835 struct btrfs_root *root, struct btrfs_path *path,
3836 u32 data_size)
6567e837 3837{
6567e837 3838 int slot;
5f39d397
CM
3839 struct extent_buffer *leaf;
3840 struct btrfs_item *item;
6567e837
CM
3841 u32 nritems;
3842 unsigned int data_end;
3843 unsigned int old_data;
3844 unsigned int old_size;
3845 int i;
cfed81a0
CM
3846 struct btrfs_map_token token;
3847
3848 btrfs_init_map_token(&token);
6567e837 3849
5f39d397 3850 leaf = path->nodes[0];
6567e837 3851
5f39d397 3852 nritems = btrfs_header_nritems(leaf);
6567e837
CM
3853 data_end = leaf_data_end(root, leaf);
3854
5f39d397
CM
3855 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3856 btrfs_print_leaf(root, leaf);
6567e837 3857 BUG();
5f39d397 3858 }
6567e837 3859 slot = path->slots[0];
5f39d397 3860 old_data = btrfs_item_end_nr(leaf, slot);
6567e837
CM
3861
3862 BUG_ON(slot < 0);
3326d1b0
CM
3863 if (slot >= nritems) {
3864 btrfs_print_leaf(root, leaf);
d397712b
CM
3865 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3866 slot, nritems);
3326d1b0
CM
3867 BUG_ON(1);
3868 }
6567e837
CM
3869
3870 /*
3871 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3872 */
3873 /* first correct the data pointers */
3874 for (i = slot; i < nritems; i++) {
5f39d397
CM
3875 u32 ioff;
3876 item = btrfs_item_nr(leaf, i);
db94535d 3877
cfed81a0
CM
3878 ioff = btrfs_token_item_offset(leaf, item, &token);
3879 btrfs_set_token_item_offset(leaf, item,
3880 ioff - data_size, &token);
6567e837 3881 }
5f39d397 3882
6567e837 3883 /* shift the data */
5f39d397 3884 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
6567e837
CM
3885 data_end - data_size, btrfs_leaf_data(leaf) +
3886 data_end, old_data - data_end);
5f39d397 3887
6567e837 3888 data_end = old_data;
5f39d397
CM
3889 old_size = btrfs_item_size_nr(leaf, slot);
3890 item = btrfs_item_nr(leaf, slot);
3891 btrfs_set_item_size(leaf, item, old_size + data_size);
3892 btrfs_mark_buffer_dirty(leaf);
6567e837 3893
5f39d397
CM
3894 if (btrfs_leaf_free_space(root, leaf) < 0) {
3895 btrfs_print_leaf(root, leaf);
6567e837 3896 BUG();
5f39d397 3897 }
6567e837
CM
3898}
3899
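/*
 * Illustrative sketch (not part of ctree.c): growing the item at
 * path->slots[0] by 'bytes' and zeroing the new tail.  The caller is assumed
 * to have reserved the space already (e.g. by searching with ins_len),
 * because btrfs_extend_item() BUGs when the leaf is out of room.
 */
static void example_grow_item(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct btrfs_path *path, u32 bytes)
{
        struct extent_buffer *leaf = path->nodes[0];
        u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
        unsigned long ptr;

        btrfs_extend_item(trans, root, path, bytes);

        /* the item data moved down, so fetch the pointer after extending */
        ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
        memset_extent_buffer(leaf, 0, ptr + old_size, bytes);
}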
f3465ca4
JB
3900/*
3901 * Given a key and some data, insert items into the tree.
3902 * This does all the path init required, making room in the tree if needed.
3903 * Returns the number of keys that were inserted.
3904 */
3905int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3906 struct btrfs_root *root,
3907 struct btrfs_path *path,
3908 struct btrfs_key *cpu_key, u32 *data_size,
3909 int nr)
3910{
3911 struct extent_buffer *leaf;
3912 struct btrfs_item *item;
3913 int ret = 0;
3914 int slot;
f3465ca4
JB
3915 int i;
3916 u32 nritems;
3917 u32 total_data = 0;
3918 u32 total_size = 0;
3919 unsigned int data_end;
3920 struct btrfs_disk_key disk_key;
3921 struct btrfs_key found_key;
cfed81a0
CM
3922 struct btrfs_map_token token;
3923
3924 btrfs_init_map_token(&token);
f3465ca4 3925
87b29b20
YZ
3926 for (i = 0; i < nr; i++) {
3927 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3928 BTRFS_LEAF_DATA_SIZE(root)) {
3929 nr = i;
3930 break;
3931 }
f3465ca4 3932 total_data += data_size[i];
87b29b20
YZ
3933 total_size += data_size[i] + sizeof(struct btrfs_item);
3934 }
3935 BUG_ON(nr == 0);
f3465ca4 3936
f3465ca4
JB
3937 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3938 if (ret == 0)
3939 return -EEXIST;
3940 if (ret < 0)
3941 goto out;
3942
f3465ca4
JB
3943 leaf = path->nodes[0];
3944
3945 nritems = btrfs_header_nritems(leaf);
3946 data_end = leaf_data_end(root, leaf);
3947
3948 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3949 for (i = nr - 1; i >= 0; i--) {
3950 total_data -= data_size[i];
3951 total_size -= data_size[i] + sizeof(struct btrfs_item);
3952 if (total_size < btrfs_leaf_free_space(root, leaf))
3953 break;
3954 }
3955 nr = i;
3956 }
3957
3958 slot = path->slots[0];
3959 BUG_ON(slot < 0);
3960
3961 if (slot != nritems) {
3962 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3963
3964 item = btrfs_item_nr(leaf, slot);
3965 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3966
3967 /* figure out how many keys we can insert in here */
3968 total_data = data_size[0];
3969 for (i = 1; i < nr; i++) {
5d4f98a2 3970 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
f3465ca4
JB
3971 break;
3972 total_data += data_size[i];
3973 }
3974 nr = i;
3975
3976 if (old_data < data_end) {
3977 btrfs_print_leaf(root, leaf);
d397712b 3978 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
f3465ca4
JB
3979 slot, old_data, data_end);
3980 BUG_ON(1);
3981 }
3982 /*
3983 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3984 */
3985 /* first correct the data pointers */
f3465ca4
JB
3986 for (i = slot; i < nritems; i++) {
3987 u32 ioff;
3988
3989 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
3990 ioff = btrfs_token_item_offset(leaf, item, &token);
3991 btrfs_set_token_item_offset(leaf, item,
3992 ioff - total_data, &token);
f3465ca4 3993 }
f3465ca4
JB
3994 /* shift the items */
3995 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3996 btrfs_item_nr_offset(slot),
3997 (nritems - slot) * sizeof(struct btrfs_item));
3998
3999 /* shift the data */
4000 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4001 data_end - total_data, btrfs_leaf_data(leaf) +
4002 data_end, old_data - data_end);
4003 data_end = old_data;
4004 } else {
4005 /*
4006 * this sucks but it has to be done, if we are inserting at
4007 * the end of the leaf only insert 1 of the items, since we
4008 * have no way of knowing what's on the next leaf and we'd have
4009 * to drop our current locks to figure it out
4010 */
4011 nr = 1;
4012 }
4013
4014 /* setup the item for the new data */
4015 for (i = 0; i < nr; i++) {
4016 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4017 btrfs_set_item_key(leaf, &disk_key, slot + i);
4018 item = btrfs_item_nr(leaf, slot + i);
cfed81a0
CM
4019 btrfs_set_token_item_offset(leaf, item,
4020 data_end - data_size[i], &token);
f3465ca4 4021 data_end -= data_size[i];
cfed81a0 4022 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
f3465ca4
JB
4023 }
4024 btrfs_set_header_nritems(leaf, nritems + nr);
4025 btrfs_mark_buffer_dirty(leaf);
4026
4027 ret = 0;
4028 if (slot == 0) {
4029 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
143bede5 4030 fixup_low_keys(trans, root, path, &disk_key, 1);
f3465ca4
JB
4031 }
4032
4033 if (btrfs_leaf_free_space(root, leaf) < 0) {
4034 btrfs_print_leaf(root, leaf);
4035 BUG();
4036 }
4037out:
4038 if (!ret)
4039 ret = nr;
4040 return ret;
4041}
4042
74123bd7 4043/*
44871b1b
CM
4044 * this is a helper for btrfs_insert_empty_items, the main goal here is
4045 * to save stack depth by doing the bulk of the work in a function
4046 * that doesn't call btrfs_search_slot
74123bd7 4047 */
143bede5
JM
4048void setup_items_for_insert(struct btrfs_trans_handle *trans,
4049 struct btrfs_root *root, struct btrfs_path *path,
4050 struct btrfs_key *cpu_key, u32 *data_size,
4051 u32 total_data, u32 total_size, int nr)
be0e5c09 4052{
5f39d397 4053 struct btrfs_item *item;
9c58309d 4054 int i;
7518a238 4055 u32 nritems;
be0e5c09 4056 unsigned int data_end;
e2fa7227 4057 struct btrfs_disk_key disk_key;
44871b1b
CM
4058 struct extent_buffer *leaf;
4059 int slot;
cfed81a0
CM
4060 struct btrfs_map_token token;
4061
4062 btrfs_init_map_token(&token);
e2fa7227 4063
5f39d397 4064 leaf = path->nodes[0];
44871b1b 4065 slot = path->slots[0];
74123bd7 4066
5f39d397 4067 nritems = btrfs_header_nritems(leaf);
123abc88 4068 data_end = leaf_data_end(root, leaf);
eb60ceac 4069
f25956cc 4070 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3326d1b0 4071 btrfs_print_leaf(root, leaf);
d397712b 4072 printk(KERN_CRIT "not enough freespace need %u have %d\n",
9c58309d 4073 total_size, btrfs_leaf_free_space(root, leaf));
be0e5c09 4074 BUG();
d4dbff95 4075 }
5f39d397 4076
be0e5c09 4077 if (slot != nritems) {
5f39d397 4078 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
be0e5c09 4079
5f39d397
CM
4080 if (old_data < data_end) {
4081 btrfs_print_leaf(root, leaf);
d397712b 4082 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
5f39d397
CM
4083 slot, old_data, data_end);
4084 BUG_ON(1);
4085 }
be0e5c09
CM
4086 /*
4087 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4088 */
4089 /* first correct the data pointers */
0783fcfc 4090 for (i = slot; i < nritems; i++) {
5f39d397 4091 u32 ioff;
db94535d 4092
5f39d397 4093 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4094 ioff = btrfs_token_item_offset(leaf, item, &token);
4095 btrfs_set_token_item_offset(leaf, item,
4096 ioff - total_data, &token);
0783fcfc 4097 }
be0e5c09 4098 /* shift the items */
9c58309d 4099 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
5f39d397 4100 btrfs_item_nr_offset(slot),
d6025579 4101 (nritems - slot) * sizeof(struct btrfs_item));
be0e5c09
CM
4102
4103 /* shift the data */
5f39d397 4104 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
9c58309d 4105 data_end - total_data, btrfs_leaf_data(leaf) +
d6025579 4106 data_end, old_data - data_end);
be0e5c09
CM
4107 data_end = old_data;
4108 }
5f39d397 4109
62e2749e 4110 /* setup the item for the new data */
9c58309d
CM
4111 for (i = 0; i < nr; i++) {
4112 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4113 btrfs_set_item_key(leaf, &disk_key, slot + i);
4114 item = btrfs_item_nr(leaf, slot + i);
cfed81a0
CM
4115 btrfs_set_token_item_offset(leaf, item,
4116 data_end - data_size[i], &token);
9c58309d 4117 data_end -= data_size[i];
cfed81a0 4118 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
9c58309d 4119 }
44871b1b 4120
9c58309d 4121 btrfs_set_header_nritems(leaf, nritems + nr);
aa5d6bed 4122
5a01a2e3
CM
4123 if (slot == 0) {
4124 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
143bede5 4125 fixup_low_keys(trans, root, path, &disk_key, 1);
5a01a2e3 4126 }
b9473439
CM
4127 btrfs_unlock_up_safe(path, 1);
4128 btrfs_mark_buffer_dirty(leaf);
aa5d6bed 4129
5f39d397
CM
4130 if (btrfs_leaf_free_space(root, leaf) < 0) {
4131 btrfs_print_leaf(root, leaf);
be0e5c09 4132 BUG();
5f39d397 4133 }
44871b1b
CM
4134}
4135
4136/*
4137 * Given a key and some data, insert items into the tree.
4138 * This does all the path init required, making room in the tree if needed.
4139 */
4140int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4141 struct btrfs_root *root,
4142 struct btrfs_path *path,
4143 struct btrfs_key *cpu_key, u32 *data_size,
4144 int nr)
4145{
44871b1b
CM
4146 int ret = 0;
4147 int slot;
4148 int i;
4149 u32 total_size = 0;
4150 u32 total_data = 0;
4151
4152 for (i = 0; i < nr; i++)
4153 total_data += data_size[i];
4154
4155 total_size = total_data + (nr * sizeof(struct btrfs_item));
4156 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4157 if (ret == 0)
4158 return -EEXIST;
4159 if (ret < 0)
143bede5 4160 return ret;
44871b1b 4161
44871b1b
CM
4162 slot = path->slots[0];
4163 BUG_ON(slot < 0);
4164
143bede5 4165 setup_items_for_insert(trans, root, path, cpu_key, data_size,
44871b1b 4166 total_data, total_size, nr);
143bede5 4167 return 0;
62e2749e
CM
4168}
4169
4170/*
4171 * Given a key and some data, insert an item into the tree.
4172 * This does all the path init required, making room in the tree if needed.
4173 */
e089f05c
CM
4174int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4175 *root, struct btrfs_key *cpu_key, void *data, u32
4176 data_size)
62e2749e
CM
4177{
4178 int ret = 0;
2c90e5d6 4179 struct btrfs_path *path;
5f39d397
CM
4180 struct extent_buffer *leaf;
4181 unsigned long ptr;
62e2749e 4182
2c90e5d6 4183 path = btrfs_alloc_path();
db5b493a
TI
4184 if (!path)
4185 return -ENOMEM;
2c90e5d6 4186 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 4187 if (!ret) {
5f39d397
CM
4188 leaf = path->nodes[0];
4189 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4190 write_extent_buffer(leaf, data, ptr, data_size);
4191 btrfs_mark_buffer_dirty(leaf);
62e2749e 4192 }
2c90e5d6 4193 btrfs_free_path(path);
aa5d6bed 4194 return ret;
be0e5c09
CM
4195}
4196
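/*
 * Illustrative sketch (not part of ctree.c): reserving two adjacent empty
 * items in one btrfs_insert_empty_items() call.  The keys are assumed to be
 * presorted and the payload sizes are made up; on success path->slots[0]
 * points at the first of the new items, ready for write_extent_buffer().
 */
static int example_insert_two_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_key *keys /* array of 2 */)
{
        u32 sizes[2] = { 16, 32 };      /* hypothetical payload sizes */
        int ret;

        ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
        if (ret)
                return ret;

        /*
         * items at path->slots[0] and path->slots[0] + 1 are reserved;
         * fill them with write_extent_buffer() before dirtying the leaf.
         */
        btrfs_mark_buffer_dirty(path->nodes[0]);
        return 0;
}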
74123bd7 4197/*
5de08d7d 4198 * delete the pointer from a given node.
74123bd7 4199 *
d352ac68
CM
4200 * the tree should have been previously balanced so the deletion does not
4201 * empty a node.
74123bd7 4202 */
143bede5 4203static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
f3ea38da
JS
4204 struct btrfs_path *path, int level, int slot,
4205 int tree_mod_log)
be0e5c09 4206{
5f39d397 4207 struct extent_buffer *parent = path->nodes[level];
7518a238 4208 u32 nritems;
f3ea38da 4209 int ret;
be0e5c09 4210
5f39d397 4211 nritems = btrfs_header_nritems(parent);
d397712b 4212 if (slot != nritems - 1) {
f3ea38da
JS
4213 if (tree_mod_log && level)
4214 tree_mod_log_eb_move(root->fs_info, parent, slot,
4215 slot + 1, nritems - slot - 1);
5f39d397
CM
4216 memmove_extent_buffer(parent,
4217 btrfs_node_key_ptr_offset(slot),
4218 btrfs_node_key_ptr_offset(slot + 1),
d6025579
CM
4219 sizeof(struct btrfs_key_ptr) *
4220 (nritems - slot - 1));
bb803951 4221 }
f3ea38da
JS
4222
4223 if (tree_mod_log && level) {
4224 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4225 MOD_LOG_KEY_REMOVE);
4226 BUG_ON(ret < 0);
4227 }
4228
7518a238 4229 nritems--;
5f39d397 4230 btrfs_set_header_nritems(parent, nritems);
7518a238 4231 if (nritems == 0 && parent == root->node) {
5f39d397 4232 BUG_ON(btrfs_header_level(root->node) != 1);
bb803951 4233 /* just turn the root into a leaf and break */
5f39d397 4234 btrfs_set_header_level(root->node, 0);
bb803951 4235 } else if (slot == 0) {
5f39d397
CM
4236 struct btrfs_disk_key disk_key;
4237
4238 btrfs_node_key(parent, &disk_key, 0);
143bede5 4239 fixup_low_keys(trans, root, path, &disk_key, level + 1);
be0e5c09 4240 }
d6025579 4241 btrfs_mark_buffer_dirty(parent);
be0e5c09
CM
4242}
4243
323ac95b
CM
4244/*
4245 * a helper function to delete the leaf pointed to by path->slots[1] and
5d4f98a2 4246 * path->nodes[1].
323ac95b
CM
4247 *
4248 * This deletes the pointer in path->nodes[1] and frees the leaf
4249 * block extent.  The function returns nothing.
4250 *
4251 * The path must have already been setup for deleting the leaf, including
4252 * all the proper balancing. path->nodes[1] must be locked.
4253 */
143bede5
JM
4254static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4255 struct btrfs_root *root,
4256 struct btrfs_path *path,
4257 struct extent_buffer *leaf)
323ac95b 4258{
5d4f98a2 4259 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
f3ea38da 4260 del_ptr(trans, root, path, 1, path->slots[1], 1);
323ac95b 4261
4d081c41
CM
4262 /*
4263 * btrfs_free_extent is expensive, we want to make sure we
4264 * aren't holding any locks when we call it
4265 */
4266 btrfs_unlock_up_safe(path, 0);
4267
f0486c68
YZ
4268 root_sub_used(root, leaf->len);
4269
3083ee2e 4270 extent_buffer_get(leaf);
5581a51a 4271 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3083ee2e 4272 free_extent_buffer_stale(leaf);
323ac95b 4273}
74123bd7
CM
4274/*
4275 * delete the item at the leaf level in path. If that empties
4276 * the leaf, remove it from the tree
4277 */
85e21bac
CM
4278int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4279 struct btrfs_path *path, int slot, int nr)
be0e5c09 4280{
5f39d397
CM
4281 struct extent_buffer *leaf;
4282 struct btrfs_item *item;
85e21bac
CM
4283 int last_off;
4284 int dsize = 0;
aa5d6bed
CM
4285 int ret = 0;
4286 int wret;
85e21bac 4287 int i;
7518a238 4288 u32 nritems;
cfed81a0
CM
4289 struct btrfs_map_token token;
4290
4291 btrfs_init_map_token(&token);
be0e5c09 4292
5f39d397 4293 leaf = path->nodes[0];
85e21bac
CM
4294 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4295
4296 for (i = 0; i < nr; i++)
4297 dsize += btrfs_item_size_nr(leaf, slot + i);
4298
5f39d397 4299 nritems = btrfs_header_nritems(leaf);
be0e5c09 4300
85e21bac 4301 if (slot + nr != nritems) {
123abc88 4302 int data_end = leaf_data_end(root, leaf);
5f39d397
CM
4303
4304 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
d6025579
CM
4305 data_end + dsize,
4306 btrfs_leaf_data(leaf) + data_end,
85e21bac 4307 last_off - data_end);
5f39d397 4308
85e21bac 4309 for (i = slot + nr; i < nritems; i++) {
5f39d397 4310 u32 ioff;
db94535d 4311
5f39d397 4312 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4313 ioff = btrfs_token_item_offset(leaf, item, &token);
4314 btrfs_set_token_item_offset(leaf, item,
4315 ioff + dsize, &token);
0783fcfc 4316 }
db94535d 4317
5f39d397 4318 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
85e21bac 4319 btrfs_item_nr_offset(slot + nr),
d6025579 4320 sizeof(struct btrfs_item) *
85e21bac 4321 (nritems - slot - nr));
be0e5c09 4322 }
85e21bac
CM
4323 btrfs_set_header_nritems(leaf, nritems - nr);
4324 nritems -= nr;
5f39d397 4325
74123bd7 4326 /* delete the leaf if we've emptied it */
7518a238 4327 if (nritems == 0) {
5f39d397
CM
4328 if (leaf == root->node) {
4329 btrfs_set_header_level(leaf, 0);
9a8dd150 4330 } else {
f0486c68
YZ
4331 btrfs_set_path_blocking(path);
4332 clean_tree_block(trans, root, leaf);
143bede5 4333 btrfs_del_leaf(trans, root, path, leaf);
9a8dd150 4334 }
be0e5c09 4335 } else {
7518a238 4336 int used = leaf_space_used(leaf, 0, nritems);
aa5d6bed 4337 if (slot == 0) {
5f39d397
CM
4338 struct btrfs_disk_key disk_key;
4339
4340 btrfs_item_key(leaf, &disk_key, 0);
143bede5 4341 fixup_low_keys(trans, root, path, &disk_key, 1);
aa5d6bed 4342 }
aa5d6bed 4343
74123bd7 4344 /* delete the leaf if it is mostly empty */
d717aa1d 4345 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
be0e5c09
CM
4346 /* push_leaf_left fixes the path.
4347 * make sure the path still points to our leaf
4348 * for possible call to del_ptr below
4349 */
4920c9ac 4350 slot = path->slots[1];
5f39d397
CM
4351 extent_buffer_get(leaf);
4352
b9473439 4353 btrfs_set_path_blocking(path);
99d8f83c
CM
4354 wret = push_leaf_left(trans, root, path, 1, 1,
4355 1, (u32)-1);
54aa1f4d 4356 if (wret < 0 && wret != -ENOSPC)
aa5d6bed 4357 ret = wret;
5f39d397
CM
4358
4359 if (path->nodes[0] == leaf &&
4360 btrfs_header_nritems(leaf)) {
99d8f83c
CM
4361 wret = push_leaf_right(trans, root, path, 1,
4362 1, 1, 0);
54aa1f4d 4363 if (wret < 0 && wret != -ENOSPC)
aa5d6bed
CM
4364 ret = wret;
4365 }
5f39d397
CM
4366
4367 if (btrfs_header_nritems(leaf) == 0) {
323ac95b 4368 path->slots[1] = slot;
143bede5 4369 btrfs_del_leaf(trans, root, path, leaf);
5f39d397 4370 free_extent_buffer(leaf);
143bede5 4371 ret = 0;
5de08d7d 4372 } else {
925baedd
CM
4373 /* if we're still in the path, make sure
4374 * we're dirty. Otherwise, one of the
4375 * push_leaf functions must have already
4376 * dirtied this buffer
4377 */
4378 if (path->nodes[0] == leaf)
4379 btrfs_mark_buffer_dirty(leaf);
5f39d397 4380 free_extent_buffer(leaf);
be0e5c09 4381 }
d5719762 4382 } else {
5f39d397 4383 btrfs_mark_buffer_dirty(leaf);
be0e5c09
CM
4384 }
4385 }
aa5d6bed 4386 return ret;
be0e5c09
CM
4387}
4388
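/*
 * Illustrative sketch (not part of ctree.c): deleting the item the path
 * points at together with its neighbour in the same leaf.  The two-item
 * count is arbitrary; the slot comes straight from path->slots[0] as in the
 * function above, and the caller must hold the leaf via the path.
 */
static int example_delete_pair(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_path *path)
{
        struct extent_buffer *leaf = path->nodes[0];

        /* both items must live in this leaf */
        if (path->slots[0] + 2 > btrfs_header_nritems(leaf))
                return -EINVAL;

        return btrfs_del_items(trans, root, path, path->slots[0], 2);
}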
7bb86316 4389/*
925baedd 4390 * search the tree again to find a leaf with lesser keys
7bb86316
CM
4391 * returns 0 if it found something or 1 if there are no lesser leaves.
4392 * returns < 0 on io errors.
d352ac68
CM
4393 *
4394 * This may release the path, and so you may lose any locks held at the
4395 * time you call it.
7bb86316
CM
4396 */
4397int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4398{
925baedd
CM
4399 struct btrfs_key key;
4400 struct btrfs_disk_key found_key;
4401 int ret;
7bb86316 4402
925baedd 4403 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 4404
925baedd
CM
4405 if (key.offset > 0)
4406 key.offset--;
4407 else if (key.type > 0)
4408 key.type--;
4409 else if (key.objectid > 0)
4410 key.objectid--;
4411 else
4412 return 1;
7bb86316 4413
b3b4aa74 4414 btrfs_release_path(path);
925baedd
CM
4415 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4416 if (ret < 0)
4417 return ret;
4418 btrfs_item_key(path->nodes[0], &found_key, 0);
4419 ret = comp_keys(&found_key, &key);
4420 if (ret < 0)
4421 return 0;
4422 return 1;
7bb86316
CM
4423}
4424
3f157a2f
CM
4425/*
4426 * A helper function to walk down the tree starting at min_key, and looking
4427 * for nodes or leaves that are either in cache or have a minimum
d352ac68 4428 * transaction id. This is used by the btree defrag code, and tree logging
3f157a2f
CM
4429 *
4430 * This does not cow, but it does stuff the starting key it finds back
4431 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4432 * key and get a writable path.
4433 *
4434 * This does lock as it descends, and path->keep_locks should be set
4435 * to 1 by the caller.
4436 *
4437 * This honors path->lowest_level to prevent descent past a given level
4438 * of the tree.
4439 *
d352ac68
CM
4440 * min_trans indicates the oldest transaction that you are interested
4441 * in walking through. Any nodes or leaves older than min_trans are
4442 * skipped over (without reading them).
4443 *
3f157a2f
CM
4444 * returns zero if something useful was found, < 0 on error and 1 if there
4445 * was nothing in the tree that matched the search criteria.
4446 */
4447int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
e02119d5 4448 struct btrfs_key *max_key,
3f157a2f
CM
4449 struct btrfs_path *path, int cache_only,
4450 u64 min_trans)
4451{
4452 struct extent_buffer *cur;
4453 struct btrfs_key found_key;
4454 int slot;
9652480b 4455 int sret;
3f157a2f
CM
4456 u32 nritems;
4457 int level;
4458 int ret = 1;
4459
934d375b 4460 WARN_ON(!path->keep_locks);
3f157a2f 4461again:
bd681513 4462 cur = btrfs_read_lock_root_node(root);
3f157a2f 4463 level = btrfs_header_level(cur);
e02119d5 4464 WARN_ON(path->nodes[level]);
3f157a2f 4465 path->nodes[level] = cur;
bd681513 4466 path->locks[level] = BTRFS_READ_LOCK;
3f157a2f
CM
4467
4468 if (btrfs_header_generation(cur) < min_trans) {
4469 ret = 1;
4470 goto out;
4471 }
d397712b 4472 while (1) {
3f157a2f
CM
4473 nritems = btrfs_header_nritems(cur);
4474 level = btrfs_header_level(cur);
9652480b 4475 sret = bin_search(cur, min_key, level, &slot);
3f157a2f 4476
323ac95b
CM
4477 /* at the lowest level, we're done, setup the path and exit */
4478 if (level == path->lowest_level) {
e02119d5
CM
4479 if (slot >= nritems)
4480 goto find_next_key;
3f157a2f
CM
4481 ret = 0;
4482 path->slots[level] = slot;
4483 btrfs_item_key_to_cpu(cur, &found_key, slot);
4484 goto out;
4485 }
9652480b
Y
4486 if (sret && slot > 0)
4487 slot--;
3f157a2f
CM
4488 /*
4489 * check this node pointer against the cache_only and
4490 * min_trans parameters. If it isn't in cache or is too
4491 * old, skip to the next one.
4492 */
d397712b 4493 while (slot < nritems) {
3f157a2f
CM
4494 u64 blockptr;
4495 u64 gen;
4496 struct extent_buffer *tmp;
e02119d5
CM
4497 struct btrfs_disk_key disk_key;
4498
3f157a2f
CM
4499 blockptr = btrfs_node_blockptr(cur, slot);
4500 gen = btrfs_node_ptr_generation(cur, slot);
4501 if (gen < min_trans) {
4502 slot++;
4503 continue;
4504 }
4505 if (!cache_only)
4506 break;
4507
e02119d5
CM
4508 if (max_key) {
4509 btrfs_node_key(cur, &disk_key, slot);
4510 if (comp_keys(&disk_key, max_key) >= 0) {
4511 ret = 1;
4512 goto out;
4513 }
4514 }
4515
3f157a2f
CM
4516 tmp = btrfs_find_tree_block(root, blockptr,
4517 btrfs_level_size(root, level - 1));
4518
b9fab919 4519 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
3f157a2f
CM
4520 free_extent_buffer(tmp);
4521 break;
4522 }
4523 if (tmp)
4524 free_extent_buffer(tmp);
4525 slot++;
4526 }
e02119d5 4527find_next_key:
3f157a2f
CM
4528 /*
4529 * we didn't find a candidate key in this node, walk forward
4530 * and find another one
4531 */
4532 if (slot >= nritems) {
e02119d5 4533 path->slots[level] = slot;
b4ce94de 4534 btrfs_set_path_blocking(path);
e02119d5 4535 sret = btrfs_find_next_key(root, path, min_key, level,
3f157a2f 4536 cache_only, min_trans);
e02119d5 4537 if (sret == 0) {
b3b4aa74 4538 btrfs_release_path(path);
3f157a2f
CM
4539 goto again;
4540 } else {
4541 goto out;
4542 }
4543 }
4544 /* save our key for returning back */
4545 btrfs_node_key_to_cpu(cur, &found_key, slot);
4546 path->slots[level] = slot;
4547 if (level == path->lowest_level) {
4548 ret = 0;
f7c79f30 4549 unlock_up(path, level, 1, 0, NULL);
3f157a2f
CM
4550 goto out;
4551 }
b4ce94de 4552 btrfs_set_path_blocking(path);
3f157a2f 4553 cur = read_node_slot(root, cur, slot);
79787eaa 4554 BUG_ON(!cur); /* -ENOMEM */
3f157a2f 4555
bd681513 4556 btrfs_tree_read_lock(cur);
b4ce94de 4557
bd681513 4558 path->locks[level - 1] = BTRFS_READ_LOCK;
3f157a2f 4559 path->nodes[level - 1] = cur;
f7c79f30 4560 unlock_up(path, level, 1, 0, NULL);
bd681513 4561 btrfs_clear_path_blocking(path, NULL, 0);
3f157a2f
CM
4562 }
4563out:
4564 if (ret == 0)
4565 memcpy(min_key, &found_key, sizeof(found_key));
b4ce94de 4566 btrfs_set_path_blocking(path);
3f157a2f
CM
4567 return ret;
4568}
4569
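/*
 * Illustrative sketch (not part of ctree.c): scanning everything newer than
 * 'min_trans' the way defrag/tree-log style callers drive
 * btrfs_search_forward().  The key-advance step is simplified and
 * hypothetical; keep_locks and the release/re-search loop follow the
 * comment above.
 */
static int example_scan_newer_than(struct btrfs_root *root, u64 min_trans)
{
        struct btrfs_path *path;
        struct btrfs_key min_key = { 0 };       /* start at the smallest key */
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->keep_locks = 1;

        while (1) {
                ret = btrfs_search_forward(root, &min_key, NULL, path,
                                           0, min_trans);
                if (ret)        /* 1 == nothing left, < 0 == error */
                        break;

                /* ... process path->nodes[path->lowest_level] here ... */

                btrfs_release_path(path);
                min_key.offset++;       /* simplistic advance for the sketch */
        }
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}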
4570/*
4571 * this is similar to btrfs_next_leaf, but does not try to preserve
4572 * and fixup the path. It looks for and returns the next key in the
4573 * tree based on the current path and the cache_only and min_trans
4574 * parameters.
4575 *
4576 * 0 is returned if another key is found, < 0 if there are any errors
4577 * and 1 is returned if there are no higher keys in the tree
4578 *
4579 * path->keep_locks should be set to 1 on the search made before
4580 * calling this function.
4581 */
e7a84565 4582int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
33c66f43 4583 struct btrfs_key *key, int level,
3f157a2f 4584 int cache_only, u64 min_trans)
e7a84565 4585{
e7a84565
CM
4586 int slot;
4587 struct extent_buffer *c;
4588
934d375b 4589 WARN_ON(!path->keep_locks);
d397712b 4590 while (level < BTRFS_MAX_LEVEL) {
e7a84565
CM
4591 if (!path->nodes[level])
4592 return 1;
4593
4594 slot = path->slots[level] + 1;
4595 c = path->nodes[level];
3f157a2f 4596next:
e7a84565 4597 if (slot >= btrfs_header_nritems(c)) {
33c66f43
YZ
4598 int ret;
4599 int orig_lowest;
4600 struct btrfs_key cur_key;
4601 if (level + 1 >= BTRFS_MAX_LEVEL ||
4602 !path->nodes[level + 1])
e7a84565 4603 return 1;
33c66f43
YZ
4604
4605 if (path->locks[level + 1]) {
4606 level++;
4607 continue;
4608 }
4609
4610 slot = btrfs_header_nritems(c) - 1;
4611 if (level == 0)
4612 btrfs_item_key_to_cpu(c, &cur_key, slot);
4613 else
4614 btrfs_node_key_to_cpu(c, &cur_key, slot);
4615
4616 orig_lowest = path->lowest_level;
b3b4aa74 4617 btrfs_release_path(path);
33c66f43
YZ
4618 path->lowest_level = level;
4619 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4620 0, 0);
4621 path->lowest_level = orig_lowest;
4622 if (ret < 0)
4623 return ret;
4624
4625 c = path->nodes[level];
4626 slot = path->slots[level];
4627 if (ret == 0)
4628 slot++;
4629 goto next;
e7a84565 4630 }
33c66f43 4631
e7a84565
CM
4632 if (level == 0)
4633 btrfs_item_key_to_cpu(c, key, slot);
3f157a2f
CM
4634 else {
4635 u64 blockptr = btrfs_node_blockptr(c, slot);
4636 u64 gen = btrfs_node_ptr_generation(c, slot);
4637
4638 if (cache_only) {
4639 struct extent_buffer *cur;
4640 cur = btrfs_find_tree_block(root, blockptr,
4641 btrfs_level_size(root, level - 1));
b9fab919
CM
4642 if (!cur ||
4643 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
3f157a2f
CM
4644 slot++;
4645 if (cur)
4646 free_extent_buffer(cur);
4647 goto next;
4648 }
4649 free_extent_buffer(cur);
4650 }
4651 if (gen < min_trans) {
4652 slot++;
4653 goto next;
4654 }
e7a84565 4655 btrfs_node_key_to_cpu(c, key, slot);
3f157a2f 4656 }
e7a84565
CM
4657 return 0;
4658 }
4659 return 1;
4660}
4661
97571fd0 4662/*
925baedd 4663 * search the tree again to find a leaf with greater keys
0f70abe2
CM
4664 * returns 0 if it found something or 1 if there are no greater leaves.
4665 * returns < 0 on io errors.
97571fd0 4666 */
234b63a0 4667int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
d97e63b6
CM
4668{
4669 int slot;
8e73f275 4670 int level;
5f39d397 4671 struct extent_buffer *c;
8e73f275 4672 struct extent_buffer *next;
925baedd
CM
4673 struct btrfs_key key;
4674 u32 nritems;
4675 int ret;
8e73f275 4676 int old_spinning = path->leave_spinning;
bd681513 4677 int next_rw_lock = 0;
925baedd
CM
4678
4679 nritems = btrfs_header_nritems(path->nodes[0]);
d397712b 4680 if (nritems == 0)
925baedd 4681 return 1;
925baedd 4682
8e73f275
CM
4683 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4684again:
4685 level = 1;
4686 next = NULL;
bd681513 4687 next_rw_lock = 0;
b3b4aa74 4688 btrfs_release_path(path);
8e73f275 4689
a2135011 4690 path->keep_locks = 1;
31533fb2 4691 path->leave_spinning = 1;
8e73f275 4692
925baedd
CM
4693 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4694 path->keep_locks = 0;
4695
4696 if (ret < 0)
4697 return ret;
4698
a2135011 4699 nritems = btrfs_header_nritems(path->nodes[0]);
168fd7d2
CM
4700 /*
4701 * by releasing the path above we dropped all our locks. A balance
4702 * could have added more items next to the key that used to be
4703 * at the very end of the block. So, check again here and
4704 * advance the path if there are now more items available.
4705 */
a2135011 4706 if (nritems > 0 && path->slots[0] < nritems - 1) {
e457afec
YZ
4707 if (ret == 0)
4708 path->slots[0]++;
8e73f275 4709 ret = 0;
925baedd
CM
4710 goto done;
4711 }
d97e63b6 4712
d397712b 4713 while (level < BTRFS_MAX_LEVEL) {
8e73f275
CM
4714 if (!path->nodes[level]) {
4715 ret = 1;
4716 goto done;
4717 }
5f39d397 4718
d97e63b6
CM
4719 slot = path->slots[level] + 1;
4720 c = path->nodes[level];
5f39d397 4721 if (slot >= btrfs_header_nritems(c)) {
d97e63b6 4722 level++;
8e73f275
CM
4723 if (level == BTRFS_MAX_LEVEL) {
4724 ret = 1;
4725 goto done;
4726 }
d97e63b6
CM
4727 continue;
4728 }
5f39d397 4729
925baedd 4730 if (next) {
bd681513 4731 btrfs_tree_unlock_rw(next, next_rw_lock);
5f39d397 4732 free_extent_buffer(next);
925baedd 4733 }
5f39d397 4734
8e73f275 4735 next = c;
bd681513 4736 next_rw_lock = path->locks[level];
8e73f275
CM
4737 ret = read_block_for_search(NULL, root, path, &next, level,
4738 slot, &key);
4739 if (ret == -EAGAIN)
4740 goto again;
5f39d397 4741
76a05b35 4742 if (ret < 0) {
b3b4aa74 4743 btrfs_release_path(path);
76a05b35
CM
4744 goto done;
4745 }
4746
5cd57b2c 4747 if (!path->skip_locking) {
bd681513 4748 ret = btrfs_try_tree_read_lock(next);
8e73f275
CM
4749 if (!ret) {
4750 btrfs_set_path_blocking(path);
bd681513 4751 btrfs_tree_read_lock(next);
31533fb2 4752 btrfs_clear_path_blocking(path, next,
bd681513 4753 BTRFS_READ_LOCK);
8e73f275 4754 }
31533fb2 4755 next_rw_lock = BTRFS_READ_LOCK;
5cd57b2c 4756 }
d97e63b6
CM
4757 break;
4758 }
4759 path->slots[level] = slot;
d397712b 4760 while (1) {
d97e63b6
CM
4761 level--;
4762 c = path->nodes[level];
925baedd 4763 if (path->locks[level])
bd681513 4764 btrfs_tree_unlock_rw(c, path->locks[level]);
8e73f275 4765
5f39d397 4766 free_extent_buffer(c);
d97e63b6
CM
4767 path->nodes[level] = next;
4768 path->slots[level] = 0;
a74a4b97 4769 if (!path->skip_locking)
bd681513 4770 path->locks[level] = next_rw_lock;
d97e63b6
CM
4771 if (!level)
4772 break;
b4ce94de 4773
8e73f275
CM
4774 ret = read_block_for_search(NULL, root, path, &next, level,
4775 0, &key);
4776 if (ret == -EAGAIN)
4777 goto again;
4778
76a05b35 4779 if (ret < 0) {
b3b4aa74 4780 btrfs_release_path(path);
76a05b35
CM
4781 goto done;
4782 }
4783
5cd57b2c 4784 if (!path->skip_locking) {
bd681513 4785 ret = btrfs_try_tree_read_lock(next);
8e73f275
CM
4786 if (!ret) {
4787 btrfs_set_path_blocking(path);
bd681513 4788 btrfs_tree_read_lock(next);
31533fb2 4789 btrfs_clear_path_blocking(path, next,
bd681513
CM
4790 BTRFS_READ_LOCK);
4791 }
31533fb2 4792 next_rw_lock = BTRFS_READ_LOCK;
5cd57b2c 4793 }
d97e63b6 4794 }
8e73f275 4795 ret = 0;
925baedd 4796done:
f7c79f30 4797 unlock_up(path, 0, 1, 0, NULL);
8e73f275
CM
4798 path->leave_spinning = old_spinning;
4799 if (!old_spinning)
4800 btrfs_set_path_blocking(path);
4801
4802 return ret;
d97e63b6 4803}
0b86a832 4804
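/*
 * Illustrative sketch (not part of ctree.c): the usual whole-tree item walk
 * built from btrfs_search_slot() plus btrfs_next_leaf().  Starting key and
 * tree are arbitrary; return-value handling mirrors the functions above
 * (0 = more to read, 1 = end of tree, < 0 = error).
 */
static int example_walk_all_items(struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key = { 0 };   /* start before the first item */
        struct extent_buffer *leaf;
        int slot;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret)        /* 1 == done, < 0 == error */
                                break;
                        continue;
                }

                btrfs_item_key_to_cpu(leaf, &key, slot);
                /* ... process the item at (leaf, slot) here ... */

                path->slots[0]++;
        }
out:
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}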
3f157a2f
CM
4805/*
4806 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4807 * searching until it gets past min_objectid or finds an item of 'type'
4808 *
4809 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4810 */
0b86a832
CM
4811int btrfs_previous_item(struct btrfs_root *root,
4812 struct btrfs_path *path, u64 min_objectid,
4813 int type)
4814{
4815 struct btrfs_key found_key;
4816 struct extent_buffer *leaf;
e02119d5 4817 u32 nritems;
0b86a832
CM
4818 int ret;
4819
d397712b 4820 while (1) {
0b86a832 4821 if (path->slots[0] == 0) {
b4ce94de 4822 btrfs_set_path_blocking(path);
0b86a832
CM
4823 ret = btrfs_prev_leaf(root, path);
4824 if (ret != 0)
4825 return ret;
4826 } else {
4827 path->slots[0]--;
4828 }
4829 leaf = path->nodes[0];
e02119d5
CM
4830 nritems = btrfs_header_nritems(leaf);
4831 if (nritems == 0)
4832 return 1;
4833 if (path->slots[0] == nritems)
4834 path->slots[0]--;
4835
0b86a832 4836 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
e02119d5
CM
4837 if (found_key.objectid < min_objectid)
4838 break;
0a4eefbb
YZ
4839 if (found_key.type == type)
4840 return 0;
e02119d5
CM
4841 if (found_key.objectid == min_objectid &&
4842 found_key.type < type)
4843 break;
0b86a832
CM
4844 }
4845 return 1;
4846}
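/*
 * Illustrative sketch (not part of ctree.c): stepping backwards from the
 * current path position to the nearest preceding item of a given type.
 * The extra objectid check is an assumption about what the caller wants;
 * btrfs_previous_item() itself only guarantees objectid >= min_objectid.
 */
static int example_find_prev_of_type(struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     u64 objectid, int type)
{
        struct btrfs_key found;
        int ret;

        ret = btrfs_previous_item(root, path, objectid, type);
        if (ret)
                return ret;     /* 1 == nothing before, < 0 == error */

        btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
        if (found.objectid != objectid)
                return 1;       /* previous item belongs to another object */
        return 0;
}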