/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	struct seq_list elem;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

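/*
 * Note on tree_mod_elem::index: it is the block's logical address shifted
 * right by PAGE_CACHE_SHIFT, so with 4K pages a block at logical address
 * 0x40000000 gets index 0x40000.  Together with elem.seq it forms the
 * (index, sequence) pair that orders the tree mod log rb-tree below.
 */
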
static inline void
__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
{
	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}

void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	elem->flags = 1;
	spin_lock(&fs_info->tree_mod_seq_lock);
	__get_tree_mod_seq(fs_info, elem);
	spin_unlock(&fs_info->tree_mod_seq_lock);
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	BUG_ON(!(elem->flags & 1));
	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				goto out;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->elem.seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		list_del(&tm->elem.list);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
out:
	spin_unlock(&fs_info->tree_mod_seq_lock);
}

/*
 * key order of the log:
 *  index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;
	int ret = 0;

	BUG_ON(!tm || !tm->elem.seq);

	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->elem.seq < tm->elem.seq)
			new = &((*new)->rb_left);
		else if (cur->elem.seq > tm->elem.seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			ret = -EEXIST;
			goto unlock;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
unlock:
	write_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}

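/*
 * __tree_mod_log_insert() above orders entries by index first and sequence
 * number second; attempting to insert a second entry with the same
 * (index, seq) pair frees the new element and returns -EEXIST.
 */
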
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (!eb)
		return 0;
	if (btrfs_header_level(eb) == 0)
		return 1;
	return 0;
}

static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;
	int seq;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->elem.flags = 0;
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list, because no blocker exists. items
		 * are removed from the list only when the existing blocker is
		 * removed from the list.
		 */
		kfree(tm);
		seq = 0;
	} else {
		__get_tree_mod_seq(fs_info, &tm->elem);
		seq = tm->elem.seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	return seq;
}

static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	return __tree_mod_log_insert(fs_info, tm);
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	return __tree_mod_log_insert(fs_info, tm);
}

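/*
 * tree_mod_log_insert_move() records a single MOD_LOG_MOVE_KEYS entry, but
 * first logs a MOD_LOG_KEY_REMOVE_WHILE_MOVING entry for every destination
 * slot that the move overwrites, so the rewind code can restore those
 * pointers later.
 */
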
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->elem.seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->elem.seq > cur->elem.seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->elem.seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->elem.seq < cur->elem.seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

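/*
 * Both search variants above only consider log entries for the given block
 * whose sequence number is at least min_seq; tree_mod_log_search_oldest()
 * picks the smallest such sequence, tree_mod_log_search() the largest.
 */
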
static inline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	/* speed this up by single seq for all operations? */
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static inline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	int i;
	int ret;
	u32 nritems;

	if (tree_mod_dont_log(fs_info, eb))
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}

static inline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	tree_mod_log_free_eb(root->fs_info, root->node);
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}

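/*
 * tree_mod_log_set_root_pointer() pairs two kinds of records: it first logs
 * the removal of every pointer in the old root via tree_mod_log_free_eb(),
 * then logs one MOD_LOG_ROOT_REPLACE entry keyed on the new root block.
 */
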
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

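/*
 * After __btrfs_cow_block() the caller owns the new copy via *cow_ret: if
 * the copied block was the root, the copy is wired in with
 * rcu_assign_pointer() and logged as a root replacement; otherwise only the
 * parent slot's block pointer and generation are updated.
 */
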
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return 0;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return 0;
		/*
		 * we must have key remove operations in the log before the
		 * replace operation.
		 */
		BUG_ON(!tm);

		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->elem.seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			if (tm->slot != n - 1) {
				o_dst = btrfs_node_key_ptr_offset(tm->slot);
				o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
				memmove_extent_buffer(eb, o_dst, o_src, p_size);
			}
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}

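/*
 * The rewind loop above undoes each logged operation: a logged key removal
 * is undone by re-inserting the saved key/blockptr (nritems grows), a logged
 * key addition by dropping the slot again (nritems shrinks), and a logged
 * move by copying the pointers back from the destination to the source slot.
 */
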
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb;
	struct tree_mod_root *old_root;
	u64 old_generation;

	eb = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
	if (!tm)
		return root->node;

	old_root = &tm->old_root;
	old_generation = tm->generation;

	tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq);
	/*
	 * there was an item in the log when __tree_mod_log_oldest_root
	 * returned. this one must not go away, because the time_seq passed to
	 * us must be blocking its removal.
	 */
	BUG_ON(!tm);

	if (old_root->logical == root->node->start) {
		/* there are logged operations for the current root */
		eb = btrfs_clone_extent_buffer(root->node);
	} else {
		/* there's a root replace operation for the current root */
		eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
					       root->nodesize);
	}
	btrfs_tree_read_unlock(root->node);
	free_extent_buffer(root->node);
	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root->logical != root->node->start) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, root->root_key.objectid);
	}
	btrfs_set_header_level(eb, old_root->level);
	btrfs_set_header_generation(eb, old_generation);
	__tree_mod_log_rewind(eb, time_seq, tm);
	extent_buffer_get(eb);

	return eb;
}

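/*
 * Note that once a rewind is needed, the buffer returned by get_old_root()
 * is never the live root itself: it is either a private clone of the current
 * root node or a dummy extent buffer rebuilt purely from the mod log.
 */
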
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

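/*
 * The search_start hint above rounds the original block's start down to a
 * 1GiB boundary via buf->start & ~((u64)(1024 * 1024 * 1024) - 1); for
 * example a block at byte 1342177280 (1.25GiB) yields a hint of 1GiB.
 */
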
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

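/*
 * btrfs_comp_cpu_keys() orders keys by objectid, then type, then offset, so
 * for example (256, 1, 0) < (256, 2, 0) < (257, 0, 0).  close_blocks()
 * above treats two blocks as close when the gap between them is below
 * 32768 bytes (32KiB).
 */
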
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}


/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

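/*
 * generic_bin_search() returns 0 with *slot set to the matching index on an
 * exact hit; otherwise it returns 1 and *slot is the number of keys smaller
 * than the search key, i.e. the slot where the key would be inserted.
 */
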
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}

d352ac68
CM
1582/*
1583 * node level balancing, used to make sure nodes are in proper order for
1584 * item deletion. We balance from the top down, so we have to make sure
1585 * that a deletion won't leave an node completely empty later on.
1586 */
e02119d5 1587static noinline int balance_level(struct btrfs_trans_handle *trans,
98ed5174
CM
1588 struct btrfs_root *root,
1589 struct btrfs_path *path, int level)
bb803951 1590{
5f39d397
CM
1591 struct extent_buffer *right = NULL;
1592 struct extent_buffer *mid;
1593 struct extent_buffer *left = NULL;
1594 struct extent_buffer *parent = NULL;
bb803951
CM
1595 int ret = 0;
1596 int wret;
1597 int pslot;
bb803951 1598 int orig_slot = path->slots[level];
79f95c82 1599 u64 orig_ptr;
bb803951
CM
1600
1601 if (level == 0)
1602 return 0;
1603
5f39d397 1604 mid = path->nodes[level];
b4ce94de 1605
bd681513
CM
1606 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1607 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
7bb86316
CM
1608 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1609
1d4f8a0c 1610 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
79f95c82 1611
a05a9bb1 1612 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 1613 parent = path->nodes[level + 1];
a05a9bb1
LZ
1614 pslot = path->slots[level + 1];
1615 }
bb803951 1616
40689478
CM
1617 /*
1618 * deal with the case where there is only one pointer in the root
1619 * by promoting the node below to a root
1620 */
5f39d397
CM
1621 if (!parent) {
1622 struct extent_buffer *child;
bb803951 1623
5f39d397 1624 if (btrfs_header_nritems(mid) != 1)
bb803951
CM
1625 return 0;
1626
1627 /* promote the child to a root */
5f39d397 1628 child = read_node_slot(root, mid, 0);
305a26af
MF
1629 if (!child) {
1630 ret = -EROFS;
1631 btrfs_std_error(root->fs_info, ret);
1632 goto enospc;
1633 }
1634
925baedd 1635 btrfs_tree_lock(child);
b4ce94de 1636 btrfs_set_lock_blocking(child);
9fa8cfe7 1637 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
f0486c68
YZ
1638 if (ret) {
1639 btrfs_tree_unlock(child);
1640 free_extent_buffer(child);
1641 goto enospc;
1642 }
2f375ab9 1643
f230475e 1644 tree_mod_log_set_root_pointer(root, child);
240f62c8 1645 rcu_assign_pointer(root->node, child);
925baedd 1646
0b86a832 1647 add_root_to_dirty_list(root);
925baedd 1648 btrfs_tree_unlock(child);
b4ce94de 1649
925baedd 1650 path->locks[level] = 0;
bb803951 1651 path->nodes[level] = NULL;
5f39d397 1652 clean_tree_block(trans, root, mid);
925baedd 1653 btrfs_tree_unlock(mid);
bb803951 1654 /* once for the path */
5f39d397 1655 free_extent_buffer(mid);
f0486c68
YZ
1656
1657 root_sub_used(root, mid->len);
5581a51a 1658 btrfs_free_tree_block(trans, root, mid, 0, 1);
bb803951 1659 /* once for the root ptr */
3083ee2e 1660 free_extent_buffer_stale(mid);
f0486c68 1661 return 0;
bb803951 1662 }
5f39d397 1663 if (btrfs_header_nritems(mid) >
123abc88 1664 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
bb803951
CM
1665 return 0;
1666
5f39d397
CM
1667 left = read_node_slot(root, parent, pslot - 1);
1668 if (left) {
925baedd 1669 btrfs_tree_lock(left);
b4ce94de 1670 btrfs_set_lock_blocking(left);
5f39d397 1671 wret = btrfs_cow_block(trans, root, left,
9fa8cfe7 1672 parent, pslot - 1, &left);
54aa1f4d
CM
1673 if (wret) {
1674 ret = wret;
1675 goto enospc;
1676 }
2cc58cf2 1677 }
5f39d397
CM
1678 right = read_node_slot(root, parent, pslot + 1);
1679 if (right) {
925baedd 1680 btrfs_tree_lock(right);
b4ce94de 1681 btrfs_set_lock_blocking(right);
5f39d397 1682 wret = btrfs_cow_block(trans, root, right,
9fa8cfe7 1683 parent, pslot + 1, &right);
2cc58cf2
CM
1684 if (wret) {
1685 ret = wret;
1686 goto enospc;
1687 }
1688 }
1689
1690 /* first, try to make some room in the middle buffer */
5f39d397
CM
1691 if (left) {
1692 orig_slot += btrfs_header_nritems(left);
bce4eae9 1693 wret = push_node_left(trans, root, left, mid, 1);
79f95c82
CM
1694 if (wret < 0)
1695 ret = wret;
bb803951 1696 }
79f95c82
CM
1697
1698 /*
1699 * then try to empty the right most buffer into the middle
1700 */
5f39d397 1701 if (right) {
971a1f66 1702 wret = push_node_left(trans, root, mid, right, 1);
54aa1f4d 1703 if (wret < 0 && wret != -ENOSPC)
79f95c82 1704 ret = wret;
5f39d397 1705 if (btrfs_header_nritems(right) == 0) {
5f39d397 1706 clean_tree_block(trans, root, right);
925baedd 1707 btrfs_tree_unlock(right);
f3ea38da 1708 del_ptr(trans, root, path, level + 1, pslot + 1, 1);
f0486c68 1709 root_sub_used(root, right->len);
5581a51a 1710 btrfs_free_tree_block(trans, root, right, 0, 1);
3083ee2e 1711 free_extent_buffer_stale(right);
f0486c68 1712 right = NULL;
bb803951 1713 } else {
5f39d397
CM
1714 struct btrfs_disk_key right_key;
1715 btrfs_node_key(right, &right_key, 0);
f230475e
JS
1716 tree_mod_log_set_node_key(root->fs_info, parent,
1717 &right_key, pslot + 1, 0);
5f39d397
CM
1718 btrfs_set_node_key(parent, &right_key, pslot + 1);
1719 btrfs_mark_buffer_dirty(parent);
bb803951
CM
1720 }
1721 }
5f39d397 1722 if (btrfs_header_nritems(mid) == 1) {
79f95c82
CM
1723 /*
1724 * we're not allowed to leave a node with one item in the
1725 * tree during a delete. A deletion from lower in the tree
1726 * could try to delete the only pointer in this node.
1727 * So, pull some keys from the left.
1728 * There has to be a left pointer at this point because
1729 * otherwise we would have pulled some pointers from the
1730 * right
1731 */
305a26af
MF
1732 if (!left) {
1733 ret = -EROFS;
1734 btrfs_std_error(root->fs_info, ret);
1735 goto enospc;
1736 }
5f39d397 1737 wret = balance_node_right(trans, root, mid, left);
54aa1f4d 1738 if (wret < 0) {
79f95c82 1739 ret = wret;
54aa1f4d
CM
1740 goto enospc;
1741 }
bce4eae9
CM
1742 if (wret == 1) {
1743 wret = push_node_left(trans, root, left, mid, 1);
1744 if (wret < 0)
1745 ret = wret;
1746 }
79f95c82
CM
1747 BUG_ON(wret == 1);
1748 }
5f39d397 1749 if (btrfs_header_nritems(mid) == 0) {
5f39d397 1750 clean_tree_block(trans, root, mid);
925baedd 1751 btrfs_tree_unlock(mid);
f3ea38da 1752 del_ptr(trans, root, path, level + 1, pslot, 1);
f0486c68 1753 root_sub_used(root, mid->len);
5581a51a 1754 btrfs_free_tree_block(trans, root, mid, 0, 1);
3083ee2e 1755 free_extent_buffer_stale(mid);
f0486c68 1756 mid = NULL;
79f95c82
CM
1757 } else {
1758 /* update the parent key to reflect our changes */
5f39d397
CM
1759 struct btrfs_disk_key mid_key;
1760 btrfs_node_key(mid, &mid_key, 0);
f230475e
JS
1761 tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
1762 pslot, 0);
5f39d397
CM
1763 btrfs_set_node_key(parent, &mid_key, pslot);
1764 btrfs_mark_buffer_dirty(parent);
79f95c82 1765 }
bb803951 1766
79f95c82 1767 /* update the path */
5f39d397
CM
1768 if (left) {
1769 if (btrfs_header_nritems(left) > orig_slot) {
1770 extent_buffer_get(left);
925baedd 1771 /* left was locked after cow */
5f39d397 1772 path->nodes[level] = left;
bb803951
CM
1773 path->slots[level + 1] -= 1;
1774 path->slots[level] = orig_slot;
925baedd
CM
1775 if (mid) {
1776 btrfs_tree_unlock(mid);
5f39d397 1777 free_extent_buffer(mid);
925baedd 1778 }
bb803951 1779 } else {
5f39d397 1780 orig_slot -= btrfs_header_nritems(left);
bb803951
CM
1781 path->slots[level] = orig_slot;
1782 }
1783 }
79f95c82 1784 /* double check we haven't messed things up */
e20d96d6 1785 if (orig_ptr !=
5f39d397 1786 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
79f95c82 1787 BUG();
54aa1f4d 1788enospc:
925baedd
CM
1789 if (right) {
1790 btrfs_tree_unlock(right);
5f39d397 1791 free_extent_buffer(right);
925baedd
CM
1792 }
1793 if (left) {
1794 if (path->nodes[level] != left)
1795 btrfs_tree_unlock(left);
5f39d397 1796 free_extent_buffer(left);
925baedd 1797 }
bb803951
CM
1798 return ret;
1799}
1800
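/*
 * Illustrative aside (not part of ctree.c): a minimal user-space sketch of
 * the tail of balance_level() above, using plain arrays instead of extent
 * buffers and no locking or COW.  After items have been pushed around, a
 * middle child that ended up empty is dropped from its parent (as del_ptr()
 * does above); otherwise the parent's separator key for that slot is
 * refreshed to the child's new first key.  struct toy_node is hypothetical.
 */
#include <string.h>

struct toy_node {
	unsigned long long keys[32];	/* first key of each child / item */
	int nritems;			/* number of populated slots      */
};

/* drop the pointer at 'slot' from a parent node, like del_ptr() above */
static void toy_del_ptr(struct toy_node *parent, int slot)
{
	memmove(&parent->keys[slot], &parent->keys[slot + 1],
		(parent->nritems - slot - 1) * sizeof(parent->keys[0]));
	parent->nritems--;
}

static void toy_finish_balance(struct toy_node *parent, int pslot,
			       struct toy_node *mid)
{
	if (mid->nritems == 0)		/* child emptied: remove it       */
		toy_del_ptr(parent, pslot);
	else				/* child shrank: refresh its key  */
		parent->keys[pslot] = mid->keys[0];
}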
d352ac68
CM
1801/* Node balancing for insertion. Here we only split or push nodes around
1802 * when they are completely full. This is also done top down, so we
1803 * have to be pessimistic.
1804 */
d397712b 1805static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
98ed5174
CM
1806 struct btrfs_root *root,
1807 struct btrfs_path *path, int level)
e66f709b 1808{
5f39d397
CM
1809 struct extent_buffer *right = NULL;
1810 struct extent_buffer *mid;
1811 struct extent_buffer *left = NULL;
1812 struct extent_buffer *parent = NULL;
e66f709b
CM
1813 int ret = 0;
1814 int wret;
1815 int pslot;
1816 int orig_slot = path->slots[level];
e66f709b
CM
1817
1818 if (level == 0)
1819 return 1;
1820
5f39d397 1821 mid = path->nodes[level];
7bb86316 1822 WARN_ON(btrfs_header_generation(mid) != trans->transid);
e66f709b 1823
a05a9bb1 1824 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 1825 parent = path->nodes[level + 1];
a05a9bb1
LZ
1826 pslot = path->slots[level + 1];
1827 }
e66f709b 1828
5f39d397 1829 if (!parent)
e66f709b 1830 return 1;
e66f709b 1831
5f39d397 1832 left = read_node_slot(root, parent, pslot - 1);
e66f709b
CM
1833
1834 /* first, try to make some room in the middle buffer */
5f39d397 1835 if (left) {
e66f709b 1836 u32 left_nr;
925baedd
CM
1837
1838 btrfs_tree_lock(left);
b4ce94de
CM
1839 btrfs_set_lock_blocking(left);
1840
5f39d397 1841 left_nr = btrfs_header_nritems(left);
33ade1f8
CM
1842 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1843 wret = 1;
1844 } else {
5f39d397 1845 ret = btrfs_cow_block(trans, root, left, parent,
9fa8cfe7 1846 pslot - 1, &left);
54aa1f4d
CM
1847 if (ret)
1848 wret = 1;
1849 else {
54aa1f4d 1850 wret = push_node_left(trans, root,
971a1f66 1851 left, mid, 0);
54aa1f4d 1852 }
33ade1f8 1853 }
e66f709b
CM
1854 if (wret < 0)
1855 ret = wret;
1856 if (wret == 0) {
5f39d397 1857 struct btrfs_disk_key disk_key;
e66f709b 1858 orig_slot += left_nr;
5f39d397 1859 btrfs_node_key(mid, &disk_key, 0);
f230475e
JS
1860 tree_mod_log_set_node_key(root->fs_info, parent,
1861 &disk_key, pslot, 0);
5f39d397
CM
1862 btrfs_set_node_key(parent, &disk_key, pslot);
1863 btrfs_mark_buffer_dirty(parent);
1864 if (btrfs_header_nritems(left) > orig_slot) {
1865 path->nodes[level] = left;
e66f709b
CM
1866 path->slots[level + 1] -= 1;
1867 path->slots[level] = orig_slot;
925baedd 1868 btrfs_tree_unlock(mid);
5f39d397 1869 free_extent_buffer(mid);
e66f709b
CM
1870 } else {
1871 orig_slot -=
5f39d397 1872 btrfs_header_nritems(left);
e66f709b 1873 path->slots[level] = orig_slot;
925baedd 1874 btrfs_tree_unlock(left);
5f39d397 1875 free_extent_buffer(left);
e66f709b 1876 }
e66f709b
CM
1877 return 0;
1878 }
925baedd 1879 btrfs_tree_unlock(left);
5f39d397 1880 free_extent_buffer(left);
e66f709b 1881 }
925baedd 1882 right = read_node_slot(root, parent, pslot + 1);
e66f709b
CM
1883
1884 /*
 1885 * then try to empty the rightmost buffer into the middle
1886 */
5f39d397 1887 if (right) {
33ade1f8 1888 u32 right_nr;
b4ce94de 1889
925baedd 1890 btrfs_tree_lock(right);
b4ce94de
CM
1891 btrfs_set_lock_blocking(right);
1892
5f39d397 1893 right_nr = btrfs_header_nritems(right);
33ade1f8
CM
1894 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1895 wret = 1;
1896 } else {
5f39d397
CM
1897 ret = btrfs_cow_block(trans, root, right,
1898 parent, pslot + 1,
9fa8cfe7 1899 &right);
54aa1f4d
CM
1900 if (ret)
1901 wret = 1;
1902 else {
54aa1f4d 1903 wret = balance_node_right(trans, root,
5f39d397 1904 right, mid);
54aa1f4d 1905 }
33ade1f8 1906 }
e66f709b
CM
1907 if (wret < 0)
1908 ret = wret;
1909 if (wret == 0) {
5f39d397
CM
1910 struct btrfs_disk_key disk_key;
1911
1912 btrfs_node_key(right, &disk_key, 0);
f230475e
JS
1913 tree_mod_log_set_node_key(root->fs_info, parent,
1914 &disk_key, pslot + 1, 0);
5f39d397
CM
1915 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1916 btrfs_mark_buffer_dirty(parent);
1917
1918 if (btrfs_header_nritems(mid) <= orig_slot) {
1919 path->nodes[level] = right;
e66f709b
CM
1920 path->slots[level + 1] += 1;
1921 path->slots[level] = orig_slot -
5f39d397 1922 btrfs_header_nritems(mid);
925baedd 1923 btrfs_tree_unlock(mid);
5f39d397 1924 free_extent_buffer(mid);
e66f709b 1925 } else {
925baedd 1926 btrfs_tree_unlock(right);
5f39d397 1927 free_extent_buffer(right);
e66f709b 1928 }
e66f709b
CM
1929 return 0;
1930 }
925baedd 1931 btrfs_tree_unlock(right);
5f39d397 1932 free_extent_buffer(right);
e66f709b 1933 }
e66f709b
CM
1934 return 1;
1935}
1936
3c69faec 1937/*
d352ac68
CM
1938 * readahead one full node of leaves, finding things that are close
1939 * to the block in 'slot', and triggering ra on them.
3c69faec 1940 */
c8c42864
CM
1941static void reada_for_search(struct btrfs_root *root,
1942 struct btrfs_path *path,
1943 int level, int slot, u64 objectid)
3c69faec 1944{
5f39d397 1945 struct extent_buffer *node;
01f46658 1946 struct btrfs_disk_key disk_key;
3c69faec 1947 u32 nritems;
3c69faec 1948 u64 search;
a7175319 1949 u64 target;
6b80053d 1950 u64 nread = 0;
cb25c2ea 1951 u64 gen;
3c69faec 1952 int direction = path->reada;
5f39d397 1953 struct extent_buffer *eb;
6b80053d
CM
1954 u32 nr;
1955 u32 blocksize;
1956 u32 nscan = 0;
db94535d 1957
a6b6e75e 1958 if (level != 1)
6702ed49
CM
1959 return;
1960
1961 if (!path->nodes[level])
3c69faec
CM
1962 return;
1963
5f39d397 1964 node = path->nodes[level];
925baedd 1965
3c69faec 1966 search = btrfs_node_blockptr(node, slot);
6b80053d
CM
1967 blocksize = btrfs_level_size(root, level - 1);
1968 eb = btrfs_find_tree_block(root, search, blocksize);
5f39d397
CM
1969 if (eb) {
1970 free_extent_buffer(eb);
3c69faec
CM
1971 return;
1972 }
1973
a7175319 1974 target = search;
6b80053d 1975
5f39d397 1976 nritems = btrfs_header_nritems(node);
6b80053d 1977 nr = slot;
25b8b936 1978
d397712b 1979 while (1) {
6b80053d
CM
1980 if (direction < 0) {
1981 if (nr == 0)
1982 break;
1983 nr--;
1984 } else if (direction > 0) {
1985 nr++;
1986 if (nr >= nritems)
1987 break;
3c69faec 1988 }
01f46658
CM
1989 if (path->reada < 0 && objectid) {
1990 btrfs_node_key(node, &disk_key, nr);
1991 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1992 break;
1993 }
6b80053d 1994 search = btrfs_node_blockptr(node, nr);
a7175319
CM
1995 if ((search <= target && target - search <= 65536) ||
1996 (search > target && search - target <= 65536)) {
cb25c2ea 1997 gen = btrfs_node_ptr_generation(node, nr);
cb25c2ea 1998 readahead_tree_block(root, search, blocksize, gen);
6b80053d
CM
1999 nread += blocksize;
2000 }
2001 nscan++;
a7175319 2002 if ((nread > 65536 || nscan > 32))
6b80053d 2003 break;
3c69faec
CM
2004 }
2005}
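/*
 * Illustrative aside (not part of ctree.c): the readahead window used by
 * reada_for_search() above, with only the forward scan direction shown.
 * Neighbouring slots whose block starts within 64KiB of the target block
 * are prefetched, and the scan stops after 64KiB of issued readahead or 32
 * inspected slots.  toy_readahead() is a hypothetical stand-in for
 * readahead_tree_block().
 */
#include <stdint.h>

/* hypothetical stand-in for readahead_tree_block(); a no-op here */
static void toy_readahead(uint64_t bytenr, uint32_t blocksize)
{
	(void)bytenr;
	(void)blocksize;
}

static void toy_reada(const uint64_t *blockptrs, int nritems, int slot,
		      uint32_t blocksize)
{
	uint64_t target = blockptrs[slot];
	uint64_t nread = 0;
	int nscan = 0;
	int nr;

	for (nr = slot + 1; nr < nritems; nr++) {
		uint64_t search = blockptrs[nr];
		uint64_t dist = search > target ? search - target
						: target - search;

		if (dist <= 65536) {		/* within the 64KiB window */
			toy_readahead(search, blocksize);
			nread += blocksize;
		}
		if (++nscan > 32 || nread > 65536)
			break;			/* don't prefetch forever  */
	}
}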
925baedd 2006
b4ce94de
CM
2007/*
2008 * returns -EAGAIN if it had to drop the path, or zero if everything was in
2009 * cache
2010 */
2011static noinline int reada_for_balance(struct btrfs_root *root,
2012 struct btrfs_path *path, int level)
2013{
2014 int slot;
2015 int nritems;
2016 struct extent_buffer *parent;
2017 struct extent_buffer *eb;
2018 u64 gen;
2019 u64 block1 = 0;
2020 u64 block2 = 0;
2021 int ret = 0;
2022 int blocksize;
2023
8c594ea8 2024 parent = path->nodes[level + 1];
b4ce94de
CM
2025 if (!parent)
2026 return 0;
2027
2028 nritems = btrfs_header_nritems(parent);
8c594ea8 2029 slot = path->slots[level + 1];
b4ce94de
CM
2030 blocksize = btrfs_level_size(root, level);
2031
2032 if (slot > 0) {
2033 block1 = btrfs_node_blockptr(parent, slot - 1);
2034 gen = btrfs_node_ptr_generation(parent, slot - 1);
2035 eb = btrfs_find_tree_block(root, block1, blocksize);
b9fab919
CM
2036 /*
 2037 * if we get -EAGAIN from btrfs_buffer_uptodate, we
 2038 * don't want to return -EAGAIN here. That will loop
2039 * forever
2040 */
2041 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
2042 block1 = 0;
2043 free_extent_buffer(eb);
2044 }
8c594ea8 2045 if (slot + 1 < nritems) {
b4ce94de
CM
2046 block2 = btrfs_node_blockptr(parent, slot + 1);
2047 gen = btrfs_node_ptr_generation(parent, slot + 1);
2048 eb = btrfs_find_tree_block(root, block2, blocksize);
b9fab919 2049 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
2050 block2 = 0;
2051 free_extent_buffer(eb);
2052 }
2053 if (block1 || block2) {
2054 ret = -EAGAIN;
8c594ea8
CM
2055
2056 /* release the whole path */
b3b4aa74 2057 btrfs_release_path(path);
8c594ea8
CM
2058
2059 /* read the blocks */
b4ce94de
CM
2060 if (block1)
2061 readahead_tree_block(root, block1, blocksize, 0);
2062 if (block2)
2063 readahead_tree_block(root, block2, blocksize, 0);
2064
2065 if (block1) {
2066 eb = read_tree_block(root, block1, blocksize, 0);
2067 free_extent_buffer(eb);
2068 }
8c594ea8 2069 if (block2) {
b4ce94de
CM
2070 eb = read_tree_block(root, block2, blocksize, 0);
2071 free_extent_buffer(eb);
2072 }
2073 }
2074 return ret;
2075}
2076
2077
d352ac68 2078/*
d397712b
CM
2079 * when we walk down the tree, it is usually safe to unlock the higher layers
2080 * in the tree. The exceptions are when our path goes through slot 0, because
2081 * operations on the tree might require changing key pointers higher up in the
2082 * tree.
d352ac68 2083 *
d397712b
CM
2084 * callers might also have set path->keep_locks, which tells this code to keep
2085 * the lock if the path points to the last slot in the block. This is part of
2086 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 2087 *
d397712b
CM
2088 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2089 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 2090 */
e02119d5 2091static noinline void unlock_up(struct btrfs_path *path, int level,
f7c79f30
CM
2092 int lowest_unlock, int min_write_lock_level,
2093 int *write_lock_level)
925baedd
CM
2094{
2095 int i;
2096 int skip_level = level;
051e1b9f 2097 int no_skips = 0;
925baedd
CM
2098 struct extent_buffer *t;
2099
2100 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2101 if (!path->nodes[i])
2102 break;
2103 if (!path->locks[i])
2104 break;
051e1b9f 2105 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
2106 skip_level = i + 1;
2107 continue;
2108 }
051e1b9f 2109 if (!no_skips && path->keep_locks) {
925baedd
CM
2110 u32 nritems;
2111 t = path->nodes[i];
2112 nritems = btrfs_header_nritems(t);
051e1b9f 2113 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
2114 skip_level = i + 1;
2115 continue;
2116 }
2117 }
051e1b9f
CM
2118 if (skip_level < i && i >= lowest_unlock)
2119 no_skips = 1;
2120
925baedd
CM
2121 t = path->nodes[i];
2122 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
bd681513 2123 btrfs_tree_unlock_rw(t, path->locks[i]);
925baedd 2124 path->locks[i] = 0;
f7c79f30
CM
2125 if (write_lock_level &&
2126 i > min_write_lock_level &&
2127 i <= *write_lock_level) {
2128 *write_lock_level = i - 1;
2129 }
925baedd
CM
2130 }
2131 }
2132}
2133
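/*
 * Illustrative aside (not part of ctree.c): a reduced model of the
 * unlock_up() rule above.  If the path enters a node at slot 0, a key
 * change lower down may ripple into that node's parent, so that level and
 * the one above it stay locked; other levels at or above lowest_unlock are
 * released.  The keep_locks, no_skips and write_lock_level handling is
 * deliberately left out, and the toy_path layout is hypothetical.
 */
#define TOY_MAX_LEVEL 8

struct toy_path {
	int slots[TOY_MAX_LEVEL];	/* slot taken at each level       */
	int locked[TOY_MAX_LEVEL];	/* 1 if this level is held locked */
};

static void toy_unlock_up(struct toy_path *p, int level, int lowest_unlock)
{
	int skip_level = level;
	int i;

	for (i = level; i < TOY_MAX_LEVEL; i++) {
		if (!p->locked[i])
			break;
		if (p->slots[i] == 0) {
			/* key 0 changes here may touch the parent too */
			skip_level = i + 1;
			continue;
		}
		if (i >= lowest_unlock && i > skip_level)
			p->locked[i] = 0;
	}
}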
b4ce94de
CM
2134/*
2135 * This releases any locks held in the path starting at level and
2136 * going all the way up to the root.
2137 *
2138 * btrfs_search_slot will keep the lock held on higher nodes in a few
2139 * corner cases, such as COW of the block at slot zero in the node. This
2140 * ignores those rules, and it should only be called when there are no
2141 * more updates to be done higher up in the tree.
2142 */
2143noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2144{
2145 int i;
2146
5d4f98a2 2147 if (path->keep_locks)
b4ce94de
CM
2148 return;
2149
2150 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2151 if (!path->nodes[i])
12f4dacc 2152 continue;
b4ce94de 2153 if (!path->locks[i])
12f4dacc 2154 continue;
bd681513 2155 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
b4ce94de
CM
2156 path->locks[i] = 0;
2157 }
2158}
2159
c8c42864
CM
2160/*
2161 * helper function for btrfs_search_slot. The goal is to find a block
2162 * in cache without setting the path to blocking. If we find the block
2163 * we return zero and the path is unchanged.
2164 *
2165 * If we can't find the block, we set the path blocking and do some
2166 * reada. -EAGAIN is returned and the search must be repeated.
2167 */
2168static int
2169read_block_for_search(struct btrfs_trans_handle *trans,
2170 struct btrfs_root *root, struct btrfs_path *p,
2171 struct extent_buffer **eb_ret, int level, int slot,
5d9e75c4 2172 struct btrfs_key *key, u64 time_seq)
c8c42864
CM
2173{
2174 u64 blocknr;
2175 u64 gen;
2176 u32 blocksize;
2177 struct extent_buffer *b = *eb_ret;
2178 struct extent_buffer *tmp;
76a05b35 2179 int ret;
c8c42864
CM
2180
2181 blocknr = btrfs_node_blockptr(b, slot);
2182 gen = btrfs_node_ptr_generation(b, slot);
2183 blocksize = btrfs_level_size(root, level - 1);
2184
2185 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
cb44921a 2186 if (tmp) {
b9fab919
CM
2187 /* first we do an atomic uptodate check */
2188 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2189 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
cb44921a
CM
2190 /*
2191 * we found an up to date block without
2192 * sleeping, return
2193 * right away
2194 */
2195 *eb_ret = tmp;
2196 return 0;
2197 }
2198 /* the pages were up to date, but we failed
2199 * the generation number check. Do a full
2200 * read for the generation number that is correct.
2201 * We must do this without dropping locks so
2202 * we can trust our generation number
2203 */
2204 free_extent_buffer(tmp);
bd681513
CM
2205 btrfs_set_path_blocking(p);
2206
b9fab919 2207 /* now we're allowed to do a blocking uptodate check */
cb44921a 2208 tmp = read_tree_block(root, blocknr, blocksize, gen);
b9fab919 2209 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
cb44921a
CM
2210 *eb_ret = tmp;
2211 return 0;
2212 }
2213 free_extent_buffer(tmp);
b3b4aa74 2214 btrfs_release_path(p);
cb44921a
CM
2215 return -EIO;
2216 }
c8c42864
CM
2217 }
2218
2219 /*
2220 * reduce lock contention at high levels
2221 * of the btree by dropping locks before
76a05b35
CM
2222 * we read. Don't release the lock on the current
2223 * level because we need to walk this node to figure
2224 * out which blocks to read.
c8c42864 2225 */
8c594ea8
CM
2226 btrfs_unlock_up_safe(p, level + 1);
2227 btrfs_set_path_blocking(p);
2228
cb44921a 2229 free_extent_buffer(tmp);
c8c42864
CM
2230 if (p->reada)
2231 reada_for_search(root, p, level, slot, key->objectid);
2232
b3b4aa74 2233 btrfs_release_path(p);
76a05b35
CM
2234
2235 ret = -EAGAIN;
5bdd3536 2236 tmp = read_tree_block(root, blocknr, blocksize, 0);
76a05b35
CM
2237 if (tmp) {
2238 /*
2239 * If the read above didn't mark this buffer up to date,
2240 * it will never end up being up to date. Set ret to EIO now
2241 * and give up so that our caller doesn't loop forever
2242 * on our EAGAINs.
2243 */
b9fab919 2244 if (!btrfs_buffer_uptodate(tmp, 0, 0))
76a05b35 2245 ret = -EIO;
c8c42864 2246 free_extent_buffer(tmp);
76a05b35
CM
2247 }
2248 return ret;
c8c42864
CM
2249}
2250
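/*
 * Illustrative aside (not part of ctree.c): the retry pattern used by
 * read_block_for_search() above, reduced to its control flow and stubbed
 * so it stands alone.  A cached, up-to-date buffer is returned without
 * blocking; a miss drops the locks, issues the read to warm the cache, and
 * reports -EAGAIN so the caller restarts the search.  All toy_* helpers
 * are hypothetical stand-ins, not the btrfs cache API.
 */
#include <errno.h>
#include <stddef.h>

struct toy_buf { unsigned long long blocknr; };

static struct toy_buf *toy_cache_lookup(unsigned long long blocknr)
{
	(void)blocknr;
	return NULL;			/* stand-in: always a cache miss    */
}

static struct toy_buf *toy_read_block(unsigned long long blocknr)
{
	static struct toy_buf b;
	b.blocknr = blocknr;
	return &b;			/* stand-in: the read always works  */
}

static void toy_drop_locks(void) { }

static int toy_get_block(unsigned long long blocknr, struct toy_buf **out)
{
	struct toy_buf *b = toy_cache_lookup(blocknr);

	if (b) {			/* cache hit: no need to block      */
		*out = b;
		return 0;
	}
	toy_drop_locks();		/* don't hold locks across the read */
	b = toy_read_block(blocknr);	/* warm the cache for the retry     */
	return b ? -EAGAIN : -EIO;	/* caller restarts on -EAGAIN       */
}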
2251/*
2252 * helper function for btrfs_search_slot. This does all of the checks
2253 * for node-level blocks and does any balancing required based on
2254 * the ins_len.
2255 *
2256 * If no extra work was required, zero is returned. If we had to
2257 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2258 * start over
2259 */
2260static int
2261setup_nodes_for_search(struct btrfs_trans_handle *trans,
2262 struct btrfs_root *root, struct btrfs_path *p,
bd681513
CM
2263 struct extent_buffer *b, int level, int ins_len,
2264 int *write_lock_level)
c8c42864
CM
2265{
2266 int ret;
2267 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2268 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2269 int sret;
2270
bd681513
CM
2271 if (*write_lock_level < level + 1) {
2272 *write_lock_level = level + 1;
2273 btrfs_release_path(p);
2274 goto again;
2275 }
2276
c8c42864
CM
2277 sret = reada_for_balance(root, p, level);
2278 if (sret)
2279 goto again;
2280
2281 btrfs_set_path_blocking(p);
2282 sret = split_node(trans, root, p, level);
bd681513 2283 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2284
2285 BUG_ON(sret > 0);
2286 if (sret) {
2287 ret = sret;
2288 goto done;
2289 }
2290 b = p->nodes[level];
2291 } else if (ins_len < 0 && btrfs_header_nritems(b) <
cfbb9308 2292 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
c8c42864
CM
2293 int sret;
2294
bd681513
CM
2295 if (*write_lock_level < level + 1) {
2296 *write_lock_level = level + 1;
2297 btrfs_release_path(p);
2298 goto again;
2299 }
2300
c8c42864
CM
2301 sret = reada_for_balance(root, p, level);
2302 if (sret)
2303 goto again;
2304
2305 btrfs_set_path_blocking(p);
2306 sret = balance_level(trans, root, p, level);
bd681513 2307 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2308
2309 if (sret) {
2310 ret = sret;
2311 goto done;
2312 }
2313 b = p->nodes[level];
2314 if (!b) {
b3b4aa74 2315 btrfs_release_path(p);
c8c42864
CM
2316 goto again;
2317 }
2318 BUG_ON(btrfs_header_nritems(b) == 1);
2319 }
2320 return 0;
2321
2322again:
2323 ret = -EAGAIN;
2324done:
2325 return ret;
2326}
2327
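/*
 * Illustrative aside (not part of ctree.c): the two thresholds checked by
 * setup_nodes_for_search() above, with the search_for_split case omitted.
 * On the insert path a node within three pointers of full is split ahead
 * of time; on the delete path a node under half full is rebalanced.
 * TOY_NODEPTRS is a hypothetical stand-in for
 * BTRFS_NODEPTRS_PER_BLOCK(root).
 */
enum toy_action { TOY_NOTHING, TOY_SPLIT, TOY_BALANCE };

#define TOY_NODEPTRS 121	/* hypothetical pointers-per-node figure */

static enum toy_action toy_node_action(int nritems, int ins_len)
{
	if (ins_len > 0 && nritems >= TOY_NODEPTRS - 3)
		return TOY_SPLIT;	/* leave room for the descent     */
	if (ins_len < 0 && nritems < TOY_NODEPTRS / 2)
		return TOY_BALANCE;	/* merge/steal before it empties  */
	return TOY_NOTHING;
}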
74123bd7
CM
2328/*
 2329 * look for key in the tree. path is filled in with nodes along the way.
2330 * if key is found, we return zero and you can find the item in the leaf
2331 * level of the path (level 0)
2332 *
2333 * If the key isn't found, the path points to the slot where it should
aa5d6bed
CM
2334 * be inserted, and 1 is returned. If there are other errors during the
2335 * search a negative error number is returned.
97571fd0
CM
2336 *
2337 * if ins_len > 0, nodes and leaves will be split as we walk down the
2338 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2339 * possible)
74123bd7 2340 */
e089f05c
CM
2341int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2342 *root, struct btrfs_key *key, struct btrfs_path *p, int
2343 ins_len, int cow)
be0e5c09 2344{
5f39d397 2345 struct extent_buffer *b;
be0e5c09
CM
2346 int slot;
2347 int ret;
33c66f43 2348 int err;
be0e5c09 2349 int level;
925baedd 2350 int lowest_unlock = 1;
bd681513
CM
2351 int root_lock;
2352 /* everything at write_lock_level or lower must be write locked */
2353 int write_lock_level = 0;
9f3a7427 2354 u8 lowest_level = 0;
f7c79f30 2355 int min_write_lock_level;
9f3a7427 2356
6702ed49 2357 lowest_level = p->lowest_level;
323ac95b 2358 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 2359 WARN_ON(p->nodes[0] != NULL);
25179201 2360
bd681513 2361 if (ins_len < 0) {
925baedd 2362 lowest_unlock = 2;
65b51a00 2363
bd681513
CM
2364 /* when we are removing items, we might have to go up to level
2365 * two as we update tree pointers Make sure we keep write
2366 * for those levels as well
2367 */
2368 write_lock_level = 2;
2369 } else if (ins_len > 0) {
2370 /*
2371 * for inserting items, make sure we have a write lock on
2372 * level 1 so we can update keys
2373 */
2374 write_lock_level = 1;
2375 }
2376
2377 if (!cow)
2378 write_lock_level = -1;
2379
2380 if (cow && (p->keep_locks || p->lowest_level))
2381 write_lock_level = BTRFS_MAX_LEVEL;
2382
f7c79f30
CM
2383 min_write_lock_level = write_lock_level;
2384
bb803951 2385again:
bd681513
CM
2386 /*
2387 * we try very hard to do read locks on the root
2388 */
2389 root_lock = BTRFS_READ_LOCK;
2390 level = 0;
5d4f98a2 2391 if (p->search_commit_root) {
bd681513
CM
2392 /*
2393 * the commit roots are read only
2394 * so we always do read locks
2395 */
5d4f98a2
YZ
2396 b = root->commit_root;
2397 extent_buffer_get(b);
bd681513 2398 level = btrfs_header_level(b);
5d4f98a2 2399 if (!p->skip_locking)
bd681513 2400 btrfs_tree_read_lock(b);
5d4f98a2 2401 } else {
bd681513 2402 if (p->skip_locking) {
5d4f98a2 2403 b = btrfs_root_node(root);
bd681513
CM
2404 level = btrfs_header_level(b);
2405 } else {
2406 /* we don't know the level of the root node
2407 * until we actually have it read locked
2408 */
2409 b = btrfs_read_lock_root_node(root);
2410 level = btrfs_header_level(b);
2411 if (level <= write_lock_level) {
2412 /* whoops, must trade for write lock */
2413 btrfs_tree_read_unlock(b);
2414 free_extent_buffer(b);
2415 b = btrfs_lock_root_node(root);
2416 root_lock = BTRFS_WRITE_LOCK;
2417
2418 /* the level might have changed, check again */
2419 level = btrfs_header_level(b);
2420 }
2421 }
5d4f98a2 2422 }
bd681513
CM
2423 p->nodes[level] = b;
2424 if (!p->skip_locking)
2425 p->locks[level] = root_lock;
925baedd 2426
eb60ceac 2427 while (b) {
5f39d397 2428 level = btrfs_header_level(b);
65b51a00
CM
2429
2430 /*
2431 * setup the path here so we can release it under lock
2432 * contention with the cow code
2433 */
02217ed2 2434 if (cow) {
c8c42864
CM
2435 /*
2436 * if we don't really need to cow this block
2437 * then we don't want to set the path blocking,
2438 * so we test it here
2439 */
5d4f98a2 2440 if (!should_cow_block(trans, root, b))
65b51a00 2441 goto cow_done;
5d4f98a2 2442
b4ce94de
CM
2443 btrfs_set_path_blocking(p);
2444
bd681513
CM
2445 /*
2446 * must have write locks on this node and the
2447 * parent
2448 */
2449 if (level + 1 > write_lock_level) {
2450 write_lock_level = level + 1;
2451 btrfs_release_path(p);
2452 goto again;
2453 }
2454
33c66f43
YZ
2455 err = btrfs_cow_block(trans, root, b,
2456 p->nodes[level + 1],
2457 p->slots[level + 1], &b);
2458 if (err) {
33c66f43 2459 ret = err;
65b51a00 2460 goto done;
54aa1f4d 2461 }
02217ed2 2462 }
65b51a00 2463cow_done:
02217ed2 2464 BUG_ON(!cow && ins_len);
65b51a00 2465
eb60ceac 2466 p->nodes[level] = b;
bd681513 2467 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de
CM
2468
2469 /*
2470 * we have a lock on b and as long as we aren't changing
 2471 * the tree, there is no way for the items in b to change.
2472 * It is safe to drop the lock on our parent before we
2473 * go through the expensive btree search on b.
2474 *
2475 * If cow is true, then we might be changing slot zero,
2476 * which may require changing the parent. So, we can't
2477 * drop the lock until after we know which slot we're
2478 * operating on.
2479 */
2480 if (!cow)
2481 btrfs_unlock_up_safe(p, level + 1);
2482
5f39d397 2483 ret = bin_search(b, key, level, &slot);
b4ce94de 2484
5f39d397 2485 if (level != 0) {
33c66f43
YZ
2486 int dec = 0;
2487 if (ret && slot > 0) {
2488 dec = 1;
be0e5c09 2489 slot -= 1;
33c66f43 2490 }
be0e5c09 2491 p->slots[level] = slot;
33c66f43 2492 err = setup_nodes_for_search(trans, root, p, b, level,
bd681513 2493 ins_len, &write_lock_level);
33c66f43 2494 if (err == -EAGAIN)
c8c42864 2495 goto again;
33c66f43
YZ
2496 if (err) {
2497 ret = err;
c8c42864 2498 goto done;
33c66f43 2499 }
c8c42864
CM
2500 b = p->nodes[level];
2501 slot = p->slots[level];
b4ce94de 2502
bd681513
CM
2503 /*
2504 * slot 0 is special, if we change the key
2505 * we have to update the parent pointer
2506 * which means we must have a write lock
2507 * on the parent
2508 */
2509 if (slot == 0 && cow &&
2510 write_lock_level < level + 1) {
2511 write_lock_level = level + 1;
2512 btrfs_release_path(p);
2513 goto again;
2514 }
2515
f7c79f30
CM
2516 unlock_up(p, level, lowest_unlock,
2517 min_write_lock_level, &write_lock_level);
f9efa9c7 2518
925baedd 2519 if (level == lowest_level) {
33c66f43
YZ
2520 if (dec)
2521 p->slots[level]++;
5b21f2ed 2522 goto done;
925baedd 2523 }
ca7a79ad 2524
33c66f43 2525 err = read_block_for_search(trans, root, p,
5d9e75c4 2526 &b, level, slot, key, 0);
33c66f43 2527 if (err == -EAGAIN)
c8c42864 2528 goto again;
33c66f43
YZ
2529 if (err) {
2530 ret = err;
76a05b35 2531 goto done;
33c66f43 2532 }
76a05b35 2533
b4ce94de 2534 if (!p->skip_locking) {
bd681513
CM
2535 level = btrfs_header_level(b);
2536 if (level <= write_lock_level) {
2537 err = btrfs_try_tree_write_lock(b);
2538 if (!err) {
2539 btrfs_set_path_blocking(p);
2540 btrfs_tree_lock(b);
2541 btrfs_clear_path_blocking(p, b,
2542 BTRFS_WRITE_LOCK);
2543 }
2544 p->locks[level] = BTRFS_WRITE_LOCK;
2545 } else {
2546 err = btrfs_try_tree_read_lock(b);
2547 if (!err) {
2548 btrfs_set_path_blocking(p);
2549 btrfs_tree_read_lock(b);
2550 btrfs_clear_path_blocking(p, b,
2551 BTRFS_READ_LOCK);
2552 }
2553 p->locks[level] = BTRFS_READ_LOCK;
b4ce94de 2554 }
bd681513 2555 p->nodes[level] = b;
b4ce94de 2556 }
be0e5c09
CM
2557 } else {
2558 p->slots[level] = slot;
87b29b20
YZ
2559 if (ins_len > 0 &&
2560 btrfs_leaf_free_space(root, b) < ins_len) {
bd681513
CM
2561 if (write_lock_level < 1) {
2562 write_lock_level = 1;
2563 btrfs_release_path(p);
2564 goto again;
2565 }
2566
b4ce94de 2567 btrfs_set_path_blocking(p);
33c66f43
YZ
2568 err = split_leaf(trans, root, key,
2569 p, ins_len, ret == 0);
bd681513 2570 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de 2571
33c66f43
YZ
2572 BUG_ON(err > 0);
2573 if (err) {
2574 ret = err;
65b51a00
CM
2575 goto done;
2576 }
5c680ed6 2577 }
459931ec 2578 if (!p->search_for_split)
f7c79f30
CM
2579 unlock_up(p, level, lowest_unlock,
2580 min_write_lock_level, &write_lock_level);
65b51a00 2581 goto done;
be0e5c09
CM
2582 }
2583 }
65b51a00
CM
2584 ret = 1;
2585done:
b4ce94de
CM
2586 /*
2587 * we don't really know what they plan on doing with the path
2588 * from here on, so for now just mark it as blocking
2589 */
b9473439
CM
2590 if (!p->leave_spinning)
2591 btrfs_set_path_blocking(p);
76a05b35 2592 if (ret < 0)
b3b4aa74 2593 btrfs_release_path(p);
65b51a00 2594 return ret;
be0e5c09
CM
2595}
2596
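/*
 * Illustrative aside (not part of ctree.c): a user-space model of the
 * return convention btrfs_search_slot() shares with bin_search(): 0 and
 * the matching slot when the key exists, 1 and the insertion slot when it
 * does not.  At a non-leaf level a miss then steps back one slot (the
 * 'dec' logic above), since the key lives in the subtree whose first key
 * precedes it.  Plain integer keys stand in for struct btrfs_key.
 */
static int toy_bin_search(const unsigned long long *keys, int nritems,
			  unsigned long long key, int *slot)
{
	int lo = 0, hi = nritems;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] == key) {
			*slot = mid;
			return 0;		/* exact match           */
		}
		if (keys[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	*slot = lo;				/* where it would insert */
	return 1;
}

/* at a node (level != 0), a miss means "descend left of the insert point" */
static int toy_descend_slot(const unsigned long long *keys, int nritems,
			    unsigned long long key)
{
	int slot;
	int ret = toy_bin_search(keys, nritems, key, &slot);

	if (ret && slot > 0)
		slot--;
	return slot;
}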
5d9e75c4
JS
2597/*
2598 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2599 * current state of the tree together with the operations recorded in the tree
2600 * modification log to search for the key in a previous version of this tree, as
2601 * denoted by the time_seq parameter.
2602 *
2603 * Naturally, there is no support for insert, delete or cow operations.
2604 *
2605 * The resulting path and return value will be set up as if we called
2606 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2607 */
2608int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2609 struct btrfs_path *p, u64 time_seq)
2610{
2611 struct extent_buffer *b;
2612 int slot;
2613 int ret;
2614 int err;
2615 int level;
2616 int lowest_unlock = 1;
2617 u8 lowest_level = 0;
2618
2619 lowest_level = p->lowest_level;
2620 WARN_ON(p->nodes[0] != NULL);
2621
2622 if (p->search_commit_root) {
2623 BUG_ON(time_seq);
2624 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2625 }
2626
2627again:
5d9e75c4 2628 b = get_old_root(root, time_seq);
5d9e75c4 2629 level = btrfs_header_level(b);
5d9e75c4
JS
2630 p->locks[level] = BTRFS_READ_LOCK;
2631
2632 while (b) {
2633 level = btrfs_header_level(b);
2634 p->nodes[level] = b;
2635 btrfs_clear_path_blocking(p, NULL, 0);
2636
2637 /*
2638 * we have a lock on b and as long as we aren't changing
 2639 * the tree, there is no way for the items in b to change.
2640 * It is safe to drop the lock on our parent before we
2641 * go through the expensive btree search on b.
2642 */
2643 btrfs_unlock_up_safe(p, level + 1);
2644
2645 ret = bin_search(b, key, level, &slot);
2646
2647 if (level != 0) {
2648 int dec = 0;
2649 if (ret && slot > 0) {
2650 dec = 1;
2651 slot -= 1;
2652 }
2653 p->slots[level] = slot;
2654 unlock_up(p, level, lowest_unlock, 0, NULL);
2655
2656 if (level == lowest_level) {
2657 if (dec)
2658 p->slots[level]++;
2659 goto done;
2660 }
2661
2662 err = read_block_for_search(NULL, root, p, &b, level,
2663 slot, key, time_seq);
2664 if (err == -EAGAIN)
2665 goto again;
2666 if (err) {
2667 ret = err;
2668 goto done;
2669 }
2670
2671 level = btrfs_header_level(b);
2672 err = btrfs_try_tree_read_lock(b);
2673 if (!err) {
2674 btrfs_set_path_blocking(p);
2675 btrfs_tree_read_lock(b);
2676 btrfs_clear_path_blocking(p, b,
2677 BTRFS_READ_LOCK);
2678 }
2679 p->locks[level] = BTRFS_READ_LOCK;
2680 p->nodes[level] = b;
2681 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2682 if (b != p->nodes[level]) {
2683 btrfs_tree_unlock_rw(p->nodes[level],
2684 p->locks[level]);
2685 p->locks[level] = 0;
2686 p->nodes[level] = b;
2687 }
2688 } else {
2689 p->slots[level] = slot;
2690 unlock_up(p, level, lowest_unlock, 0, NULL);
2691 goto done;
2692 }
2693 }
2694 ret = 1;
2695done:
2696 if (!p->leave_spinning)
2697 btrfs_set_path_blocking(p);
2698 if (ret < 0)
2699 btrfs_release_path(p);
2700
2701 return ret;
2702}
2703
74123bd7
CM
2704/*
2705 * adjust the pointers going up the tree, starting at level
 2706 * making sure the right key of each node points to 'key'.
2707 * This is used after shifting pointers to the left, so it stops
2708 * fixing up pointers when a given leaf/node is not in slot 0 of the
2709 * higher levels
aa5d6bed 2710 *
74123bd7 2711 */
143bede5
JM
2712static void fixup_low_keys(struct btrfs_trans_handle *trans,
2713 struct btrfs_root *root, struct btrfs_path *path,
2714 struct btrfs_disk_key *key, int level)
be0e5c09
CM
2715{
2716 int i;
5f39d397
CM
2717 struct extent_buffer *t;
2718
234b63a0 2719 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 2720 int tslot = path->slots[i];
eb60ceac 2721 if (!path->nodes[i])
be0e5c09 2722 break;
5f39d397 2723 t = path->nodes[i];
f230475e 2724 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
5f39d397 2725 btrfs_set_node_key(t, key, tslot);
d6025579 2726 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
2727 if (tslot != 0)
2728 break;
2729 }
2730}
2731
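/*
 * Illustrative aside (not part of ctree.c): fixup_low_keys() above in
 * miniature.  When the first key of a block changes, each node on the path
 * gets its separator key rewritten, and the walk up stops at the first
 * level that was entered through a non-zero slot, because nodes above it
 * are unaffected.  The toy_level layout is hypothetical.
 */
#define TOY_MAX_LEVEL 8

struct toy_level {
	unsigned long long *keys;	/* this level's separator keys, or NULL */
	int slot;			/* slot taken at this level             */
};

static void toy_fixup_low_keys(struct toy_level *path, int level,
			       unsigned long long new_key)
{
	int i;

	for (i = level; i < TOY_MAX_LEVEL; i++) {
		if (!path[i].keys)
			break;
		path[i].keys[path[i].slot] = new_key;
		if (path[i].slot != 0)
			break;	/* higher levels don't see the change */
	}
}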
31840ae1
ZY
2732/*
2733 * update item key.
2734 *
2735 * This function isn't completely safe. It's the caller's responsibility
2736 * that the new key won't break the order
2737 */
143bede5
JM
2738void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2739 struct btrfs_root *root, struct btrfs_path *path,
2740 struct btrfs_key *new_key)
31840ae1
ZY
2741{
2742 struct btrfs_disk_key disk_key;
2743 struct extent_buffer *eb;
2744 int slot;
2745
2746 eb = path->nodes[0];
2747 slot = path->slots[0];
2748 if (slot > 0) {
2749 btrfs_item_key(eb, &disk_key, slot - 1);
143bede5 2750 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
31840ae1
ZY
2751 }
2752 if (slot < btrfs_header_nritems(eb) - 1) {
2753 btrfs_item_key(eb, &disk_key, slot + 1);
143bede5 2754 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
31840ae1
ZY
2755 }
2756
2757 btrfs_cpu_key_to_disk(&disk_key, new_key);
2758 btrfs_set_item_key(eb, &disk_key, slot);
2759 btrfs_mark_buffer_dirty(eb);
2760 if (slot == 0)
2761 fixup_low_keys(trans, root, path, &disk_key, 1);
31840ae1
ZY
2762}
2763
74123bd7
CM
2764/*
2765 * try to push data from one node into the next node left in the
79f95c82 2766 * tree.
aa5d6bed
CM
2767 *
2768 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2769 * error, and > 0 if there was no room in the left hand block.
74123bd7 2770 */
98ed5174
CM
2771static int push_node_left(struct btrfs_trans_handle *trans,
2772 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 2773 struct extent_buffer *src, int empty)
be0e5c09 2774{
be0e5c09 2775 int push_items = 0;
bb803951
CM
2776 int src_nritems;
2777 int dst_nritems;
aa5d6bed 2778 int ret = 0;
be0e5c09 2779
5f39d397
CM
2780 src_nritems = btrfs_header_nritems(src);
2781 dst_nritems = btrfs_header_nritems(dst);
123abc88 2782 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
2783 WARN_ON(btrfs_header_generation(src) != trans->transid);
2784 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 2785
bce4eae9 2786 if (!empty && src_nritems <= 8)
971a1f66
CM
2787 return 1;
2788
d397712b 2789 if (push_items <= 0)
be0e5c09
CM
2790 return 1;
2791
bce4eae9 2792 if (empty) {
971a1f66 2793 push_items = min(src_nritems, push_items);
bce4eae9
CM
2794 if (push_items < src_nritems) {
2795 /* leave at least 8 pointers in the node if
2796 * we aren't going to empty it
2797 */
2798 if (src_nritems - push_items < 8) {
2799 if (push_items <= 8)
2800 return 1;
2801 push_items -= 8;
2802 }
2803 }
2804 } else
2805 push_items = min(src_nritems - 8, push_items);
79f95c82 2806
f230475e
JS
2807 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2808 push_items);
5f39d397
CM
2809 copy_extent_buffer(dst, src,
2810 btrfs_node_key_ptr_offset(dst_nritems),
2811 btrfs_node_key_ptr_offset(0),
d397712b 2812 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 2813
bb803951 2814 if (push_items < src_nritems) {
f230475e
JS
2815 tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2816 src_nritems - push_items);
5f39d397
CM
2817 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2818 btrfs_node_key_ptr_offset(push_items),
2819 (src_nritems - push_items) *
2820 sizeof(struct btrfs_key_ptr));
2821 }
2822 btrfs_set_header_nritems(src, src_nritems - push_items);
2823 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2824 btrfs_mark_buffer_dirty(src);
2825 btrfs_mark_buffer_dirty(dst);
31840ae1 2826
79f95c82
CM
2827 return ret;
2828}
2829
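/*
 * Illustrative aside (not part of ctree.c): how push_node_left() above
 * sizes its copy, with the tree-mod-log and buffer handling left out.  The
 * move is bounded by the free slots in the destination, and unless the
 * source is deliberately being emptied it keeps at least eight pointers
 * behind so it remains a useful node.  The toy returns a count rather
 * than the 0/1/negative convention used above.
 */
static int toy_push_left_count(int dst_free, int src_nritems, int empty)
{
	int push_items = dst_free;

	if (push_items <= 0)
		return 0;			/* no room on the left  */
	if (empty)
		return push_items < src_nritems ? push_items : src_nritems;
	if (src_nritems <= 8)
		return 0;			/* source already small */
	if (push_items > src_nritems - 8)
		push_items = src_nritems - 8;	/* leave 8 behind       */
	return push_items;
}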
2830/*
2831 * try to push data from one node into the next node right in the
2832 * tree.
2833 *
2834 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2835 * error, and > 0 if there was no room in the right hand block.
2836 *
2837 * this will only push up to 1/2 the contents of the left node over
2838 */
5f39d397
CM
2839static int balance_node_right(struct btrfs_trans_handle *trans,
2840 struct btrfs_root *root,
2841 struct extent_buffer *dst,
2842 struct extent_buffer *src)
79f95c82 2843{
79f95c82
CM
2844 int push_items = 0;
2845 int max_push;
2846 int src_nritems;
2847 int dst_nritems;
2848 int ret = 0;
79f95c82 2849
7bb86316
CM
2850 WARN_ON(btrfs_header_generation(src) != trans->transid);
2851 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2852
5f39d397
CM
2853 src_nritems = btrfs_header_nritems(src);
2854 dst_nritems = btrfs_header_nritems(dst);
123abc88 2855 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 2856 if (push_items <= 0)
79f95c82 2857 return 1;
bce4eae9 2858
d397712b 2859 if (src_nritems < 4)
bce4eae9 2860 return 1;
79f95c82
CM
2861
2862 max_push = src_nritems / 2 + 1;
2863 /* don't try to empty the node */
d397712b 2864 if (max_push >= src_nritems)
79f95c82 2865 return 1;
252c38f0 2866
79f95c82
CM
2867 if (max_push < push_items)
2868 push_items = max_push;
2869
f230475e 2870 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
5f39d397
CM
2871 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2872 btrfs_node_key_ptr_offset(0),
2873 (dst_nritems) *
2874 sizeof(struct btrfs_key_ptr));
d6025579 2875
f230475e
JS
2876 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2877 src_nritems - push_items, push_items);
5f39d397
CM
2878 copy_extent_buffer(dst, src,
2879 btrfs_node_key_ptr_offset(0),
2880 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 2881 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 2882
5f39d397
CM
2883 btrfs_set_header_nritems(src, src_nritems - push_items);
2884 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 2885
5f39d397
CM
2886 btrfs_mark_buffer_dirty(src);
2887 btrfs_mark_buffer_dirty(dst);
31840ae1 2888
aa5d6bed 2889 return ret;
be0e5c09
CM
2890}
2891
97571fd0
CM
2892/*
2893 * helper function to insert a new root level in the tree.
2894 * A new node is allocated, and a single item is inserted to
2895 * point to the existing root
aa5d6bed
CM
2896 *
2897 * returns zero on success or < 0 on failure.
97571fd0 2898 */
d397712b 2899static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397
CM
2900 struct btrfs_root *root,
2901 struct btrfs_path *path, int level)
5c680ed6 2902{
7bb86316 2903 u64 lower_gen;
5f39d397
CM
2904 struct extent_buffer *lower;
2905 struct extent_buffer *c;
925baedd 2906 struct extent_buffer *old;
5f39d397 2907 struct btrfs_disk_key lower_key;
5c680ed6
CM
2908
2909 BUG_ON(path->nodes[level]);
2910 BUG_ON(path->nodes[level-1] != root->node);
2911
7bb86316
CM
2912 lower = path->nodes[level-1];
2913 if (level == 1)
2914 btrfs_item_key(lower, &lower_key, 0);
2915 else
2916 btrfs_node_key(lower, &lower_key, 0);
2917
31840ae1 2918 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 2919 root->root_key.objectid, &lower_key,
5581a51a 2920 level, root->node->start, 0);
5f39d397
CM
2921 if (IS_ERR(c))
2922 return PTR_ERR(c);
925baedd 2923
f0486c68
YZ
2924 root_add_used(root, root->nodesize);
2925
5d4f98a2 2926 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
2927 btrfs_set_header_nritems(c, 1);
2928 btrfs_set_header_level(c, level);
db94535d 2929 btrfs_set_header_bytenr(c, c->start);
5f39d397 2930 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 2931 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 2932 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
2933
2934 write_extent_buffer(c, root->fs_info->fsid,
2935 (unsigned long)btrfs_header_fsid(c),
2936 BTRFS_FSID_SIZE);
e17cade2
CM
2937
2938 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2939 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2940 BTRFS_UUID_SIZE);
2941
5f39d397 2942 btrfs_set_node_key(c, &lower_key, 0);
db94535d 2943 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 2944 lower_gen = btrfs_header_generation(lower);
31840ae1 2945 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
2946
2947 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 2948
5f39d397 2949 btrfs_mark_buffer_dirty(c);
d5719762 2950
925baedd 2951 old = root->node;
f230475e 2952 tree_mod_log_set_root_pointer(root, c);
240f62c8 2953 rcu_assign_pointer(root->node, c);
925baedd
CM
2954
2955 /* the super has an extra ref to root->node */
2956 free_extent_buffer(old);
2957
0b86a832 2958 add_root_to_dirty_list(root);
5f39d397
CM
2959 extent_buffer_get(c);
2960 path->nodes[level] = c;
bd681513 2961 path->locks[level] = BTRFS_WRITE_LOCK;
5c680ed6
CM
2962 path->slots[level] = 0;
2963 return 0;
2964}
2965
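/*
 * Illustrative aside (not part of ctree.c): the height-growth step that
 * insert_new_root() above performs, modelled on a toy in-memory tree.  A
 * fresh node holding a single pointer to the old root becomes the new
 * root, after which the old root can be split like any other node.  The
 * toy types are hypothetical and there is no COW, locking or dirty-list
 * handling here.
 */
#include <stdlib.h>

struct toy_tnode {
	struct toy_tnode *children[16];
	int nritems;
	int level;
};

struct toy_tree {
	struct toy_tnode *root;
};

static int toy_insert_new_root(struct toy_tree *tree)
{
	struct toy_tnode *c = calloc(1, sizeof(*c));

	if (!c)
		return -1;
	c->level = tree->root->level + 1;
	c->children[0] = tree->root;	/* single pointer to the old root */
	c->nritems = 1;
	tree->root = c;			/* tree is now one level taller   */
	return 0;
}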
74123bd7
CM
2966/*
2967 * worker function to insert a single pointer in a node.
2968 * the node should have enough room for the pointer already
97571fd0 2969 *
74123bd7
CM
2970 * slot and level indicate where you want the key to go, and
2971 * blocknr is the block the key points to.
2972 */
143bede5
JM
2973static void insert_ptr(struct btrfs_trans_handle *trans,
2974 struct btrfs_root *root, struct btrfs_path *path,
2975 struct btrfs_disk_key *key, u64 bytenr,
f3ea38da 2976 int slot, int level, int tree_mod_log)
74123bd7 2977{
5f39d397 2978 struct extent_buffer *lower;
74123bd7 2979 int nritems;
f3ea38da 2980 int ret;
5c680ed6
CM
2981
2982 BUG_ON(!path->nodes[level]);
f0486c68 2983 btrfs_assert_tree_locked(path->nodes[level]);
5f39d397
CM
2984 lower = path->nodes[level];
2985 nritems = btrfs_header_nritems(lower);
c293498b 2986 BUG_ON(slot > nritems);
143bede5 2987 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
74123bd7 2988 if (slot != nritems) {
f3ea38da
JS
2989 if (tree_mod_log && level)
2990 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
2991 slot, nritems - slot);
5f39d397
CM
2992 memmove_extent_buffer(lower,
2993 btrfs_node_key_ptr_offset(slot + 1),
2994 btrfs_node_key_ptr_offset(slot),
d6025579 2995 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 2996 }
f3ea38da
JS
2997 if (tree_mod_log && level) {
2998 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
2999 MOD_LOG_KEY_ADD);
3000 BUG_ON(ret < 0);
3001 }
5f39d397 3002 btrfs_set_node_key(lower, key, slot);
db94535d 3003 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
3004 WARN_ON(trans->transid == 0);
3005 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
3006 btrfs_set_header_nritems(lower, nritems + 1);
3007 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
3008}
3009
97571fd0
CM
3010/*
3011 * split the node at the specified level in path in two.
3012 * The path is corrected to point to the appropriate node after the split
3013 *
3014 * Before splitting this tries to make some room in the node by pushing
3015 * left and right, if either one works, it returns right away.
aa5d6bed
CM
3016 *
3017 * returns 0 on success and < 0 on failure
97571fd0 3018 */
e02119d5
CM
3019static noinline int split_node(struct btrfs_trans_handle *trans,
3020 struct btrfs_root *root,
3021 struct btrfs_path *path, int level)
be0e5c09 3022{
5f39d397
CM
3023 struct extent_buffer *c;
3024 struct extent_buffer *split;
3025 struct btrfs_disk_key disk_key;
be0e5c09 3026 int mid;
5c680ed6 3027 int ret;
7518a238 3028 u32 c_nritems;
eb60ceac 3029
5f39d397 3030 c = path->nodes[level];
7bb86316 3031 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 3032 if (c == root->node) {
5c680ed6 3033 /* trying to split the root, lets make a new one */
e089f05c 3034 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
3035 if (ret)
3036 return ret;
b3612421 3037 } else {
e66f709b 3038 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
3039 c = path->nodes[level];
3040 if (!ret && btrfs_header_nritems(c) <
c448acf0 3041 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
e66f709b 3042 return 0;
54aa1f4d
CM
3043 if (ret < 0)
3044 return ret;
be0e5c09 3045 }
e66f709b 3046
5f39d397 3047 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
3048 mid = (c_nritems + 1) / 2;
3049 btrfs_node_key(c, &disk_key, mid);
7bb86316 3050
5d4f98a2 3051 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
31840ae1 3052 root->root_key.objectid,
5581a51a 3053 &disk_key, level, c->start, 0);
5f39d397
CM
3054 if (IS_ERR(split))
3055 return PTR_ERR(split);
3056
f0486c68
YZ
3057 root_add_used(root, root->nodesize);
3058
5d4f98a2 3059 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
5f39d397 3060 btrfs_set_header_level(split, btrfs_header_level(c));
db94535d 3061 btrfs_set_header_bytenr(split, split->start);
5f39d397 3062 btrfs_set_header_generation(split, trans->transid);
5d4f98a2 3063 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
3064 btrfs_set_header_owner(split, root->root_key.objectid);
3065 write_extent_buffer(split, root->fs_info->fsid,
3066 (unsigned long)btrfs_header_fsid(split),
3067 BTRFS_FSID_SIZE);
e17cade2
CM
3068 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3069 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3070 BTRFS_UUID_SIZE);
54aa1f4d 3071
f230475e 3072 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
5f39d397
CM
3073 copy_extent_buffer(split, c,
3074 btrfs_node_key_ptr_offset(0),
3075 btrfs_node_key_ptr_offset(mid),
3076 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3077 btrfs_set_header_nritems(split, c_nritems - mid);
3078 btrfs_set_header_nritems(c, mid);
aa5d6bed
CM
3079 ret = 0;
3080
5f39d397
CM
3081 btrfs_mark_buffer_dirty(c);
3082 btrfs_mark_buffer_dirty(split);
3083
143bede5 3084 insert_ptr(trans, root, path, &disk_key, split->start,
f3ea38da 3085 path->slots[level + 1] + 1, level + 1, 1);
aa5d6bed 3086
5de08d7d 3087 if (path->slots[level] >= mid) {
5c680ed6 3088 path->slots[level] -= mid;
925baedd 3089 btrfs_tree_unlock(c);
5f39d397
CM
3090 free_extent_buffer(c);
3091 path->nodes[level] = split;
5c680ed6
CM
3092 path->slots[level + 1] += 1;
3093 } else {
925baedd 3094 btrfs_tree_unlock(split);
5f39d397 3095 free_extent_buffer(split);
be0e5c09 3096 }
aa5d6bed 3097 return ret;
be0e5c09
CM
3098}
3099
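/*
 * Illustrative aside (not part of ctree.c): the split point split_node()
 * above uses.  The upper half of the pointers, starting at
 * mid = (nritems + 1) / 2, is copied into the new sibling and the original
 * node is truncated to mid entries; the parent then gains a pointer to the
 * sibling at slot + 1 via insert_ptr().  Plain key arrays stand in for
 * extent buffers.
 */
#include <string.h>

static void toy_split_node(unsigned long long *keys, int *nritems,
			   unsigned long long *new_keys, int *new_nritems)
{
	int mid = (*nritems + 1) / 2;

	memcpy(new_keys, keys + mid,
	       (*nritems - mid) * sizeof(*keys));	/* upper half moves */
	*new_nritems = *nritems - mid;
	*nritems = mid;					/* lower half stays */
}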
74123bd7
CM
3100/*
3101 * how many bytes are required to store the items in a leaf. start
3102 * and nr indicate which items in the leaf to check. This totals up the
3103 * space used both by the item structs and the item data
3104 */
5f39d397 3105static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09
CM
3106{
3107 int data_len;
5f39d397 3108 int nritems = btrfs_header_nritems(l);
d4dbff95 3109 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
3110
3111 if (!nr)
3112 return 0;
5f39d397
CM
3113 data_len = btrfs_item_end_nr(l, start);
3114 data_len = data_len - btrfs_item_offset_nr(l, end);
0783fcfc 3115 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 3116 WARN_ON(data_len < 0);
be0e5c09
CM
3117 return data_len;
3118}
3119
d4dbff95
CM
3120/*
3121 * The space between the end of the leaf items and
3122 * the start of the leaf data. IOW, how much room
3123 * the leaf has left for both items and data
3124 */
d397712b 3125noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 3126 struct extent_buffer *leaf)
d4dbff95 3127{
5f39d397
CM
3128 int nritems = btrfs_header_nritems(leaf);
3129 int ret;
3130 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3131 if (ret < 0) {
d397712b
CM
3132 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3133 "used %d nritems %d\n",
ae2f5411 3134 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
3135 leaf_space_used(leaf, 0, nritems), nritems);
3136 }
3137 return ret;
d4dbff95
CM
3138}
3139
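/*
 * Illustrative aside (not part of ctree.c): the leaf space accounting used
 * by leaf_space_used()/btrfs_leaf_free_space() above.  Item headers grow
 * from the front of the leaf data area and item data grows backwards from
 * its end, so the free space is whatever remains between them.  The sizes
 * below are hypothetical stand-ins for BTRFS_LEAF_DATA_SIZE(root) and
 * sizeof(struct btrfs_item), and the toy assumes item 0's data ends at the
 * very end of the leaf.
 */
#define TOY_LEAF_DATA_SIZE	3995u	/* hypothetical leaf data bytes  */
#define TOY_ITEM_HEADER_SIZE	25u	/* hypothetical per-item header  */

struct toy_item {
	unsigned int offset;		/* start of this item's data     */
	unsigned int size;		/* bytes of item data            */
};

static unsigned int toy_leaf_free_space(const struct toy_item *items,
					unsigned int nritems)
{
	unsigned int used = nritems * TOY_ITEM_HEADER_SIZE;

	if (nritems) {
		/* data region runs from the last item's offset to the end */
		used += TOY_LEAF_DATA_SIZE - items[nritems - 1].offset;
	}
	return TOY_LEAF_DATA_SIZE - used;
}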
99d8f83c
CM
3140/*
3141 * min slot controls the lowest index we're willing to push to the
3142 * right. We'll push up to and including min_slot, but no lower
3143 */
44871b1b
CM
3144static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3145 struct btrfs_root *root,
3146 struct btrfs_path *path,
3147 int data_size, int empty,
3148 struct extent_buffer *right,
99d8f83c
CM
3149 int free_space, u32 left_nritems,
3150 u32 min_slot)
00ec4c51 3151{
5f39d397 3152 struct extent_buffer *left = path->nodes[0];
44871b1b 3153 struct extent_buffer *upper = path->nodes[1];
cfed81a0 3154 struct btrfs_map_token token;
5f39d397 3155 struct btrfs_disk_key disk_key;
00ec4c51 3156 int slot;
34a38218 3157 u32 i;
00ec4c51
CM
3158 int push_space = 0;
3159 int push_items = 0;
0783fcfc 3160 struct btrfs_item *item;
34a38218 3161 u32 nr;
7518a238 3162 u32 right_nritems;
5f39d397 3163 u32 data_end;
db94535d 3164 u32 this_item_size;
00ec4c51 3165
cfed81a0
CM
3166 btrfs_init_map_token(&token);
3167
34a38218
CM
3168 if (empty)
3169 nr = 0;
3170 else
99d8f83c 3171 nr = max_t(u32, 1, min_slot);
34a38218 3172
31840ae1 3173 if (path->slots[0] >= left_nritems)
87b29b20 3174 push_space += data_size;
31840ae1 3175
44871b1b 3176 slot = path->slots[1];
34a38218
CM
3177 i = left_nritems - 1;
3178 while (i >= nr) {
5f39d397 3179 item = btrfs_item_nr(left, i);
db94535d 3180
31840ae1
ZY
3181 if (!empty && push_items > 0) {
3182 if (path->slots[0] > i)
3183 break;
3184 if (path->slots[0] == i) {
3185 int space = btrfs_leaf_free_space(root, left);
3186 if (space + push_space * 2 > free_space)
3187 break;
3188 }
3189 }
3190
00ec4c51 3191 if (path->slots[0] == i)
87b29b20 3192 push_space += data_size;
db94535d 3193
db94535d
CM
3194 this_item_size = btrfs_item_size(left, item);
3195 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 3196 break;
31840ae1 3197
00ec4c51 3198 push_items++;
db94535d 3199 push_space += this_item_size + sizeof(*item);
34a38218
CM
3200 if (i == 0)
3201 break;
3202 i--;
db94535d 3203 }
5f39d397 3204
925baedd
CM
3205 if (push_items == 0)
3206 goto out_unlock;
5f39d397 3207
34a38218 3208 if (!empty && push_items == left_nritems)
a429e513 3209 WARN_ON(1);
5f39d397 3210
00ec4c51 3211 /* push left to right */
5f39d397 3212 right_nritems = btrfs_header_nritems(right);
34a38218 3213
5f39d397 3214 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
123abc88 3215 push_space -= leaf_data_end(root, left);
5f39d397 3216
00ec4c51 3217 /* make room in the right data area */
5f39d397
CM
3218 data_end = leaf_data_end(root, right);
3219 memmove_extent_buffer(right,
3220 btrfs_leaf_data(right) + data_end - push_space,
3221 btrfs_leaf_data(right) + data_end,
3222 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3223
00ec4c51 3224 /* copy from the left data area */
5f39d397 3225 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
d6025579
CM
3226 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3227 btrfs_leaf_data(left) + leaf_data_end(root, left),
3228 push_space);
5f39d397
CM
3229
3230 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3231 btrfs_item_nr_offset(0),
3232 right_nritems * sizeof(struct btrfs_item));
3233
00ec4c51 3234 /* copy the items from left to right */
5f39d397
CM
3235 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3236 btrfs_item_nr_offset(left_nritems - push_items),
3237 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
3238
3239 /* update the item pointers */
7518a238 3240 right_nritems += push_items;
5f39d397 3241 btrfs_set_header_nritems(right, right_nritems);
123abc88 3242 push_space = BTRFS_LEAF_DATA_SIZE(root);
7518a238 3243 for (i = 0; i < right_nritems; i++) {
5f39d397 3244 item = btrfs_item_nr(right, i);
cfed81a0
CM
3245 push_space -= btrfs_token_item_size(right, item, &token);
3246 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d
CM
3247 }
3248
7518a238 3249 left_nritems -= push_items;
5f39d397 3250 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 3251
34a38218
CM
3252 if (left_nritems)
3253 btrfs_mark_buffer_dirty(left);
f0486c68
YZ
3254 else
3255 clean_tree_block(trans, root, left);
3256
5f39d397 3257 btrfs_mark_buffer_dirty(right);
a429e513 3258
5f39d397
CM
3259 btrfs_item_key(right, &disk_key, 0);
3260 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 3261 btrfs_mark_buffer_dirty(upper);
02217ed2 3262
00ec4c51 3263 /* then fixup the leaf pointer in the path */
7518a238
CM
3264 if (path->slots[0] >= left_nritems) {
3265 path->slots[0] -= left_nritems;
925baedd
CM
3266 if (btrfs_header_nritems(path->nodes[0]) == 0)
3267 clean_tree_block(trans, root, path->nodes[0]);
3268 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3269 free_extent_buffer(path->nodes[0]);
3270 path->nodes[0] = right;
00ec4c51
CM
3271 path->slots[1] += 1;
3272 } else {
925baedd 3273 btrfs_tree_unlock(right);
5f39d397 3274 free_extent_buffer(right);
00ec4c51
CM
3275 }
3276 return 0;
925baedd
CM
3277
3278out_unlock:
3279 btrfs_tree_unlock(right);
3280 free_extent_buffer(right);
3281 return 1;
00ec4c51 3282}
925baedd 3283
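/*
 * Illustrative aside (not part of ctree.c): how __push_leaf_right() above
 * decides how many trailing items fit in the right neighbour.  Items are
 * taken from the end of the left leaf while their data plus a per-item
 * header still fit in the neighbour's free space; the min_slot and
 * current-slot space reservations are left out of this sketch.  The header
 * size is a hypothetical stand-in for sizeof(struct btrfs_item).
 */
#define TOY_ITEM_HEADER_SIZE 25u

static int toy_count_push_items(const unsigned int *item_sizes, int nritems,
				unsigned int free_space)
{
	unsigned int push_space = 0;
	int push_items = 0;
	int i;

	for (i = nritems - 1; i >= 0; i--) {
		unsigned int need = item_sizes[i] + TOY_ITEM_HEADER_SIZE;

		if (push_space + need > free_space)
			break;			/* neighbour is full now */
		push_space += need;
		push_items++;
	}
	return push_items;
}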
44871b1b
CM
3284/*
3285 * push some data in the path leaf to the right, trying to free up at
3286 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3287 *
3288 * returns 1 if the push failed because the other node didn't have enough
3289 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
3290 *
3291 * this will push starting from min_slot to the end of the leaf. It won't
3292 * push any slot lower than min_slot
44871b1b
CM
3293 */
3294static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3295 *root, struct btrfs_path *path,
3296 int min_data_size, int data_size,
3297 int empty, u32 min_slot)
44871b1b
CM
3298{
3299 struct extent_buffer *left = path->nodes[0];
3300 struct extent_buffer *right;
3301 struct extent_buffer *upper;
3302 int slot;
3303 int free_space;
3304 u32 left_nritems;
3305 int ret;
3306
3307 if (!path->nodes[1])
3308 return 1;
3309
3310 slot = path->slots[1];
3311 upper = path->nodes[1];
3312 if (slot >= btrfs_header_nritems(upper) - 1)
3313 return 1;
3314
3315 btrfs_assert_tree_locked(path->nodes[1]);
3316
3317 right = read_node_slot(root, upper, slot + 1);
91ca338d
TI
3318 if (right == NULL)
3319 return 1;
3320
44871b1b
CM
3321 btrfs_tree_lock(right);
3322 btrfs_set_lock_blocking(right);
3323
3324 free_space = btrfs_leaf_free_space(root, right);
3325 if (free_space < data_size)
3326 goto out_unlock;
3327
3328 /* cow and double check */
3329 ret = btrfs_cow_block(trans, root, right, upper,
3330 slot + 1, &right);
3331 if (ret)
3332 goto out_unlock;
3333
3334 free_space = btrfs_leaf_free_space(root, right);
3335 if (free_space < data_size)
3336 goto out_unlock;
3337
3338 left_nritems = btrfs_header_nritems(left);
3339 if (left_nritems == 0)
3340 goto out_unlock;
3341
99d8f83c
CM
3342 return __push_leaf_right(trans, root, path, min_data_size, empty,
3343 right, free_space, left_nritems, min_slot);
44871b1b
CM
3344out_unlock:
3345 btrfs_tree_unlock(right);
3346 free_extent_buffer(right);
3347 return 1;
3348}
3349
74123bd7
CM
3350/*
3351 * push some data in the path leaf to the left, trying to free up at
3352 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3353 *
3354 * max_slot can put a limit on how far into the leaf we'll push items. The
3355 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3356 * items
74123bd7 3357 */
44871b1b
CM
3358static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3359 struct btrfs_root *root,
3360 struct btrfs_path *path, int data_size,
3361 int empty, struct extent_buffer *left,
99d8f83c
CM
3362 int free_space, u32 right_nritems,
3363 u32 max_slot)
be0e5c09 3364{
5f39d397
CM
3365 struct btrfs_disk_key disk_key;
3366 struct extent_buffer *right = path->nodes[0];
be0e5c09 3367 int i;
be0e5c09
CM
3368 int push_space = 0;
3369 int push_items = 0;
0783fcfc 3370 struct btrfs_item *item;
7518a238 3371 u32 old_left_nritems;
34a38218 3372 u32 nr;
aa5d6bed 3373 int ret = 0;
db94535d
CM
3374 u32 this_item_size;
3375 u32 old_left_item_size;
cfed81a0
CM
3376 struct btrfs_map_token token;
3377
3378 btrfs_init_map_token(&token);
be0e5c09 3379
34a38218 3380 if (empty)
99d8f83c 3381 nr = min(right_nritems, max_slot);
34a38218 3382 else
99d8f83c 3383 nr = min(right_nritems - 1, max_slot);
34a38218
CM
3384
3385 for (i = 0; i < nr; i++) {
5f39d397 3386 item = btrfs_item_nr(right, i);
db94535d 3387
31840ae1
ZY
3388 if (!empty && push_items > 0) {
3389 if (path->slots[0] < i)
3390 break;
3391 if (path->slots[0] == i) {
3392 int space = btrfs_leaf_free_space(root, right);
3393 if (space + push_space * 2 > free_space)
3394 break;
3395 }
3396 }
3397
be0e5c09 3398 if (path->slots[0] == i)
87b29b20 3399 push_space += data_size;
db94535d
CM
3400
3401 this_item_size = btrfs_item_size(right, item);
3402 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 3403 break;
db94535d 3404
be0e5c09 3405 push_items++;
db94535d
CM
3406 push_space += this_item_size + sizeof(*item);
3407 }
3408
be0e5c09 3409 if (push_items == 0) {
925baedd
CM
3410 ret = 1;
3411 goto out;
be0e5c09 3412 }
34a38218 3413 if (!empty && push_items == btrfs_header_nritems(right))
a429e513 3414 WARN_ON(1);
5f39d397 3415
be0e5c09 3416 /* push data from right to left */
5f39d397
CM
3417 copy_extent_buffer(left, right,
3418 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3419 btrfs_item_nr_offset(0),
3420 push_items * sizeof(struct btrfs_item));
3421
123abc88 3422 push_space = BTRFS_LEAF_DATA_SIZE(root) -
d397712b 3423 btrfs_item_offset_nr(right, push_items - 1);
5f39d397
CM
3424
3425 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
d6025579
CM
3426 leaf_data_end(root, left) - push_space,
3427 btrfs_leaf_data(right) +
5f39d397 3428 btrfs_item_offset_nr(right, push_items - 1),
d6025579 3429 push_space);
5f39d397 3430 old_left_nritems = btrfs_header_nritems(left);
87b29b20 3431 BUG_ON(old_left_nritems <= 0);
eb60ceac 3432
db94535d 3433 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 3434 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 3435 u32 ioff;
db94535d 3436
5f39d397 3437 item = btrfs_item_nr(left, i);
db94535d 3438
cfed81a0
CM
3439 ioff = btrfs_token_item_offset(left, item, &token);
3440 btrfs_set_token_item_offset(left, item,
3441 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3442 &token);
be0e5c09 3443 }
5f39d397 3444 btrfs_set_header_nritems(left, old_left_nritems + push_items);
be0e5c09
CM
3445
3446 /* fixup right node */
34a38218 3447 if (push_items > right_nritems) {
d397712b
CM
3448 printk(KERN_CRIT "push items %d nr %u\n", push_items,
3449 right_nritems);
34a38218
CM
3450 WARN_ON(1);
3451 }
3452
3453 if (push_items < right_nritems) {
3454 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3455 leaf_data_end(root, right);
3456 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3457 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3458 btrfs_leaf_data(right) +
3459 leaf_data_end(root, right), push_space);
3460
3461 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
3462 btrfs_item_nr_offset(push_items),
3463 (btrfs_header_nritems(right) - push_items) *
3464 sizeof(struct btrfs_item));
34a38218 3465 }
eef1c494
Y
3466 right_nritems -= push_items;
3467 btrfs_set_header_nritems(right, right_nritems);
123abc88 3468 push_space = BTRFS_LEAF_DATA_SIZE(root);
5f39d397
CM
3469 for (i = 0; i < right_nritems; i++) {
3470 item = btrfs_item_nr(right, i);
db94535d 3471
cfed81a0
CM
3472 push_space = push_space - btrfs_token_item_size(right,
3473 item, &token);
3474 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d 3475 }
eb60ceac 3476
5f39d397 3477 btrfs_mark_buffer_dirty(left);
34a38218
CM
3478 if (right_nritems)
3479 btrfs_mark_buffer_dirty(right);
f0486c68
YZ
3480 else
3481 clean_tree_block(trans, root, right);
098f59c2 3482
5f39d397 3483 btrfs_item_key(right, &disk_key, 0);
143bede5 3484 fixup_low_keys(trans, root, path, &disk_key, 1);
be0e5c09
CM
3485
3486 /* then fixup the leaf pointer in the path */
3487 if (path->slots[0] < push_items) {
3488 path->slots[0] += old_left_nritems;
925baedd 3489 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3490 free_extent_buffer(path->nodes[0]);
3491 path->nodes[0] = left;
be0e5c09
CM
3492 path->slots[1] -= 1;
3493 } else {
925baedd 3494 btrfs_tree_unlock(left);
5f39d397 3495 free_extent_buffer(left);
be0e5c09
CM
3496 path->slots[0] -= push_items;
3497 }
eb60ceac 3498 BUG_ON(path->slots[0] < 0);
aa5d6bed 3499 return ret;
925baedd
CM
3500out:
3501 btrfs_tree_unlock(left);
3502 free_extent_buffer(left);
3503 return ret;
be0e5c09
CM
3504}
3505
3506/*
3507 * push some data in the path leaf to the left, trying to free up at
3508 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3509 *
3510 * max_slot can put a limit on how far into the leaf we'll push items. The
3511 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3512 * items
3513 */
3514static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3515 *root, struct btrfs_path *path, int min_data_size,
3516 int data_size, int empty, u32 max_slot)
44871b1b
CM
3517{
3518 struct extent_buffer *right = path->nodes[0];
3519 struct extent_buffer *left;
3520 int slot;
3521 int free_space;
3522 u32 right_nritems;
3523 int ret = 0;
3524
3525 slot = path->slots[1];
3526 if (slot == 0)
3527 return 1;
3528 if (!path->nodes[1])
3529 return 1;
3530
3531 right_nritems = btrfs_header_nritems(right);
3532 if (right_nritems == 0)
3533 return 1;
3534
3535 btrfs_assert_tree_locked(path->nodes[1]);
3536
3537 left = read_node_slot(root, path->nodes[1], slot - 1);
91ca338d
TI
3538 if (left == NULL)
3539 return 1;
3540
44871b1b
CM
3541 btrfs_tree_lock(left);
3542 btrfs_set_lock_blocking(left);
3543
3544 free_space = btrfs_leaf_free_space(root, left);
3545 if (free_space < data_size) {
3546 ret = 1;
3547 goto out;
3548 }
3549
3550 /* cow and double check */
3551 ret = btrfs_cow_block(trans, root, left,
3552 path->nodes[1], slot - 1, &left);
3553 if (ret) {
3554 /* we hit -ENOSPC, but it isn't fatal here */
79787eaa
JM
3555 if (ret == -ENOSPC)
3556 ret = 1;
44871b1b
CM
3557 goto out;
3558 }
3559
3560 free_space = btrfs_leaf_free_space(root, left);
3561 if (free_space < data_size) {
3562 ret = 1;
3563 goto out;
3564 }
3565
99d8f83c
CM
3566 return __push_leaf_left(trans, root, path, min_data_size,
3567 empty, left, free_space, right_nritems,
3568 max_slot);
44871b1b
CM
3569out:
3570 btrfs_tree_unlock(left);
3571 free_extent_buffer(left);
3572 return ret;
3573}
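/*
 * Illustrative sketch only (not part of ctree.c): how an in-file caller
 * might use push_leaf_left() to try to make 'data_size' bytes of room in
 * path->nodes[0] before falling back to a full leaf split.  The helper
 * name try_make_room_left() is hypothetical; push_leaf_left() is static,
 * so any real caller has to live in this file.
 */
static int try_make_room_left(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path, int data_size)
{
	int ret;

	/* push everything we can to the left neighbour; (u32)-1 == no slot limit */
	ret = push_leaf_left(trans, root, path, data_size, data_size, 0, (u32)-1);
	if (ret < 0)
		return ret;	/* hard error */

	/* ret == 0 means some items moved; recheck the free space either way */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	return 1;	/* still not enough room, caller must split */
}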
3574
3575/*
3576 * helper for split_leaf: copy the items from slot 'mid' onward out of
3577 * leaf 'l' into the new leaf 'right' and fix up the path.
3578 */
143bede5
JM
3579static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3580 struct btrfs_root *root,
3581 struct btrfs_path *path,
3582 struct extent_buffer *l,
3583 struct extent_buffer *right,
3584 int slot, int mid, int nritems)
44871b1b
CM
3585{
3586 int data_copy_size;
3587 int rt_data_off;
3588 int i;
44871b1b 3589 struct btrfs_disk_key disk_key;
cfed81a0
CM
3590 struct btrfs_map_token token;
3591
3592 btrfs_init_map_token(&token);
44871b1b
CM
3593
3594 nritems = nritems - mid;
3595 btrfs_set_header_nritems(right, nritems);
3596 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3597
3598 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3599 btrfs_item_nr_offset(mid),
3600 nritems * sizeof(struct btrfs_item));
3601
3602 copy_extent_buffer(right, l,
3603 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3604 data_copy_size, btrfs_leaf_data(l) +
3605 leaf_data_end(root, l), data_copy_size);
3606
3607 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3608 btrfs_item_end_nr(l, mid);
3609
3610 for (i = 0; i < nritems; i++) {
3611 struct btrfs_item *item = btrfs_item_nr(right, i);
3612 u32 ioff;
3613
cfed81a0
CM
3614 ioff = btrfs_token_item_offset(right, item, &token);
3615 btrfs_set_token_item_offset(right, item,
3616 ioff + rt_data_off, &token);
44871b1b
CM
3617 }
3618
44871b1b 3619 btrfs_set_header_nritems(l, mid);
44871b1b 3620 btrfs_item_key(right, &disk_key, 0);
143bede5 3621 insert_ptr(trans, root, path, &disk_key, right->start,
f3ea38da 3622 path->slots[1] + 1, 1, 0);
44871b1b
CM
3623
3624 btrfs_mark_buffer_dirty(right);
3625 btrfs_mark_buffer_dirty(l);
3626 BUG_ON(path->slots[0] != slot);
3627
44871b1b
CM
3628 if (mid <= slot) {
3629 btrfs_tree_unlock(path->nodes[0]);
3630 free_extent_buffer(path->nodes[0]);
3631 path->nodes[0] = right;
3632 path->slots[0] -= mid;
3633 path->slots[1] += 1;
3634 } else {
3635 btrfs_tree_unlock(right);
3636 free_extent_buffer(right);
3637 }
3638
3639 BUG_ON(path->slots[0] < 0);
44871b1b
CM
3640}
3641
3642/*
3643 * double splits happen when we need to insert a big item in the middle
3644 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3645 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3646 * A B C
3647 *
3648 * We avoid this by trying to push the items on either side of our target
3649 * into the adjacent leaves. If all goes well we can avoid the double split
3650 * completely.
3651 */
3652static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3653 struct btrfs_root *root,
3654 struct btrfs_path *path,
3655 int data_size)
3656{
3657 int ret;
3658 int progress = 0;
3659 int slot;
3660 u32 nritems;
3661
3662 slot = path->slots[0];
3663
3664 /*
3665 * try to push all the items after our slot into the
3666 * right leaf
3667 */
3668 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3669 if (ret < 0)
3670 return ret;
3671
3672 if (ret == 0)
3673 progress++;
3674
3675 nritems = btrfs_header_nritems(path->nodes[0]);
3676 /*
3677 * our goal is to get our slot at the start or end of a leaf. If
3678 * we've done so we're done
3679 */
3680 if (path->slots[0] == 0 || path->slots[0] == nritems)
3681 return 0;
3682
3683 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3684 return 0;
3685
3686 /* try to push all the items before our slot into the next leaf */
3687 slot = path->slots[0];
3688 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3689 if (ret < 0)
3690 return ret;
3691
3692 if (ret == 0)
3693 progress++;
3694
3695 if (progress)
3696 return 0;
3697 return 1;
3698}
3699
3700/*
3701 * split the path's leaf in two, making sure there is at least data_size
3702 * available for the resulting leaf level of the path.
3703 *
3704 * returns 0 if all went well and < 0 on failure.
3705 */
3706static noinline int split_leaf(struct btrfs_trans_handle *trans,
3707 struct btrfs_root *root,
3708 struct btrfs_key *ins_key,
3709 struct btrfs_path *path, int data_size,
3710 int extend)
be0e5c09 3711{
5d4f98a2 3712 struct btrfs_disk_key disk_key;
5f39d397 3713 struct extent_buffer *l;
7518a238 3714 u32 nritems;
eb60ceac
CM
3715 int mid;
3716 int slot;
5f39d397 3717 struct extent_buffer *right;
d4dbff95 3718 int ret = 0;
aa5d6bed 3719 int wret;
5d4f98a2 3720 int split;
cc0c5538 3721 int num_doubles = 0;
99d8f83c 3722 int tried_avoid_double = 0;
aa5d6bed 3723
a5719521
YZ
3724 l = path->nodes[0];
3725 slot = path->slots[0];
3726 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3727 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3728 return -EOVERFLOW;
3729
40689478 3730 /* first try to make some room by pushing left and right */
99d8f83c
CM
3731 if (data_size) {
3732 wret = push_leaf_right(trans, root, path, data_size,
3733 data_size, 0, 0);
d397712b 3734 if (wret < 0)
eaee50e8 3735 return wret;
3685f791 3736 if (wret) {
99d8f83c
CM
3737 wret = push_leaf_left(trans, root, path, data_size,
3738 data_size, 0, (u32)-1);
3685f791
CM
3739 if (wret < 0)
3740 return wret;
3741 }
3742 l = path->nodes[0];
aa5d6bed 3743
3685f791 3744 /* did the pushes work? */
87b29b20 3745 if (btrfs_leaf_free_space(root, l) >= data_size)
3685f791 3746 return 0;
3326d1b0 3747 }
aa5d6bed 3748
5c680ed6 3749 if (!path->nodes[1]) {
e089f05c 3750 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
3751 if (ret)
3752 return ret;
3753 }
cc0c5538 3754again:
5d4f98a2 3755 split = 1;
cc0c5538 3756 l = path->nodes[0];
eb60ceac 3757 slot = path->slots[0];
5f39d397 3758 nritems = btrfs_header_nritems(l);
d397712b 3759 mid = (nritems + 1) / 2;
54aa1f4d 3760
5d4f98a2
YZ
3761 if (mid <= slot) {
3762 if (nritems == 1 ||
3763 leaf_space_used(l, mid, nritems - mid) + data_size >
3764 BTRFS_LEAF_DATA_SIZE(root)) {
3765 if (slot >= nritems) {
3766 split = 0;
3767 } else {
3768 mid = slot;
3769 if (mid != nritems &&
3770 leaf_space_used(l, mid, nritems - mid) +
3771 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
3772 if (data_size && !tried_avoid_double)
3773 goto push_for_double;
5d4f98a2
YZ
3774 split = 2;
3775 }
3776 }
3777 }
3778 } else {
3779 if (leaf_space_used(l, 0, mid) + data_size >
3780 BTRFS_LEAF_DATA_SIZE(root)) {
3781 if (!extend && data_size && slot == 0) {
3782 split = 0;
3783 } else if ((extend || !data_size) && slot == 0) {
3784 mid = 1;
3785 } else {
3786 mid = slot;
3787 if (mid != nritems &&
3788 leaf_space_used(l, mid, nritems - mid) +
3789 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
3790 if (data_size && !tried_avoid_double)
3791 goto push_for_double;
5d4f98a2
YZ
3792 split = 2;
3793 }
3794 }
3795 }
3796 }
3797
3798 if (split == 0)
3799 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3800 else
3801 btrfs_item_key(l, &disk_key, mid);
3802
3803 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
31840ae1 3804 root->root_key.objectid,
5581a51a 3805 &disk_key, 0, l->start, 0);
f0486c68 3806 if (IS_ERR(right))
5f39d397 3807 return PTR_ERR(right);
f0486c68
YZ
3808
3809 root_add_used(root, root->leafsize);
5f39d397
CM
3810
3811 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
db94535d 3812 btrfs_set_header_bytenr(right, right->start);
5f39d397 3813 btrfs_set_header_generation(right, trans->transid);
5d4f98a2 3814 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
3815 btrfs_set_header_owner(right, root->root_key.objectid);
3816 btrfs_set_header_level(right, 0);
3817 write_extent_buffer(right, root->fs_info->fsid,
3818 (unsigned long)btrfs_header_fsid(right),
3819 BTRFS_FSID_SIZE);
e17cade2
CM
3820
3821 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3822 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3823 BTRFS_UUID_SIZE);
44871b1b 3824
5d4f98a2
YZ
3825 if (split == 0) {
3826 if (mid <= slot) {
3827 btrfs_set_header_nritems(right, 0);
143bede5 3828 insert_ptr(trans, root, path, &disk_key, right->start,
f3ea38da 3829 path->slots[1] + 1, 1, 0);
5d4f98a2
YZ
3830 btrfs_tree_unlock(path->nodes[0]);
3831 free_extent_buffer(path->nodes[0]);
3832 path->nodes[0] = right;
3833 path->slots[0] = 0;
3834 path->slots[1] += 1;
3835 } else {
3836 btrfs_set_header_nritems(right, 0);
143bede5 3837 insert_ptr(trans, root, path, &disk_key, right->start,
f3ea38da 3838 path->slots[1], 1, 0);
5d4f98a2
YZ
3839 btrfs_tree_unlock(path->nodes[0]);
3840 free_extent_buffer(path->nodes[0]);
3841 path->nodes[0] = right;
3842 path->slots[0] = 0;
143bede5
JM
3843 if (path->slots[1] == 0)
3844 fixup_low_keys(trans, root, path,
3845 &disk_key, 1);
d4dbff95 3846 }
5d4f98a2
YZ
3847 btrfs_mark_buffer_dirty(right);
3848 return ret;
d4dbff95 3849 }
74123bd7 3850
143bede5 3851 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
31840ae1 3852
5d4f98a2 3853 if (split == 2) {
cc0c5538
CM
3854 BUG_ON(num_doubles != 0);
3855 num_doubles++;
3856 goto again;
a429e513 3857 }
44871b1b 3858
143bede5 3859 return 0;
99d8f83c
CM
3860
3861push_for_double:
3862 push_for_double_split(trans, root, path, data_size);
3863 tried_avoid_double = 1;
3864 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3865 return 0;
3866 goto again;
be0e5c09
CM
3867}
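/*
 * Illustrative sketch only (not part of ctree.c): the space test that
 * decides whether an insert of 'data_size' bytes (payload plus item
 * header, as btrfs_insert_empty_items() computes it) still fits in leaf
 * 'l'.  would_overflow_leaf() is a hypothetical in-file helper; it only
 * mirrors the checks split_leaf() does with leaf_space_used() above.
 */
static int would_overflow_leaf(struct btrfs_root *root,
			       struct extent_buffer *l, int data_size)
{
	u32 nritems = btrfs_header_nritems(l);

	/* leaf_space_used() counts both payload and headers of existing items */
	return leaf_space_used(l, 0, nritems) + data_size >
		BTRFS_LEAF_DATA_SIZE(root);
}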
3868
ad48fd75
YZ
3869static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3870 struct btrfs_root *root,
3871 struct btrfs_path *path, int ins_len)
459931ec 3872{
ad48fd75 3873 struct btrfs_key key;
459931ec 3874 struct extent_buffer *leaf;
ad48fd75
YZ
3875 struct btrfs_file_extent_item *fi;
3876 u64 extent_len = 0;
3877 u32 item_size;
3878 int ret;
459931ec
CM
3879
3880 leaf = path->nodes[0];
ad48fd75
YZ
3881 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3882
3883 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3884 key.type != BTRFS_EXTENT_CSUM_KEY);
3885
3886 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3887 return 0;
459931ec
CM
3888
3889 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ad48fd75
YZ
3890 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3891 fi = btrfs_item_ptr(leaf, path->slots[0],
3892 struct btrfs_file_extent_item);
3893 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3894 }
b3b4aa74 3895 btrfs_release_path(path);
459931ec 3896
459931ec 3897 path->keep_locks = 1;
ad48fd75
YZ
3898 path->search_for_split = 1;
3899 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
459931ec 3900 path->search_for_split = 0;
ad48fd75
YZ
3901 if (ret < 0)
3902 goto err;
459931ec 3903
ad48fd75
YZ
3904 ret = -EAGAIN;
3905 leaf = path->nodes[0];
459931ec 3906 /* if our item isn't there or got smaller, return now */
ad48fd75
YZ
3907 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3908 goto err;
3909
109f6aef
CM
3910 /* the leaf has changed, it now has room. return now */
3911 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3912 goto err;
3913
ad48fd75
YZ
3914 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3915 fi = btrfs_item_ptr(leaf, path->slots[0],
3916 struct btrfs_file_extent_item);
3917 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3918 goto err;
459931ec
CM
3919 }
3920
b9473439 3921 btrfs_set_path_blocking(path);
ad48fd75 3922 ret = split_leaf(trans, root, &key, path, ins_len, 1);
f0486c68
YZ
3923 if (ret)
3924 goto err;
459931ec 3925
ad48fd75 3926 path->keep_locks = 0;
b9473439 3927 btrfs_unlock_up_safe(path, 1);
ad48fd75
YZ
3928 return 0;
3929err:
3930 path->keep_locks = 0;
3931 return ret;
3932}
3933
3934static noinline int split_item(struct btrfs_trans_handle *trans,
3935 struct btrfs_root *root,
3936 struct btrfs_path *path,
3937 struct btrfs_key *new_key,
3938 unsigned long split_offset)
3939{
3940 struct extent_buffer *leaf;
3941 struct btrfs_item *item;
3942 struct btrfs_item *new_item;
3943 int slot;
3944 char *buf;
3945 u32 nritems;
3946 u32 item_size;
3947 u32 orig_offset;
3948 struct btrfs_disk_key disk_key;
3949
b9473439
CM
3950 leaf = path->nodes[0];
3951 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3952
b4ce94de
CM
3953 btrfs_set_path_blocking(path);
3954
459931ec
CM
3955 item = btrfs_item_nr(leaf, path->slots[0]);
3956 orig_offset = btrfs_item_offset(leaf, item);
3957 item_size = btrfs_item_size(leaf, item);
3958
459931ec 3959 buf = kmalloc(item_size, GFP_NOFS);
ad48fd75
YZ
3960 if (!buf)
3961 return -ENOMEM;
3962
459931ec
CM
3963 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3964 path->slots[0]), item_size);
459931ec 3965
ad48fd75 3966 slot = path->slots[0] + 1;
459931ec 3967 nritems = btrfs_header_nritems(leaf);
459931ec
CM
3968 if (slot != nritems) {
3969 /* shift the items */
3970 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
ad48fd75
YZ
3971 btrfs_item_nr_offset(slot),
3972 (nritems - slot) * sizeof(struct btrfs_item));
459931ec
CM
3973 }
3974
3975 btrfs_cpu_key_to_disk(&disk_key, new_key);
3976 btrfs_set_item_key(leaf, &disk_key, slot);
3977
3978 new_item = btrfs_item_nr(leaf, slot);
3979
3980 btrfs_set_item_offset(leaf, new_item, orig_offset);
3981 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3982
3983 btrfs_set_item_offset(leaf, item,
3984 orig_offset + item_size - split_offset);
3985 btrfs_set_item_size(leaf, item, split_offset);
3986
3987 btrfs_set_header_nritems(leaf, nritems + 1);
3988
3989 /* write the data for the start of the original item */
3990 write_extent_buffer(leaf, buf,
3991 btrfs_item_ptr_offset(leaf, path->slots[0]),
3992 split_offset);
3993
3994 /* write the data for the new item */
3995 write_extent_buffer(leaf, buf + split_offset,
3996 btrfs_item_ptr_offset(leaf, slot),
3997 item_size - split_offset);
3998 btrfs_mark_buffer_dirty(leaf);
3999
ad48fd75 4000 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
459931ec 4001 kfree(buf);
ad48fd75
YZ
4002 return 0;
4003}
4004
4005/*
4006 * This function splits a single item into two items,
4007 * giving 'new_key' to the new item and splitting the
4008 * old one at split_offset (from the start of the item).
4009 *
4010 * The path may be released by this operation. After
4011 * the split, the path is pointing to the old item. The
4012 * new item is going to be in the same node as the old one.
4013 *
4014 * Note, the item being split must be small enough to live alone on
4015 * a tree block with room for one extra struct btrfs_item
4016 *
4017 * This allows us to split the item in place, keeping a lock on the
4018 * leaf the entire time.
4019 */
4020int btrfs_split_item(struct btrfs_trans_handle *trans,
4021 struct btrfs_root *root,
4022 struct btrfs_path *path,
4023 struct btrfs_key *new_key,
4024 unsigned long split_offset)
4025{
4026 int ret;
4027 ret = setup_leaf_for_split(trans, root, path,
4028 sizeof(struct btrfs_item));
4029 if (ret)
4030 return ret;
4031
4032 ret = split_item(trans, root, path, new_key, split_offset);
459931ec
CM
4033 return ret;
4034}
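/*
 * Illustrative sketch only (not part of ctree.c): splitting the item the
 * path currently points at, giving the tail its own key.  The helper name
 * and the key manipulation are hypothetical; real callers (the extent
 * data and csum code, per the key types setup_leaf_for_split() allows)
 * position the path with btrfs_search_slot() first and derive new_key
 * from the data being carved off.
 */
static int example_split_in_place(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 new_offset, unsigned long split_offset)
{
	struct btrfs_key new_key;

	/* key of the second half: same objectid/type, new offset */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/*
	 * on success the path still points at the original (now shortened)
	 * item and the new item sits right after it in the same leaf
	 */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}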
4035
ad48fd75
YZ
4036/*
4037 * This function duplicates an item, giving 'new_key' to the new item.
4038 * It guarantees both items live in the same tree leaf and the new item
4039 * is contiguous with the original item.
4040 *
4041 * This allows us to split a file extent in place, keeping a lock on the
4042 * leaf the entire time.
4043 */
4044int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4045 struct btrfs_root *root,
4046 struct btrfs_path *path,
4047 struct btrfs_key *new_key)
4048{
4049 struct extent_buffer *leaf;
4050 int ret;
4051 u32 item_size;
4052
4053 leaf = path->nodes[0];
4054 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4055 ret = setup_leaf_for_split(trans, root, path,
4056 item_size + sizeof(struct btrfs_item));
4057 if (ret)
4058 return ret;
4059
4060 path->slots[0]++;
143bede5
JM
4061 setup_items_for_insert(trans, root, path, new_key, &item_size,
4062 item_size, item_size +
4063 sizeof(struct btrfs_item), 1);
ad48fd75
YZ
4064 leaf = path->nodes[0];
4065 memcpy_extent_buffer(leaf,
4066 btrfs_item_ptr_offset(leaf, path->slots[0]),
4067 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4068 item_size);
4069 return 0;
4070}
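/*
 * Illustrative sketch only (not part of ctree.c): duplicating a file
 * extent item so the copy can then be trimmed to describe only part of
 * the extent, the in-place split the comment above describes.  The helper
 * name and key handling are hypothetical.
 */
static int example_dup_extent_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, u64 split_pos)
{
	struct btrfs_key new_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = split_pos;	/* file offset where the copy starts */

	/* on success the path now points at the new (duplicate) item */
	ret = btrfs_duplicate_item(trans, root, path, &new_key);
	if (ret)
		return ret;	/* setup_leaf_for_split() may return -EAGAIN */

	/* caller would now shrink the original and adjust the duplicate */
	return 0;
}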
4071
d352ac68
CM
4072/*
4073 * make the item pointed to by the path smaller. new_size indicates
4074 * how small to make it, and from_end tells us if we just chop bytes
4075 * off the end of the item or if we shift the item to chop bytes off
4076 * the front.
4077 */
143bede5
JM
4078void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4079 struct btrfs_root *root,
4080 struct btrfs_path *path,
4081 u32 new_size, int from_end)
b18c6685 4082{
b18c6685 4083 int slot;
5f39d397
CM
4084 struct extent_buffer *leaf;
4085 struct btrfs_item *item;
b18c6685
CM
4086 u32 nritems;
4087 unsigned int data_end;
4088 unsigned int old_data_start;
4089 unsigned int old_size;
4090 unsigned int size_diff;
4091 int i;
cfed81a0
CM
4092 struct btrfs_map_token token;
4093
4094 btrfs_init_map_token(&token);
b18c6685 4095
5f39d397 4096 leaf = path->nodes[0];
179e29e4
CM
4097 slot = path->slots[0];
4098
4099 old_size = btrfs_item_size_nr(leaf, slot);
4100 if (old_size == new_size)
143bede5 4101 return;
b18c6685 4102
5f39d397 4103 nritems = btrfs_header_nritems(leaf);
b18c6685
CM
4104 data_end = leaf_data_end(root, leaf);
4105
5f39d397 4106 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 4107
b18c6685
CM
4108 size_diff = old_size - new_size;
4109
4110 BUG_ON(slot < 0);
4111 BUG_ON(slot >= nritems);
4112
4113 /*
4114 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4115 */
4116 /* first correct the data pointers */
4117 for (i = slot; i < nritems; i++) {
5f39d397
CM
4118 u32 ioff;
4119 item = btrfs_item_nr(leaf, i);
db94535d 4120
cfed81a0
CM
4121 ioff = btrfs_token_item_offset(leaf, item, &token);
4122 btrfs_set_token_item_offset(leaf, item,
4123 ioff + size_diff, &token);
b18c6685 4124 }
db94535d 4125
b18c6685 4126 /* shift the data */
179e29e4
CM
4127 if (from_end) {
4128 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4129 data_end + size_diff, btrfs_leaf_data(leaf) +
4130 data_end, old_data_start + new_size - data_end);
4131 } else {
4132 struct btrfs_disk_key disk_key;
4133 u64 offset;
4134
4135 btrfs_item_key(leaf, &disk_key, slot);
4136
4137 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4138 unsigned long ptr;
4139 struct btrfs_file_extent_item *fi;
4140
4141 fi = btrfs_item_ptr(leaf, slot,
4142 struct btrfs_file_extent_item);
4143 fi = (struct btrfs_file_extent_item *)(
4144 (unsigned long)fi - size_diff);
4145
4146 if (btrfs_file_extent_type(leaf, fi) ==
4147 BTRFS_FILE_EXTENT_INLINE) {
4148 ptr = btrfs_item_ptr_offset(leaf, slot);
4149 memmove_extent_buffer(leaf, ptr,
d397712b
CM
4150 (unsigned long)fi,
4151 offsetof(struct btrfs_file_extent_item,
179e29e4
CM
4152 disk_bytenr));
4153 }
4154 }
4155
4156 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4157 data_end + size_diff, btrfs_leaf_data(leaf) +
4158 data_end, old_data_start - data_end);
4159
4160 offset = btrfs_disk_key_offset(&disk_key);
4161 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4162 btrfs_set_item_key(leaf, &disk_key, slot);
4163 if (slot == 0)
4164 fixup_low_keys(trans, root, path, &disk_key, 1);
4165 }
5f39d397
CM
4166
4167 item = btrfs_item_nr(leaf, slot);
4168 btrfs_set_item_size(leaf, item, new_size);
4169 btrfs_mark_buffer_dirty(leaf);
b18c6685 4170
5f39d397
CM
4171 if (btrfs_leaf_free_space(root, leaf) < 0) {
4172 btrfs_print_leaf(root, leaf);
b18c6685 4173 BUG();
5f39d397 4174 }
b18c6685
CM
4175}
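/*
 * Illustrative sketch only (not part of ctree.c): shrinking an item's
 * payload to 'new_size' bytes, chopping from the end.  The helper name is
 * hypothetical; the item kind is left unspecified.
 */
static void example_shrink_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, u32 new_size)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	if (new_size >= old_size)
		return;		/* nothing to chop */

	/* from_end == 1: keep the front of the item, drop the tail */
	btrfs_truncate_item(trans, root, path, new_size, 1);
}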
4176
d352ac68
CM
4177/*
4178 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
4179 */
143bede5
JM
4180void btrfs_extend_item(struct btrfs_trans_handle *trans,
4181 struct btrfs_root *root, struct btrfs_path *path,
4182 u32 data_size)
6567e837 4183{
6567e837 4184 int slot;
5f39d397
CM
4185 struct extent_buffer *leaf;
4186 struct btrfs_item *item;
6567e837
CM
4187 u32 nritems;
4188 unsigned int data_end;
4189 unsigned int old_data;
4190 unsigned int old_size;
4191 int i;
cfed81a0
CM
4192 struct btrfs_map_token token;
4193
4194 btrfs_init_map_token(&token);
6567e837 4195
5f39d397 4196 leaf = path->nodes[0];
6567e837 4197
5f39d397 4198 nritems = btrfs_header_nritems(leaf);
6567e837
CM
4199 data_end = leaf_data_end(root, leaf);
4200
5f39d397
CM
4201 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4202 btrfs_print_leaf(root, leaf);
6567e837 4203 BUG();
5f39d397 4204 }
6567e837 4205 slot = path->slots[0];
5f39d397 4206 old_data = btrfs_item_end_nr(leaf, slot);
6567e837
CM
4207
4208 BUG_ON(slot < 0);
3326d1b0
CM
4209 if (slot >= nritems) {
4210 btrfs_print_leaf(root, leaf);
d397712b
CM
4211 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4212 slot, nritems);
3326d1b0
CM
4213 BUG_ON(1);
4214 }
6567e837
CM
4215
4216 /*
4217 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4218 */
4219 /* first correct the data pointers */
4220 for (i = slot; i < nritems; i++) {
5f39d397
CM
4221 u32 ioff;
4222 item = btrfs_item_nr(leaf, i);
db94535d 4223
cfed81a0
CM
4224 ioff = btrfs_token_item_offset(leaf, item, &token);
4225 btrfs_set_token_item_offset(leaf, item,
4226 ioff - data_size, &token);
6567e837 4227 }
5f39d397 4228
6567e837 4229 /* shift the data */
5f39d397 4230 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
6567e837
CM
4231 data_end - data_size, btrfs_leaf_data(leaf) +
4232 data_end, old_data - data_end);
5f39d397 4233
6567e837 4234 data_end = old_data;
5f39d397
CM
4235 old_size = btrfs_item_size_nr(leaf, slot);
4236 item = btrfs_item_nr(leaf, slot);
4237 btrfs_set_item_size(leaf, item, old_size + data_size);
4238 btrfs_mark_buffer_dirty(leaf);
6567e837 4239
5f39d397
CM
4240 if (btrfs_leaf_free_space(root, leaf) < 0) {
4241 btrfs_print_leaf(root, leaf);
6567e837 4242 BUG();
5f39d397 4243 }
6567e837
CM
4244}
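/*
 * Illustrative sketch only (not part of ctree.c): growing the item the
 * path points at by 'extra' bytes and zeroing the new space.  The caller
 * must already have made sure the leaf can hold the growth (e.g. by
 * searching with a large enough ins_len); the helper name and the memset
 * step are illustrative.
 */
static void example_grow_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path, u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(trans, root, path, extra);

	/* the added bytes sit at the end of the (moved) item payload */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memset_extent_buffer(leaf, 0, ptr + old_size, extra);
}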
4245
f3465ca4
JB
4246/*
4247 * Given a key and some data, insert items into the tree.
4248 * This does all the path init required, making room in the tree if needed.
4249 * Returns the number of keys that were inserted.
4250 */
4251int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
4252 struct btrfs_root *root,
4253 struct btrfs_path *path,
4254 struct btrfs_key *cpu_key, u32 *data_size,
4255 int nr)
4256{
4257 struct extent_buffer *leaf;
4258 struct btrfs_item *item;
4259 int ret = 0;
4260 int slot;
f3465ca4
JB
4261 int i;
4262 u32 nritems;
4263 u32 total_data = 0;
4264 u32 total_size = 0;
4265 unsigned int data_end;
4266 struct btrfs_disk_key disk_key;
4267 struct btrfs_key found_key;
cfed81a0
CM
4268 struct btrfs_map_token token;
4269
4270 btrfs_init_map_token(&token);
f3465ca4 4271
87b29b20
YZ
4272 for (i = 0; i < nr; i++) {
4273 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
4274 BTRFS_LEAF_DATA_SIZE(root)) {
4275 nr = i;
4276 break;
4277 }
f3465ca4 4278 total_data += data_size[i];
87b29b20
YZ
4279 total_size += data_size[i] + sizeof(struct btrfs_item);
4280 }
4281 BUG_ON(nr == 0);
f3465ca4 4282
f3465ca4
JB
4283 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4284 if (ret == 0)
4285 return -EEXIST;
4286 if (ret < 0)
4287 goto out;
4288
f3465ca4
JB
4289 leaf = path->nodes[0];
4290
4291 nritems = btrfs_header_nritems(leaf);
4292 data_end = leaf_data_end(root, leaf);
4293
4294 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4295 for (i = nr - 1; i >= 0; i--) {
4296 total_data -= data_size[i];
4297 total_size -= data_size[i] + sizeof(struct btrfs_item);
4298 if (total_size < btrfs_leaf_free_space(root, leaf))
4299 break;
4300 }
4301 nr = i;
4302 }
4303
4304 slot = path->slots[0];
4305 BUG_ON(slot < 0);
4306
4307 if (slot != nritems) {
4308 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4309
4310 item = btrfs_item_nr(leaf, slot);
4311 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4312
4313 /* figure out how many keys we can insert in here */
4314 total_data = data_size[0];
4315 for (i = 1; i < nr; i++) {
5d4f98a2 4316 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
f3465ca4
JB
4317 break;
4318 total_data += data_size[i];
4319 }
4320 nr = i;
4321
4322 if (old_data < data_end) {
4323 btrfs_print_leaf(root, leaf);
d397712b 4324 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
f3465ca4
JB
4325 slot, old_data, data_end);
4326 BUG_ON(1);
4327 }
4328 /*
4329 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4330 */
4331 /* first correct the data pointers */
f3465ca4
JB
4332 for (i = slot; i < nritems; i++) {
4333 u32 ioff;
4334
4335 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4336 ioff = btrfs_token_item_offset(leaf, item, &token);
4337 btrfs_set_token_item_offset(leaf, item,
4338 ioff - total_data, &token);
f3465ca4 4339 }
f3465ca4
JB
4340 /* shift the items */
4341 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4342 btrfs_item_nr_offset(slot),
4343 (nritems - slot) * sizeof(struct btrfs_item));
4344
4345 /* shift the data */
4346 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4347 data_end - total_data, btrfs_leaf_data(leaf) +
4348 data_end, old_data - data_end);
4349 data_end = old_data;
4350 } else {
4351 /*
4352 * this sucks but it has to be done: if we are inserting at
4353 * the end of the leaf, only insert 1 of the items, since we
4354 * have no way of knowing what's on the next leaf and we'd have
4355 * to drop our current locks to figure it out
4356 */
4357 nr = 1;
4358 }
4359
4360 /* setup the item for the new data */
4361 for (i = 0; i < nr; i++) {
4362 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4363 btrfs_set_item_key(leaf, &disk_key, slot + i);
4364 item = btrfs_item_nr(leaf, slot + i);
cfed81a0
CM
4365 btrfs_set_token_item_offset(leaf, item,
4366 data_end - data_size[i], &token);
f3465ca4 4367 data_end -= data_size[i];
cfed81a0 4368 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
f3465ca4
JB
4369 }
4370 btrfs_set_header_nritems(leaf, nritems + nr);
4371 btrfs_mark_buffer_dirty(leaf);
4372
4373 ret = 0;
4374 if (slot == 0) {
4375 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
143bede5 4376 fixup_low_keys(trans, root, path, &disk_key, 1);
f3465ca4
JB
4377 }
4378
4379 if (btrfs_leaf_free_space(root, leaf) < 0) {
4380 btrfs_print_leaf(root, leaf);
4381 BUG();
4382 }
4383out:
4384 if (!ret)
4385 ret = nr;
4386 return ret;
4387}
4388
74123bd7 4389/*
44871b1b
CM
4390 * this is a helper for btrfs_insert_empty_items, the main goal here is
4391 * to save stack depth by doing the bulk of the work in a function
4392 * that doesn't call btrfs_search_slot
74123bd7 4393 */
143bede5
JM
4394void setup_items_for_insert(struct btrfs_trans_handle *trans,
4395 struct btrfs_root *root, struct btrfs_path *path,
4396 struct btrfs_key *cpu_key, u32 *data_size,
4397 u32 total_data, u32 total_size, int nr)
be0e5c09 4398{
5f39d397 4399 struct btrfs_item *item;
9c58309d 4400 int i;
7518a238 4401 u32 nritems;
be0e5c09 4402 unsigned int data_end;
e2fa7227 4403 struct btrfs_disk_key disk_key;
44871b1b
CM
4404 struct extent_buffer *leaf;
4405 int slot;
cfed81a0
CM
4406 struct btrfs_map_token token;
4407
4408 btrfs_init_map_token(&token);
e2fa7227 4409
5f39d397 4410 leaf = path->nodes[0];
44871b1b 4411 slot = path->slots[0];
74123bd7 4412
5f39d397 4413 nritems = btrfs_header_nritems(leaf);
123abc88 4414 data_end = leaf_data_end(root, leaf);
eb60ceac 4415
f25956cc 4416 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3326d1b0 4417 btrfs_print_leaf(root, leaf);
d397712b 4418 printk(KERN_CRIT "not enough freespace need %u have %d\n",
9c58309d 4419 total_size, btrfs_leaf_free_space(root, leaf));
be0e5c09 4420 BUG();
d4dbff95 4421 }
5f39d397 4422
be0e5c09 4423 if (slot != nritems) {
5f39d397 4424 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
be0e5c09 4425
5f39d397
CM
4426 if (old_data < data_end) {
4427 btrfs_print_leaf(root, leaf);
d397712b 4428 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
5f39d397
CM
4429 slot, old_data, data_end);
4430 BUG_ON(1);
4431 }
be0e5c09
CM
4432 /*
4433 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4434 */
4435 /* first correct the data pointers */
0783fcfc 4436 for (i = slot; i < nritems; i++) {
5f39d397 4437 u32 ioff;
db94535d 4438
5f39d397 4439 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4440 ioff = btrfs_token_item_offset(leaf, item, &token);
4441 btrfs_set_token_item_offset(leaf, item,
4442 ioff - total_data, &token);
0783fcfc 4443 }
be0e5c09 4444 /* shift the items */
9c58309d 4445 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
5f39d397 4446 btrfs_item_nr_offset(slot),
d6025579 4447 (nritems - slot) * sizeof(struct btrfs_item));
be0e5c09
CM
4448
4449 /* shift the data */
5f39d397 4450 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
9c58309d 4451 data_end - total_data, btrfs_leaf_data(leaf) +
d6025579 4452 data_end, old_data - data_end);
be0e5c09
CM
4453 data_end = old_data;
4454 }
5f39d397 4455
62e2749e 4456 /* setup the item for the new data */
9c58309d
CM
4457 for (i = 0; i < nr; i++) {
4458 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4459 btrfs_set_item_key(leaf, &disk_key, slot + i);
4460 item = btrfs_item_nr(leaf, slot + i);
cfed81a0
CM
4461 btrfs_set_token_item_offset(leaf, item,
4462 data_end - data_size[i], &token);
9c58309d 4463 data_end -= data_size[i];
cfed81a0 4464 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
9c58309d 4465 }
44871b1b 4466
9c58309d 4467 btrfs_set_header_nritems(leaf, nritems + nr);
aa5d6bed 4468
5a01a2e3
CM
4469 if (slot == 0) {
4470 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
143bede5 4471 fixup_low_keys(trans, root, path, &disk_key, 1);
5a01a2e3 4472 }
b9473439
CM
4473 btrfs_unlock_up_safe(path, 1);
4474 btrfs_mark_buffer_dirty(leaf);
aa5d6bed 4475
5f39d397
CM
4476 if (btrfs_leaf_free_space(root, leaf) < 0) {
4477 btrfs_print_leaf(root, leaf);
be0e5c09 4478 BUG();
5f39d397 4479 }
44871b1b
CM
4480}
4481
4482/*
4483 * Given a key and some data, insert items into the tree.
4484 * This does all the path init required, making room in the tree if needed.
4485 */
4486int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4487 struct btrfs_root *root,
4488 struct btrfs_path *path,
4489 struct btrfs_key *cpu_key, u32 *data_size,
4490 int nr)
4491{
44871b1b
CM
4492 int ret = 0;
4493 int slot;
4494 int i;
4495 u32 total_size = 0;
4496 u32 total_data = 0;
4497
4498 for (i = 0; i < nr; i++)
4499 total_data += data_size[i];
4500
4501 total_size = total_data + (nr * sizeof(struct btrfs_item));
4502 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4503 if (ret == 0)
4504 return -EEXIST;
4505 if (ret < 0)
143bede5 4506 return ret;
44871b1b 4507
44871b1b
CM
4508 slot = path->slots[0];
4509 BUG_ON(slot < 0);
4510
143bede5 4511 setup_items_for_insert(trans, root, path, cpu_key, data_size,
44871b1b 4512 total_data, total_size, nr);
143bede5 4513 return 0;
62e2749e
CM
4514}
4515
4516/*
4517 * Given a key and some data, insert an item into the tree.
4518 * This does all the path init required, making room in the tree if needed.
4519 */
e089f05c
CM
4520int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4521 *root, struct btrfs_key *cpu_key, void *data, u32
4522 data_size)
62e2749e
CM
4523{
4524 int ret = 0;
2c90e5d6 4525 struct btrfs_path *path;
5f39d397
CM
4526 struct extent_buffer *leaf;
4527 unsigned long ptr;
62e2749e 4528
2c90e5d6 4529 path = btrfs_alloc_path();
db5b493a
TI
4530 if (!path)
4531 return -ENOMEM;
2c90e5d6 4532 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 4533 if (!ret) {
5f39d397
CM
4534 leaf = path->nodes[0];
4535 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4536 write_extent_buffer(leaf, data, ptr, data_size);
4537 btrfs_mark_buffer_dirty(leaf);
62e2749e 4538 }
2c90e5d6 4539 btrfs_free_path(path);
aa5d6bed 4540 return ret;
be0e5c09
CM
4541}
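/*
 * Illustrative sketch only (not part of ctree.c): inserting one small,
 * fully formed item.  The helper name, key values and opaque payload are
 * hypothetical stand-ins for whatever a real caller stores.
 */
static int example_insert_blob(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 objectid, u8 type, u64 offset,
			       void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/*
	 * btrfs_insert_item() allocates its own path, makes room (splitting
	 * leaves if needed), copies the payload in and frees the path; it
	 * returns -EEXIST if the key is already present.
	 */
	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}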
4542
74123bd7 4543/*
5de08d7d 4544 * delete the pointer from a given node.
74123bd7 4545 *
d352ac68
CM
4546 * the tree should have been previously balanced so the deletion does not
4547 * empty a node.
74123bd7 4548 */
143bede5 4549static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
f3ea38da
JS
4550 struct btrfs_path *path, int level, int slot,
4551 int tree_mod_log)
be0e5c09 4552{
5f39d397 4553 struct extent_buffer *parent = path->nodes[level];
7518a238 4554 u32 nritems;
f3ea38da 4555 int ret;
be0e5c09 4556
5f39d397 4557 nritems = btrfs_header_nritems(parent);
d397712b 4558 if (slot != nritems - 1) {
f3ea38da
JS
4559 if (tree_mod_log && level)
4560 tree_mod_log_eb_move(root->fs_info, parent, slot,
4561 slot + 1, nritems - slot - 1);
5f39d397
CM
4562 memmove_extent_buffer(parent,
4563 btrfs_node_key_ptr_offset(slot),
4564 btrfs_node_key_ptr_offset(slot + 1),
d6025579
CM
4565 sizeof(struct btrfs_key_ptr) *
4566 (nritems - slot - 1));
f395694c 4567 } else if (tree_mod_log && level) {
f3ea38da
JS
4568 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4569 MOD_LOG_KEY_REMOVE);
4570 BUG_ON(ret < 0);
bb803951 4571 }
f3ea38da 4572
7518a238 4573 nritems--;
5f39d397 4574 btrfs_set_header_nritems(parent, nritems);
7518a238 4575 if (nritems == 0 && parent == root->node) {
5f39d397 4576 BUG_ON(btrfs_header_level(root->node) != 1);
bb803951 4577 /* just turn the root into a leaf and break */
5f39d397 4578 btrfs_set_header_level(root->node, 0);
bb803951 4579 } else if (slot == 0) {
5f39d397
CM
4580 struct btrfs_disk_key disk_key;
4581
4582 btrfs_node_key(parent, &disk_key, 0);
143bede5 4583 fixup_low_keys(trans, root, path, &disk_key, level + 1);
be0e5c09 4584 }
d6025579 4585 btrfs_mark_buffer_dirty(parent);
be0e5c09
CM
4586}
4587
323ac95b
CM
4588/*
4589 * a helper function to delete the leaf pointed to by path->slots[1] and
5d4f98a2 4590 * path->nodes[1].
323ac95b
CM
4591 *
4592 * This deletes the pointer in path->nodes[1] and frees the leaf
4593 * block extent. zero is returned if it all worked out, < 0 otherwise.
4594 *
4595 * The path must have already been setup for deleting the leaf, including
4596 * all the proper balancing. path->nodes[1] must be locked.
4597 */
143bede5
JM
4598static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4599 struct btrfs_root *root,
4600 struct btrfs_path *path,
4601 struct extent_buffer *leaf)
323ac95b 4602{
5d4f98a2 4603 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
f3ea38da 4604 del_ptr(trans, root, path, 1, path->slots[1], 1);
323ac95b 4605
4d081c41
CM
4606 /*
4607 * btrfs_free_extent is expensive, we want to make sure we
4608 * aren't holding any locks when we call it
4609 */
4610 btrfs_unlock_up_safe(path, 0);
4611
f0486c68
YZ
4612 root_sub_used(root, leaf->len);
4613
3083ee2e 4614 extent_buffer_get(leaf);
5581a51a 4615 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3083ee2e 4616 free_extent_buffer_stale(leaf);
323ac95b 4617}
74123bd7
CM
4618/*
4619 * delete the item at the leaf level in path. If that empties
4620 * the leaf, remove it from the tree
4621 */
85e21bac
CM
4622int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4623 struct btrfs_path *path, int slot, int nr)
be0e5c09 4624{
5f39d397
CM
4625 struct extent_buffer *leaf;
4626 struct btrfs_item *item;
85e21bac
CM
4627 int last_off;
4628 int dsize = 0;
aa5d6bed
CM
4629 int ret = 0;
4630 int wret;
85e21bac 4631 int i;
7518a238 4632 u32 nritems;
cfed81a0
CM
4633 struct btrfs_map_token token;
4634
4635 btrfs_init_map_token(&token);
be0e5c09 4636
5f39d397 4637 leaf = path->nodes[0];
85e21bac
CM
4638 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4639
4640 for (i = 0; i < nr; i++)
4641 dsize += btrfs_item_size_nr(leaf, slot + i);
4642
5f39d397 4643 nritems = btrfs_header_nritems(leaf);
be0e5c09 4644
85e21bac 4645 if (slot + nr != nritems) {
123abc88 4646 int data_end = leaf_data_end(root, leaf);
5f39d397
CM
4647
4648 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
d6025579
CM
4649 data_end + dsize,
4650 btrfs_leaf_data(leaf) + data_end,
85e21bac 4651 last_off - data_end);
5f39d397 4652
85e21bac 4653 for (i = slot + nr; i < nritems; i++) {
5f39d397 4654 u32 ioff;
db94535d 4655
5f39d397 4656 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4657 ioff = btrfs_token_item_offset(leaf, item, &token);
4658 btrfs_set_token_item_offset(leaf, item,
4659 ioff + dsize, &token);
0783fcfc 4660 }
db94535d 4661
5f39d397 4662 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
85e21bac 4663 btrfs_item_nr_offset(slot + nr),
d6025579 4664 sizeof(struct btrfs_item) *
85e21bac 4665 (nritems - slot - nr));
be0e5c09 4666 }
85e21bac
CM
4667 btrfs_set_header_nritems(leaf, nritems - nr);
4668 nritems -= nr;
5f39d397 4669
74123bd7 4670 /* delete the leaf if we've emptied it */
7518a238 4671 if (nritems == 0) {
5f39d397
CM
4672 if (leaf == root->node) {
4673 btrfs_set_header_level(leaf, 0);
9a8dd150 4674 } else {
f0486c68
YZ
4675 btrfs_set_path_blocking(path);
4676 clean_tree_block(trans, root, leaf);
143bede5 4677 btrfs_del_leaf(trans, root, path, leaf);
9a8dd150 4678 }
be0e5c09 4679 } else {
7518a238 4680 int used = leaf_space_used(leaf, 0, nritems);
aa5d6bed 4681 if (slot == 0) {
5f39d397
CM
4682 struct btrfs_disk_key disk_key;
4683
4684 btrfs_item_key(leaf, &disk_key, 0);
143bede5 4685 fixup_low_keys(trans, root, path, &disk_key, 1);
aa5d6bed 4686 }
aa5d6bed 4687
74123bd7 4688 /* delete the leaf if it is mostly empty */
d717aa1d 4689 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
be0e5c09
CM
4690 /* push_leaf_left fixes the path.
4691 * make sure the path still points to our leaf
4692 * for possible call to del_ptr below
4693 */
4920c9ac 4694 slot = path->slots[1];
5f39d397
CM
4695 extent_buffer_get(leaf);
4696
b9473439 4697 btrfs_set_path_blocking(path);
99d8f83c
CM
4698 wret = push_leaf_left(trans, root, path, 1, 1,
4699 1, (u32)-1);
54aa1f4d 4700 if (wret < 0 && wret != -ENOSPC)
aa5d6bed 4701 ret = wret;
5f39d397
CM
4702
4703 if (path->nodes[0] == leaf &&
4704 btrfs_header_nritems(leaf)) {
99d8f83c
CM
4705 wret = push_leaf_right(trans, root, path, 1,
4706 1, 1, 0);
54aa1f4d 4707 if (wret < 0 && wret != -ENOSPC)
aa5d6bed
CM
4708 ret = wret;
4709 }
5f39d397
CM
4710
4711 if (btrfs_header_nritems(leaf) == 0) {
323ac95b 4712 path->slots[1] = slot;
143bede5 4713 btrfs_del_leaf(trans, root, path, leaf);
5f39d397 4714 free_extent_buffer(leaf);
143bede5 4715 ret = 0;
5de08d7d 4716 } else {
925baedd
CM
4717 /* if we're still in the path, make sure
4718 * we're dirty. Otherwise, one of the
4719 * push_leaf functions must have already
4720 * dirtied this buffer
4721 */
4722 if (path->nodes[0] == leaf)
4723 btrfs_mark_buffer_dirty(leaf);
5f39d397 4724 free_extent_buffer(leaf);
be0e5c09 4725 }
d5719762 4726 } else {
5f39d397 4727 btrfs_mark_buffer_dirty(leaf);
be0e5c09
CM
4728 }
4729 }
aa5d6bed 4730 return ret;
be0e5c09
CM
4731}
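/*
 * Illustrative sketch only (not part of ctree.c): looking up a key and
 * deleting just that one slot.  The helper name is hypothetical; nr == 1
 * is the common single-item case of btrfs_del_items().
 */
static int example_delete_one(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len == -1 signals a deletion, cow == 1 since we modify the leaf */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;	/* exact key not found */
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);

	btrfs_free_path(path);
	return ret;
}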
4732
4733/*
4734 * search the tree again to find a leaf with lesser keys
4735 * returns 0 if it found something or 1 if there are no lesser leaves.
4736 * returns < 0 on io errors.
4737 *
4738 * This may release the path, and so you may lose any locks held at the
4739 * time you call it.
4740 */
4741int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4742{
925baedd
CM
4743 struct btrfs_key key;
4744 struct btrfs_disk_key found_key;
4745 int ret;
7bb86316 4746
925baedd 4747 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 4748
925baedd
CM
4749 if (key.offset > 0)
4750 key.offset--;
4751 else if (key.type > 0)
4752 key.type--;
4753 else if (key.objectid > 0)
4754 key.objectid--;
4755 else
4756 return 1;
7bb86316 4757
b3b4aa74 4758 btrfs_release_path(path);
925baedd
CM
4759 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4760 if (ret < 0)
4761 return ret;
4762 btrfs_item_key(path->nodes[0], &found_key, 0);
4763 ret = comp_keys(&found_key, &key);
4764 if (ret < 0)
4765 return 0;
4766 return 1;
7bb86316
CM
4767}
4768
3f157a2f
CM
4769/*
4770 * A helper function to walk down the tree starting at min_key, and looking
4771 * for nodes or leaves that are either in cache or have a minimum
4772 * transaction id. This is used by the btree defrag code and by tree logging.
4773 *
4774 * This does not cow, but it does stuff the starting key it finds back
4775 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4776 * key and get a writable path.
4777 *
4778 * This does lock as it descends, and path->keep_locks should be set
4779 * to 1 by the caller.
4780 *
4781 * This honors path->lowest_level to prevent descent past a given level
4782 * of the tree.
4783 *
4784 * min_trans indicates the oldest transaction that you are interested
4785 * in walking through. Any nodes or leaves older than min_trans are
4786 * skipped over (without reading them).
4787 *
4788 * returns zero if something useful was found, < 0 on error and 1 if there
4789 * was nothing in the tree that matched the search criteria.
4790 */
4791int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
e02119d5 4792 struct btrfs_key *max_key,
3f157a2f
CM
4793 struct btrfs_path *path, int cache_only,
4794 u64 min_trans)
4795{
4796 struct extent_buffer *cur;
4797 struct btrfs_key found_key;
4798 int slot;
9652480b 4799 int sret;
3f157a2f
CM
4800 u32 nritems;
4801 int level;
4802 int ret = 1;
4803
934d375b 4804 WARN_ON(!path->keep_locks);
3f157a2f 4805again:
bd681513 4806 cur = btrfs_read_lock_root_node(root);
3f157a2f 4807 level = btrfs_header_level(cur);
e02119d5 4808 WARN_ON(path->nodes[level]);
3f157a2f 4809 path->nodes[level] = cur;
bd681513 4810 path->locks[level] = BTRFS_READ_LOCK;
3f157a2f
CM
4811
4812 if (btrfs_header_generation(cur) < min_trans) {
4813 ret = 1;
4814 goto out;
4815 }
d397712b 4816 while (1) {
3f157a2f
CM
4817 nritems = btrfs_header_nritems(cur);
4818 level = btrfs_header_level(cur);
9652480b 4819 sret = bin_search(cur, min_key, level, &slot);
3f157a2f 4820
323ac95b
CM
4821 /* at the lowest level, we're done, setup the path and exit */
4822 if (level == path->lowest_level) {
e02119d5
CM
4823 if (slot >= nritems)
4824 goto find_next_key;
3f157a2f
CM
4825 ret = 0;
4826 path->slots[level] = slot;
4827 btrfs_item_key_to_cpu(cur, &found_key, slot);
4828 goto out;
4829 }
9652480b
Y
4830 if (sret && slot > 0)
4831 slot--;
3f157a2f
CM
4832 /*
4833 * check this node pointer against the cache_only and
4834 * min_trans parameters. If it isn't in cache or is too
4835 * old, skip to the next one.
4836 */
d397712b 4837 while (slot < nritems) {
3f157a2f
CM
4838 u64 blockptr;
4839 u64 gen;
4840 struct extent_buffer *tmp;
e02119d5
CM
4841 struct btrfs_disk_key disk_key;
4842
3f157a2f
CM
4843 blockptr = btrfs_node_blockptr(cur, slot);
4844 gen = btrfs_node_ptr_generation(cur, slot);
4845 if (gen < min_trans) {
4846 slot++;
4847 continue;
4848 }
4849 if (!cache_only)
4850 break;
4851
e02119d5
CM
4852 if (max_key) {
4853 btrfs_node_key(cur, &disk_key, slot);
4854 if (comp_keys(&disk_key, max_key) >= 0) {
4855 ret = 1;
4856 goto out;
4857 }
4858 }
4859
3f157a2f
CM
4860 tmp = btrfs_find_tree_block(root, blockptr,
4861 btrfs_level_size(root, level - 1));
4862
b9fab919 4863 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
3f157a2f
CM
4864 free_extent_buffer(tmp);
4865 break;
4866 }
4867 if (tmp)
4868 free_extent_buffer(tmp);
4869 slot++;
4870 }
e02119d5 4871find_next_key:
3f157a2f
CM
4872 /*
4873 * we didn't find a candidate key in this node, walk forward
4874 * and find another one
4875 */
4876 if (slot >= nritems) {
e02119d5 4877 path->slots[level] = slot;
b4ce94de 4878 btrfs_set_path_blocking(path);
e02119d5 4879 sret = btrfs_find_next_key(root, path, min_key, level,
3f157a2f 4880 cache_only, min_trans);
e02119d5 4881 if (sret == 0) {
b3b4aa74 4882 btrfs_release_path(path);
3f157a2f
CM
4883 goto again;
4884 } else {
4885 goto out;
4886 }
4887 }
4888 /* save our key for returning back */
4889 btrfs_node_key_to_cpu(cur, &found_key, slot);
4890 path->slots[level] = slot;
4891 if (level == path->lowest_level) {
4892 ret = 0;
f7c79f30 4893 unlock_up(path, level, 1, 0, NULL);
3f157a2f
CM
4894 goto out;
4895 }
b4ce94de 4896 btrfs_set_path_blocking(path);
3f157a2f 4897 cur = read_node_slot(root, cur, slot);
79787eaa 4898 BUG_ON(!cur); /* -ENOMEM */
3f157a2f 4899
bd681513 4900 btrfs_tree_read_lock(cur);
b4ce94de 4901
bd681513 4902 path->locks[level - 1] = BTRFS_READ_LOCK;
3f157a2f 4903 path->nodes[level - 1] = cur;
f7c79f30 4904 unlock_up(path, level, 1, 0, NULL);
bd681513 4905 btrfs_clear_path_blocking(path, NULL, 0);
3f157a2f
CM
4906 }
4907out:
4908 if (ret == 0)
4909 memcpy(min_key, &found_key, sizeof(found_key));
b4ce94de 4910 btrfs_set_path_blocking(path);
3f157a2f
CM
4911 return ret;
4912}
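/*
 * Illustrative sketch only (not part of ctree.c): walking every node or
 * leaf newer than 'min_trans', the way the defrag and log code drive
 * btrfs_search_forward().  The helper name and the visit() callback are
 * hypothetical, and the key advancement is simplified (a real walker also
 * has to bump type/objectid when the offset overflows).
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans,
				   int (*visit)(struct btrfs_path *path))
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	path->keep_locks = 1;	/* required by btrfs_search_forward() */

	while (1) {
		ret = btrfs_search_forward(root, &min_key, NULL, path,
					   0, min_trans);
		if (ret)	/* 1 == nothing left, < 0 == error */
			break;

		/* min_key now holds the key that was found */
		ret = visit(path);
		btrfs_release_path(path);
		if (ret)
			break;

		/* advance past the key we just got back (simplified) */
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}

	btrfs_free_path(path);
	return ret > 0 ? 0 : ret;
}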
4913
4914/*
4915 * this is similar to btrfs_next_leaf, but does not try to preserve
4916 * and fixup the path. It looks for and returns the next key in the
4917 * tree based on the current path and the cache_only and min_trans
4918 * parameters.
4919 *
4920 * 0 is returned if another key is found, < 0 if there are any errors
4921 * and 1 is returned if there are no higher keys in the tree
4922 *
4923 * path->keep_locks should be set to 1 on the search made before
4924 * calling this function.
4925 */
e7a84565 4926int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
33c66f43 4927 struct btrfs_key *key, int level,
3f157a2f 4928 int cache_only, u64 min_trans)
e7a84565 4929{
e7a84565
CM
4930 int slot;
4931 struct extent_buffer *c;
4932
934d375b 4933 WARN_ON(!path->keep_locks);
d397712b 4934 while (level < BTRFS_MAX_LEVEL) {
e7a84565
CM
4935 if (!path->nodes[level])
4936 return 1;
4937
4938 slot = path->slots[level] + 1;
4939 c = path->nodes[level];
3f157a2f 4940next:
e7a84565 4941 if (slot >= btrfs_header_nritems(c)) {
33c66f43
YZ
4942 int ret;
4943 int orig_lowest;
4944 struct btrfs_key cur_key;
4945 if (level + 1 >= BTRFS_MAX_LEVEL ||
4946 !path->nodes[level + 1])
e7a84565 4947 return 1;
33c66f43
YZ
4948
4949 if (path->locks[level + 1]) {
4950 level++;
4951 continue;
4952 }
4953
4954 slot = btrfs_header_nritems(c) - 1;
4955 if (level == 0)
4956 btrfs_item_key_to_cpu(c, &cur_key, slot);
4957 else
4958 btrfs_node_key_to_cpu(c, &cur_key, slot);
4959
4960 orig_lowest = path->lowest_level;
b3b4aa74 4961 btrfs_release_path(path);
33c66f43
YZ
4962 path->lowest_level = level;
4963 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4964 0, 0);
4965 path->lowest_level = orig_lowest;
4966 if (ret < 0)
4967 return ret;
4968
4969 c = path->nodes[level];
4970 slot = path->slots[level];
4971 if (ret == 0)
4972 slot++;
4973 goto next;
e7a84565 4974 }
33c66f43 4975
e7a84565
CM
4976 if (level == 0)
4977 btrfs_item_key_to_cpu(c, key, slot);
3f157a2f
CM
4978 else {
4979 u64 blockptr = btrfs_node_blockptr(c, slot);
4980 u64 gen = btrfs_node_ptr_generation(c, slot);
4981
4982 if (cache_only) {
4983 struct extent_buffer *cur;
4984 cur = btrfs_find_tree_block(root, blockptr,
4985 btrfs_level_size(root, level - 1));
b9fab919
CM
4986 if (!cur ||
4987 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
3f157a2f
CM
4988 slot++;
4989 if (cur)
4990 free_extent_buffer(cur);
4991 goto next;
4992 }
4993 free_extent_buffer(cur);
4994 }
4995 if (gen < min_trans) {
4996 slot++;
4997 goto next;
4998 }
e7a84565 4999 btrfs_node_key_to_cpu(c, key, slot);
3f157a2f 5000 }
e7a84565
CM
5001 return 0;
5002 }
5003 return 1;
5004}
5005
5006/*
5007 * search the tree again to find a leaf with greater keys
5008 * returns 0 if it found something or 1 if there are no greater leaves.
5009 * returns < 0 on io errors.
5010 */
234b63a0 5011int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
d97e63b6
CM
5012{
5013 int slot;
8e73f275 5014 int level;
5f39d397 5015 struct extent_buffer *c;
8e73f275 5016 struct extent_buffer *next;
925baedd
CM
5017 struct btrfs_key key;
5018 u32 nritems;
5019 int ret;
8e73f275 5020 int old_spinning = path->leave_spinning;
bd681513 5021 int next_rw_lock = 0;
925baedd
CM
5022
5023 nritems = btrfs_header_nritems(path->nodes[0]);
d397712b 5024 if (nritems == 0)
925baedd 5025 return 1;
925baedd 5026
8e73f275
CM
5027 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5028again:
5029 level = 1;
5030 next = NULL;
bd681513 5031 next_rw_lock = 0;
b3b4aa74 5032 btrfs_release_path(path);
8e73f275 5033
a2135011 5034 path->keep_locks = 1;
31533fb2 5035 path->leave_spinning = 1;
8e73f275 5036
925baedd
CM
5037 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5038 path->keep_locks = 0;
5039
5040 if (ret < 0)
5041 return ret;
5042
a2135011 5043 nritems = btrfs_header_nritems(path->nodes[0]);
168fd7d2
CM
5044 /*
5045 * by releasing the path above we dropped all our locks. A balance
5046 * could have added more items next to the key that used to be
5047 * at the very end of the block. So, check again here and
5048 * advance the path if there are now more items available.
5049 */
a2135011 5050 if (nritems > 0 && path->slots[0] < nritems - 1) {
e457afec
YZ
5051 if (ret == 0)
5052 path->slots[0]++;
8e73f275 5053 ret = 0;
925baedd
CM
5054 goto done;
5055 }
d97e63b6 5056
d397712b 5057 while (level < BTRFS_MAX_LEVEL) {
8e73f275
CM
5058 if (!path->nodes[level]) {
5059 ret = 1;
5060 goto done;
5061 }
5f39d397 5062
d97e63b6
CM
5063 slot = path->slots[level] + 1;
5064 c = path->nodes[level];
5f39d397 5065 if (slot >= btrfs_header_nritems(c)) {
d97e63b6 5066 level++;
8e73f275
CM
5067 if (level == BTRFS_MAX_LEVEL) {
5068 ret = 1;
5069 goto done;
5070 }
d97e63b6
CM
5071 continue;
5072 }
5f39d397 5073
925baedd 5074 if (next) {
bd681513 5075 btrfs_tree_unlock_rw(next, next_rw_lock);
5f39d397 5076 free_extent_buffer(next);
925baedd 5077 }
5f39d397 5078
8e73f275 5079 next = c;
bd681513 5080 next_rw_lock = path->locks[level];
8e73f275 5081 ret = read_block_for_search(NULL, root, path, &next, level,
5d9e75c4 5082 slot, &key, 0);
8e73f275
CM
5083 if (ret == -EAGAIN)
5084 goto again;
5f39d397 5085
76a05b35 5086 if (ret < 0) {
b3b4aa74 5087 btrfs_release_path(path);
76a05b35
CM
5088 goto done;
5089 }
5090
5cd57b2c 5091 if (!path->skip_locking) {
bd681513 5092 ret = btrfs_try_tree_read_lock(next);
8e73f275
CM
5093 if (!ret) {
5094 btrfs_set_path_blocking(path);
bd681513 5095 btrfs_tree_read_lock(next);
31533fb2 5096 btrfs_clear_path_blocking(path, next,
bd681513 5097 BTRFS_READ_LOCK);
8e73f275 5098 }
31533fb2 5099 next_rw_lock = BTRFS_READ_LOCK;
5cd57b2c 5100 }
d97e63b6
CM
5101 break;
5102 }
5103 path->slots[level] = slot;
d397712b 5104 while (1) {
d97e63b6
CM
5105 level--;
5106 c = path->nodes[level];
925baedd 5107 if (path->locks[level])
bd681513 5108 btrfs_tree_unlock_rw(c, path->locks[level]);
8e73f275 5109
5f39d397 5110 free_extent_buffer(c);
d97e63b6
CM
5111 path->nodes[level] = next;
5112 path->slots[level] = 0;
a74a4b97 5113 if (!path->skip_locking)
bd681513 5114 path->locks[level] = next_rw_lock;
d97e63b6
CM
5115 if (!level)
5116 break;
b4ce94de 5117
8e73f275 5118 ret = read_block_for_search(NULL, root, path, &next, level,
5d9e75c4 5119 0, &key, 0);
8e73f275
CM
5120 if (ret == -EAGAIN)
5121 goto again;
5122
76a05b35 5123 if (ret < 0) {
b3b4aa74 5124 btrfs_release_path(path);
76a05b35
CM
5125 goto done;
5126 }
5127
5cd57b2c 5128 if (!path->skip_locking) {
bd681513 5129 ret = btrfs_try_tree_read_lock(next);
8e73f275
CM
5130 if (!ret) {
5131 btrfs_set_path_blocking(path);
bd681513 5132 btrfs_tree_read_lock(next);
31533fb2 5133 btrfs_clear_path_blocking(path, next,
bd681513
CM
5134 BTRFS_READ_LOCK);
5135 }
31533fb2 5136 next_rw_lock = BTRFS_READ_LOCK;
5cd57b2c 5137 }
d97e63b6 5138 }
8e73f275 5139 ret = 0;
925baedd 5140done:
f7c79f30 5141 unlock_up(path, 0, 1, 0, NULL);
8e73f275
CM
5142 path->leave_spinning = old_spinning;
5143 if (!old_spinning)
5144 btrfs_set_path_blocking(path);
5145
5146 return ret;
d97e63b6 5147}
0b86a832 5148
5149/*
5150 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5151 * searching until it gets past min_objectid or finds an item of 'type'
5152 *
5153 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5154 */
0b86a832
CM
5155int btrfs_previous_item(struct btrfs_root *root,
5156 struct btrfs_path *path, u64 min_objectid,
5157 int type)
5158{
5159 struct btrfs_key found_key;
5160 struct extent_buffer *leaf;
e02119d5 5161 u32 nritems;
0b86a832
CM
5162 int ret;
5163
d397712b 5164 while (1) {
0b86a832 5165 if (path->slots[0] == 0) {
b4ce94de 5166 btrfs_set_path_blocking(path);
0b86a832
CM
5167 ret = btrfs_prev_leaf(root, path);
5168 if (ret != 0)
5169 return ret;
5170 } else {
5171 path->slots[0]--;
5172 }
5173 leaf = path->nodes[0];
e02119d5
CM
5174 nritems = btrfs_header_nritems(leaf);
5175 if (nritems == 0)
5176 return 1;
5177 if (path->slots[0] == nritems)
5178 path->slots[0]--;
5179
0b86a832 5180 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
e02119d5
CM
5181 if (found_key.objectid < min_objectid)
5182 break;
0a4eefbb
YZ
5183 if (found_key.type == type)
5184 return 0;
e02119d5
CM
5185 if (found_key.objectid == min_objectid &&
5186 found_key.type < type)
5187 break;
0b86a832
CM
5188 }
5189 return 1;
5190}
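/*
 * Illustrative sketch only (not part of ctree.c): scanning backwards from
 * the current path position for the previous item of a given type, the
 * way callers use btrfs_previous_item() after a search that landed past
 * the item they wanted.  The helper name is hypothetical.
 */
static int example_find_prev_of_type(struct btrfs_root *root,
				     struct btrfs_path *path,
				     u64 min_objectid, int type,
				     struct btrfs_key *found)
{
	int ret;

	ret = btrfs_previous_item(root, path, min_objectid, type);
	if (ret)	/* 1 == nothing found, < 0 == error */
		return ret;

	/* the path now points at the matching item; hand its key back */
	btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
	return 0;
}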