]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - fs/btrfs/ctree.c
Btrfs: Make btrfs_dev_extent_chunk_tree_uuid() return unsigned long
[mirror_ubuntu-artful-kernel.git] / fs / btrfs / ctree.c
CommitLineData
6cbd5570 1/*
d352ac68 2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6cbd5570
CM
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

/* forward declarations for the tree balancing helpers defined later in
 * this file; they are mutually used by the search/insert/delete paths */
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
d97e63b6 45
df24a2b9 46struct btrfs_path *btrfs_alloc_path(void)
2c90e5d6 47{
df24a2b9 48 struct btrfs_path *path;
e00f7308 49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
df24a2b9 50 return path;
2c90e5d6
CM
51}
52
b4ce94de
CM
/*
 * set all locked nodes in the path to blocking locks. This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		/* only levels that hold both a node and a lock matter */
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		/* record the new blocking state so it can be cleared later */
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
70
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path. You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order. If any of the locks in the path are not
	 * currently blocking, it is going to complain. So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		/* track held's new blocking state for the re-clear below */
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	/* walk top-down (highest level first) re-taking spinning locks */
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
116
d352ac68 117/* this also releases the path */
df24a2b9 118void btrfs_free_path(struct btrfs_path *p)
be0e5c09 119{
ff175d57
JJ
120 if (!p)
121 return;
b3b4aa74 122 btrfs_release_path(p);
df24a2b9 123 kmem_cache_free(btrfs_path_cachep, p);
be0e5c09
CM
124}
125
d352ac68
CM
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		/* unlock before dropping our reference on the buffer */
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
148
d352ac68
CM
149/*
150 * safely gets a reference on the root node of a tree. A lock
151 * is not taken, so a concurrent writer may put a different node
152 * at the root of the tree. See btrfs_lock_root_node for the
153 * looping required.
154 *
155 * The extent buffer returned by this has a reference taken, so
156 * it won't disappear. It may stop being the root of the tree
157 * at any time because there are no locks held.
158 */
925baedd
CM
159struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
160{
161 struct extent_buffer *eb;
240f62c8 162
3083ee2e
JB
163 while (1) {
164 rcu_read_lock();
165 eb = rcu_dereference(root->node);
166
167 /*
168 * RCU really hurts here, we could free up the root node because
169 * it was cow'ed but we may not get the new root node yet so do
170 * the inc_not_zero dance and if it doesn't work then
171 * synchronize_rcu and try again.
172 */
173 if (atomic_inc_not_zero(&eb->refs)) {
174 rcu_read_unlock();
175 break;
176 }
177 rcu_read_unlock();
178 synchronize_rcu();
179 }
925baedd
CM
180 return eb;
181}
182
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root. A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		/* still the root after we got the lock? then we're done */
		if (eb == root->node)
			break;
		/* root changed underneath us; drop and retry */
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
201
/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root. A locked
 * buffer is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		/* still the root after we got the lock? then we're done */
		if (eb == root->node)
			break;
		/* root changed underneath us; drop and retry */
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
220
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list. transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	/* only tracked roots are added, and only once per transaction */
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
234
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	/* reference counted trees may only be copied inside the running
	 * transaction they were modified in */
	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	/* first key of the block, used as an allocation hint */
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	/* relocation trees are marked instead of owned */
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	/* take references on everything the new root points to; full
	 * backrefs for reloc trees, normal backrefs otherwise */
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
294
/* the kinds of tree modifications recorded in the tree mod log */
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

/* payload of a MOD_LOG_MOVE_KEYS record */
struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

/* payload of a MOD_LOG_ROOT_REPLACE record: the replaced root */
struct tree_mod_root {
	u64 logical;
	u8 level;
};

/* one entry of the tree mod log, keyed by (index, seq) in an rbtree */
struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
337
/* thin wrappers around the tree mod log rwlock; readers are the log
 * search paths, writers are the insert and cleanup paths */
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
357
/*
 * Increment the upper half of tree_mod_seq, set lower half zero.
 *
 * Must be called with fs_info->tree_mod_seq_lock held.
 */
static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
{
	u64 seq = atomic64_read(&fs_info->tree_mod_seq);
	seq &= 0xffffffff00000000ull;	/* drop the minor half */
	seq += 1ull << 32;		/* bump the major half */
	atomic64_set(&fs_info->tree_mod_seq, seq);
	return seq;
}

/*
 * Increment the lower half of tree_mod_seq.
 *
 * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
 * are generated should not technically require a spin lock here. (Rationale:
 * incrementing the minor while incrementing the major seq number is between its
 * atomic64_read and atomic64_set calls doesn't duplicate sequence numbers, it
 * just returns a unique sequence number as usual.) We have decided to leave
 * that requirement in here and rethink it once we notice it really imposes a
 * problem on some workload.
 */
static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
387
388/*
389 * return the last minor in the previous major tree_mod_seq number
390 */
391u64 btrfs_tree_mod_seq_prev(u64 seq)
392{
393 return (seq & 0xffffffff00000000ull) - 1ull;
394}
395
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	u64 seq;

	/* take the write lock too so no log entry can race with the
	 * registration of the new blocker */
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		/* first call for this elem: register it as a blocker */
		elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	seq = btrfs_inc_tree_mod_seq_minor(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return seq;
}
421
/*
 * Drop the blocker @elem and garbage collect every tree mod log entry
 * that is older than the oldest remaining blocker. If a blocker with a
 * lower sequence number still exists, nothing can be removed.
 */
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	/* elem was never registered via btrfs_get_tree_mod_seq */
	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	/* find the smallest sequence number among the remaining blockers */
	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		/* grab next before rb_erase invalidates node */
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
471
/*
 * key order of the log:
 * index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Takes ownership of @tm: it is either inserted into the rbtree or freed.
 * Returns 0 on success (or when logging is disabled), -EEXIST if an entry
 * with the same (index, seq) already exists.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;
	int ret = 0;

	BUG_ON(!tm);

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		/*
		 * Ok we no longer care about logging modifications, free up tm
		 * and return 0. Any callers shouldn't be using tm after
		 * calling tree_mod_log_insert, but if they do we can just
		 * change this to return a special error code to let the callers
		 * do their own thing.
		 */
		kfree(tm);
		return 0;
	}

	/* assign the sequence number under the seq lock */
	spin_lock(&fs_info->tree_mod_seq_lock);
	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/* standard rbtree insertion, ordered by index then seq */
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else {
			ret = -EEXIST;
			kfree(tm);
			goto out;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}
535
/*
 * Determines if logging can be omitted. Returns 1 if it can (no blockers
 * registered, or @eb is a leaf -- leaves are never logged), 0 otherwise.
 * NOTE(review): an older comment claimed the tree_mod_log_lock is held on
 * a zero return; in this version no lock is taken here -- the insert
 * helpers take it themselves.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;
	return 0;
}
551
/*
 * Allocate and fill a tree mod log entry for a single-key operation on
 * @slot of @eb, then insert it. Returns 0 on success or -ENOMEM.
 */
static inline int
__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	/* for removals/replacements, remember what was in the slot */
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}
574
/*
 * Log a single-key operation unless logging is currently disabled
 * (no blockers, or @eb is a leaf). Returns 0 or -ENOMEM.
 */
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
}
585
/*
 * Log a move of @nr_items keys from @src_slot to @dst_slot inside @eb.
 * Slots that get overwritten by the move are logged as removals first.
 * Returns 0 or -ENOMEM.
 */
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
				MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		BUG_ON(ret < 0);
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	return __tree_mod_log_insert(fs_info, tm);
}
621
/*
 * Log the removal of every key in @eb, in reverse slot order, as the
 * block is about to be freed. Leaves are never logged.
 */
static inline void
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	int i;
	u32 nritems;
	int ret;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	/* log highest slot first so replay can rebuild in order */
	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert_key(fs_info, eb, i,
				MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		BUG_ON(ret < 0);
	}
}
639
/*
 * Log the replacement of @old_root by @new_root. When @log_removal is
 * set, the keys of the old root are logged as removals first. The log
 * entry is keyed by the *new* root's logical address. Returns 0 or
 * -ENOMEM.
 */
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	if (log_removal)
		__tree_mod_log_free_eb(fs_info, old_root);

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	return __tree_mod_log_insert(fs_info, tm);
}
666
/*
 * Search the tree mod log for entries matching the block at @start with
 * seq >= @min_seq. With @smallest set, the entry with the smallest such
 * seq is returned; otherwise the one with the largest. Returns NULL if
 * no matching entry exists.
 */
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			/* exact match on (index, min_seq) */
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
709
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
732
/*
 * Log a copy of @nr_items keys from @src (at @src_offset) into @dst (at
 * @dst_offset): each copied key is logged as a removal from src and an
 * add to dst. Nothing is logged when both buffers are leaves.
 */
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert_key(fs_info, src,
						i + src_offset,
						MOD_LOG_KEY_REMOVE, GFP_NOFS);
		BUG_ON(ret < 0);
		ret = __tree_mod_log_insert_key(fs_info, dst,
						i + dst_offset,
						MOD_LOG_KEY_ADD,
						GFP_NOFS);
		BUG_ON(ret < 0);
	}
}
759
/* convenience wrapper: log an intra-buffer key move; allocation failure
 * is fatal here (BUG_ON) rather than propagated */
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
769
/* log the in-place replacement of the key in @slot of @eb; @atomic
 * selects GFP_ATOMIC for callers that cannot sleep */
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = __tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
781
/* log the freeing of @eb (all its keys as removals) unless logging is
 * disabled or @eb is a leaf */
static noinline void
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	if (tree_mod_dont_log(fs_info, eb))
		return;
	__tree_mod_log_free_eb(fs_info, eb);
}
789
/* log the replacement of root->node by @new_root_node; allocation
 * failure is fatal (BUG_ON). @log_removal selects whether the old
 * root's keys are logged as removals as well */
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}
800
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* old-format blocks in refcounted trees may always be shared */
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
826
/*
 * Adjust extent backrefs when @buf is cow'ed into @cow. Sets *last_ref
 * when the last reference to @buf is dropped so the caller can free it.
 * Returns 0 on success or a negative error code.
 */
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is been relocating
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		/* look up the current refcount and extent flags */
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			/* a shareable block must have at least one ref;
			 * zero means on-disk corruption */
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		/* block is shared: convert to full backrefs where needed */
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		/* last reference: move refs to the cow copy and drop buf */
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
930
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	/*
	 * if the caller wants the result in the same slot as the input,
	 * we must drop the lock on the original at the end.
	 */
	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	/* COW outside the running transaction would corrupt the tree */
	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	/* first key of the block, handed to the allocator below */
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	/*
	 * only the reloc tree passes a non-zero parent_start; for all other
	 * roots the allocation is keyed on the root objectid alone.
	 */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	/* clone the contents, then fix up the header of the copy */
	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	/* fix up extent refs; last_ref tells us if buf's last ref is gone */
	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		/* replacing the root node itself */
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		/* extra ref for root->node; published via RCU for lockless readers */
		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		/* redirect the parent's pointer at our slot to the copy */
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref)
			tree_mod_log_free_eb(root->fs_info, buf);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	/* returned buffer is locked (from allocation) and dirty */
	*cow_ret = cow;
	return 0;
}
1055
5d9e75c4
JS
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	/* time_seq == 0 means "no rewind requested" */
	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operation for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		/* hop one root-replacement further back and search again */
		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
1109
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 *
 * Walks the mod-log rbtree forward from @first_tm, undoing each logged
 * operation on @eb, and finally stores the rewound item count in eb's header.
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	/* hold the log's read lock for the whole replay */
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			/* undo a removal: restore the key/ptr/gen and grow n */
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			/* undo a replace: put the old key/ptr/gen back */
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			/* undo a move by copying back in the reverse direction */
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		/* stop when the log entries no longer belong to this block */
		if (tm->index != first_tm->index)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}
1185
47fb091f
JS
/*
 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	/* no rewind requested */
	if (!time_seq)
		return eb;

	/* leaves are never rewound here, only nodes */
	if (btrfs_header_level(eb) == 0)
		return eb;

	/* nothing logged for this block since time_seq: keep eb as-is */
	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/* the block was freed; rebuild it from scratch on a dummy eb */
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		/* otherwise rewind on a private copy of the live block */
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	/* drop the caller's lock and reference on the input buffer */
	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}
1248
8ba97a15
JS
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	u32 blocksize;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	/* the root itself was replaced: rewind from the logged old root */
	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/* the old root still exists on disk: read and clone it */
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		blocksize = btrfs_level_size(root, old_root->level);
		old = read_tree_block(root, logical, blocksize, 0);
		if (!old || !extent_buffer_uptodate(old)) {
			free_extent_buffer(old);
			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
				logical);
			WARN_ON(1);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		/* old root was freed; rebuild it on a dummy buffer */
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	} else {
		/* same root block, just rewind in-log changes on a clone */
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		/*
		 * NOTE(review): in the first two branches above, our reference
		 * on eb_root has already been dropped before this read of
		 * btrfs_header_owner(eb_root); presumably root still holds its
		 * own reference keeping the buffer alive — confirm upstream.
		 */
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}
1326
5b6602e7
JS
1327int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1328{
1329 struct tree_mod_elem *tm;
1330 int level;
30b0463a 1331 struct extent_buffer *eb_root = btrfs_root_node(root);
5b6602e7 1332
30b0463a 1333 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
5b6602e7
JS
1334 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1335 level = tm->old_root.level;
1336 } else {
30b0463a 1337 level = btrfs_header_level(eb_root);
5b6602e7 1338 }
30b0463a 1339 free_extent_buffer(eb_root);
5b6602e7
JS
1340
1341 return level;
1342}
1343
5d4f98a2
YZ
1344static inline int should_cow_block(struct btrfs_trans_handle *trans,
1345 struct btrfs_root *root,
1346 struct extent_buffer *buf)
1347{
f1ebcc74
LB
1348 /* ensure we can see the force_cow */
1349 smp_rmb();
1350
1351 /*
1352 * We do not need to cow a block if
1353 * 1) this block is not created or changed in this transaction;
1354 * 2) this block does not belong to TREE_RELOC tree;
1355 * 3) the root is not forced COW.
1356 *
1357 * What is forced COW:
1358 * when we create snapshot during commiting the transaction,
1359 * after we've finished coping src root, we must COW the shared
1360 * block to ensure the metadata consistency.
1361 */
5d4f98a2
YZ
1362 if (btrfs_header_generation(buf) == trans->transid &&
1363 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1364 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
f1ebcc74
LB
1365 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1366 !root->force_cow)
5d4f98a2
YZ
1367 return 0;
1368 return 1;
1369}
1370
d352ac68
CM
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	/* catch COW attempts from a stale transaction handle */
	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid,
		     root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid, root->fs_info->generation);

	/* already COWed in this transaction (and not forced): reuse buf */
	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	/* allocation hint: round buf->start down to a 1GB boundary */
	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
1411
d352ac68
CM
1412/*
1413 * helper function for defrag to decide if two blocks pointed to by a
1414 * node are actually close by
1415 */
6b80053d 1416static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
6702ed49 1417{
6b80053d 1418 if (blocknr < other && other - (blocknr + blocksize) < 32768)
6702ed49 1419 return 1;
6b80053d 1420 if (blocknr > other && blocknr - (other + blocksize) < 32768)
6702ed49
CM
1421 return 1;
1422 return 0;
1423}
1424
081e9573
CM
1425/*
1426 * compare two keys in a memcmp fashion
1427 */
1428static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1429{
1430 struct btrfs_key k1;
1431
1432 btrfs_disk_key_to_cpu(&k1, disk);
1433
20736aba 1434 return btrfs_comp_cpu_keys(&k1, k2);
081e9573
CM
1435}
1436
f3465ca4
JB
1437/*
1438 * same as comp_keys only with two btrfs_key's
1439 */
5d4f98a2 1440int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
f3465ca4
JB
1441{
1442 if (k1->objectid > k2->objectid)
1443 return 1;
1444 if (k1->objectid < k2->objectid)
1445 return -1;
1446 if (k1->type > k2->type)
1447 return 1;
1448 if (k1->type < k2->type)
1449 return -1;
1450 if (k1->offset > k2->offset)
1451 return 1;
1452 if (k1->offset < k2->offset)
1453 return -1;
1454 return 0;
1455}
081e9573 1456
d352ac68
CM
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	/* a single child cannot be out of order with anything */
	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		/* skip keys before the caller's progress cursor */
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		/* already close to either neighbor? then leave it alone */
		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		/* get an uptodate copy of the child block */
		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur || !extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		/* COW it near search_start; empty_size hints at more to come */
		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		/* remember where we ended up so the next pass continues here */
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
1566
74123bd7
CM
1567/*
1568 * The leaf data grows from end-to-front in the node.
1569 * this returns the address of the start of the last item,
1570 * which is the stop of the leaf data stack
1571 */
123abc88 1572static inline unsigned int leaf_data_end(struct btrfs_root *root,
5f39d397 1573 struct extent_buffer *leaf)
be0e5c09 1574{
5f39d397 1575 u32 nr = btrfs_header_nritems(leaf);
be0e5c09 1576 if (nr == 0)
123abc88 1577 return BTRFS_LEAF_DATA_SIZE(root);
5f39d397 1578 return btrfs_item_offset_nr(leaf, nr - 1);
be0e5c09
CM
1579}
1580
aa5d6bed 1581
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 *
 * Returns 0 when the key was found, 1 otherwise.
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		/* max is bounded by the items in one block, so no overflow here */
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		/*
		 * remap only when the key at 'offset' falls outside the
		 * currently mapped window [map_start, map_start + map_len).
		 */
		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				/*
				 * key straddles a page boundary: copy it into
				 * an aligned local and compare that instead.
				 */
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	/* not found; low is the insertion point */
	*slot = low;
	return 1;
}
1648
97571fd0
CM
1649/*
1650 * simple bin_search frontend that does the right thing for
1651 * leaves vs nodes
1652 */
5f39d397
CM
1653static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1654 int level, int *slot)
be0e5c09 1655{
f775738f 1656 if (level == 0)
5f39d397
CM
1657 return generic_bin_search(eb,
1658 offsetof(struct btrfs_leaf, items),
0783fcfc 1659 sizeof(struct btrfs_item),
5f39d397 1660 key, btrfs_header_nritems(eb),
7518a238 1661 slot);
f775738f 1662 else
5f39d397
CM
1663 return generic_bin_search(eb,
1664 offsetof(struct btrfs_node, ptrs),
123abc88 1665 sizeof(struct btrfs_key_ptr),
5f39d397 1666 key, btrfs_header_nritems(eb),
7518a238 1667 slot);
be0e5c09
CM
1668}
1669
5d4f98a2
YZ
/*
 * Exported frontend for bin_search(): locate @key (or its insertion
 * slot) in @eb at @level.  Returns 0 if the key was found, 1 otherwise;
 * *slot is set either way.
 */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
1675
f0486c68
YZ
/*
 * Bump the root item's used-bytes accounting by @size, under the
 * root's accounting spinlock.
 */
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}
1683
/*
 * Shrink the root item's used-bytes accounting by @size, under the
 * root's accounting spinlock.  Counterpart of root_add_used().
 */
static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
1691
d352ac68
CM
1692/* given a node and slot number, this reads the blocks it points to. The
1693 * extent buffer is returned with a reference taken (but unlocked).
1694 * NULL is returned on error.
1695 */
e02119d5 1696static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
5f39d397 1697 struct extent_buffer *parent, int slot)
bb803951 1698{
ca7a79ad 1699 int level = btrfs_header_level(parent);
416bc658
JB
1700 struct extent_buffer *eb;
1701
bb803951
CM
1702 if (slot < 0)
1703 return NULL;
5f39d397 1704 if (slot >= btrfs_header_nritems(parent))
bb803951 1705 return NULL;
ca7a79ad
CM
1706
1707 BUG_ON(level == 0);
1708
416bc658
JB
1709 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1710 btrfs_level_size(root, level - 1),
1711 btrfs_node_ptr_generation(parent, slot));
1712 if (eb && !extent_buffer_uptodate(eb)) {
1713 free_extent_buffer(eb);
1714 eb = NULL;
1715 }
1716
1717 return eb;
bb803951
CM
1718}
1719
d352ac68
CM
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave an node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	/* remembered for the sanity check at the end */
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	/*
	 * pslot is only assigned when a parent exists; the !parent
	 * branch below returns before pslot is ever read.
	 */
	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		/* publish the new root pointer for lockless readers */
		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	/* more than a quarter full: nothing to balance */
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	/* COW both neighbors so we are free to shift items into them */
	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		/* track where mid's original slot lands if items move left */
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			/* right is now empty: unlink and free it */
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			/* right shrank: refresh its first key in the parent */
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		/* mid went empty: unlink and free it */
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			/* the target item moved into left */
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			/* still in mid, but its slot shifted */
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	/* despite the label name, this is the common error/cleanup exit */
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
1938
d352ac68
CM
/* Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 *
 * Tries to make room in path->nodes[level] by pushing pointers into the
 * left sibling first, then the right sibling, fixing up the parent key
 * and the path so it still points at the original slot.
 *
 * Returns 0 if room was made, 1 if nothing could be pushed, and the
 * path may carry a negative error in 'ret' if COW or a push failed.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	/* leaves are balanced elsewhere (split_leaf), not here */
	if (level == 0)
		return 1;

	mid = path->nodes[level];
	/* the caller must already have COWed mid in this transaction */
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/* no parent means mid is the root: nothing to push into */
	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* left sibling is (almost) full, don't bother */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			/* pointers moved left: mid's first key changed */
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			/* repoint the path at whichever node now holds orig_slot */
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* right sibling is (almost) full, don't bother */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			/* right gained pointers: its first key changed */
			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			/* repoint the path at whichever node now holds orig_slot */
			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
2074
3c69faec 2075/*
d352ac68
CM
2076 * readahead one full node of leaves, finding things that are close
2077 * to the block in 'slot', and triggering ra on them.
3c69faec 2078 */
c8c42864
CM
2079static void reada_for_search(struct btrfs_root *root,
2080 struct btrfs_path *path,
2081 int level, int slot, u64 objectid)
3c69faec 2082{
5f39d397 2083 struct extent_buffer *node;
01f46658 2084 struct btrfs_disk_key disk_key;
3c69faec 2085 u32 nritems;
3c69faec 2086 u64 search;
a7175319 2087 u64 target;
6b80053d 2088 u64 nread = 0;
cb25c2ea 2089 u64 gen;
3c69faec 2090 int direction = path->reada;
5f39d397 2091 struct extent_buffer *eb;
6b80053d
CM
2092 u32 nr;
2093 u32 blocksize;
2094 u32 nscan = 0;
db94535d 2095
a6b6e75e 2096 if (level != 1)
6702ed49
CM
2097 return;
2098
2099 if (!path->nodes[level])
3c69faec
CM
2100 return;
2101
5f39d397 2102 node = path->nodes[level];
925baedd 2103
3c69faec 2104 search = btrfs_node_blockptr(node, slot);
6b80053d
CM
2105 blocksize = btrfs_level_size(root, level - 1);
2106 eb = btrfs_find_tree_block(root, search, blocksize);
5f39d397
CM
2107 if (eb) {
2108 free_extent_buffer(eb);
3c69faec
CM
2109 return;
2110 }
2111
a7175319 2112 target = search;
6b80053d 2113
5f39d397 2114 nritems = btrfs_header_nritems(node);
6b80053d 2115 nr = slot;
25b8b936 2116
d397712b 2117 while (1) {
6b80053d
CM
2118 if (direction < 0) {
2119 if (nr == 0)
2120 break;
2121 nr--;
2122 } else if (direction > 0) {
2123 nr++;
2124 if (nr >= nritems)
2125 break;
3c69faec 2126 }
01f46658
CM
2127 if (path->reada < 0 && objectid) {
2128 btrfs_node_key(node, &disk_key, nr);
2129 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2130 break;
2131 }
6b80053d 2132 search = btrfs_node_blockptr(node, nr);
a7175319
CM
2133 if ((search <= target && target - search <= 65536) ||
2134 (search > target && search - target <= 65536)) {
cb25c2ea 2135 gen = btrfs_node_ptr_generation(node, nr);
cb25c2ea 2136 readahead_tree_block(root, search, blocksize, gen);
6b80053d
CM
2137 nread += blocksize;
2138 }
2139 nscan++;
a7175319 2140 if ((nread > 65536 || nscan > 32))
6b80053d 2141 break;
3c69faec
CM
2142 }
2143}
925baedd 2144
0b08851f
JB
2145static noinline void reada_for_balance(struct btrfs_root *root,
2146 struct btrfs_path *path, int level)
b4ce94de
CM
2147{
2148 int slot;
2149 int nritems;
2150 struct extent_buffer *parent;
2151 struct extent_buffer *eb;
2152 u64 gen;
2153 u64 block1 = 0;
2154 u64 block2 = 0;
b4ce94de
CM
2155 int blocksize;
2156
8c594ea8 2157 parent = path->nodes[level + 1];
b4ce94de 2158 if (!parent)
0b08851f 2159 return;
b4ce94de
CM
2160
2161 nritems = btrfs_header_nritems(parent);
8c594ea8 2162 slot = path->slots[level + 1];
b4ce94de
CM
2163 blocksize = btrfs_level_size(root, level);
2164
2165 if (slot > 0) {
2166 block1 = btrfs_node_blockptr(parent, slot - 1);
2167 gen = btrfs_node_ptr_generation(parent, slot - 1);
2168 eb = btrfs_find_tree_block(root, block1, blocksize);
b9fab919
CM
2169 /*
2170 * if we get -eagain from btrfs_buffer_uptodate, we
2171 * don't want to return eagain here. That will loop
2172 * forever
2173 */
2174 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
2175 block1 = 0;
2176 free_extent_buffer(eb);
2177 }
8c594ea8 2178 if (slot + 1 < nritems) {
b4ce94de
CM
2179 block2 = btrfs_node_blockptr(parent, slot + 1);
2180 gen = btrfs_node_ptr_generation(parent, slot + 1);
2181 eb = btrfs_find_tree_block(root, block2, blocksize);
b9fab919 2182 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
2183 block2 = 0;
2184 free_extent_buffer(eb);
2185 }
8c594ea8 2186
0b08851f
JB
2187 if (block1)
2188 readahead_tree_block(root, block1, blocksize, 0);
2189 if (block2)
2190 readahead_tree_block(root, block2, blocksize, 0);
b4ce94de
CM
2191}
2192
2193
d352ac68 2194/*
d397712b
CM
2195 * when we walk down the tree, it is usually safe to unlock the higher layers
2196 * in the tree. The exceptions are when our path goes through slot 0, because
2197 * operations on the tree might require changing key pointers higher up in the
2198 * tree.
d352ac68 2199 *
d397712b
CM
2200 * callers might also have set path->keep_locks, which tells this code to keep
2201 * the lock if the path points to the last slot in the block. This is part of
2202 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 2203 *
d397712b
CM
2204 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2205 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 2206 */
e02119d5 2207static noinline void unlock_up(struct btrfs_path *path, int level,
f7c79f30
CM
2208 int lowest_unlock, int min_write_lock_level,
2209 int *write_lock_level)
925baedd
CM
2210{
2211 int i;
2212 int skip_level = level;
051e1b9f 2213 int no_skips = 0;
925baedd
CM
2214 struct extent_buffer *t;
2215
2216 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2217 if (!path->nodes[i])
2218 break;
2219 if (!path->locks[i])
2220 break;
051e1b9f 2221 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
2222 skip_level = i + 1;
2223 continue;
2224 }
051e1b9f 2225 if (!no_skips && path->keep_locks) {
925baedd
CM
2226 u32 nritems;
2227 t = path->nodes[i];
2228 nritems = btrfs_header_nritems(t);
051e1b9f 2229 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
2230 skip_level = i + 1;
2231 continue;
2232 }
2233 }
051e1b9f
CM
2234 if (skip_level < i && i >= lowest_unlock)
2235 no_skips = 1;
2236
925baedd
CM
2237 t = path->nodes[i];
2238 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
bd681513 2239 btrfs_tree_unlock_rw(t, path->locks[i]);
925baedd 2240 path->locks[i] = 0;
f7c79f30
CM
2241 if (write_lock_level &&
2242 i > min_write_lock_level &&
2243 i <= *write_lock_level) {
2244 *write_lock_level = i - 1;
2245 }
925baedd
CM
2246 }
2247 }
2248}
2249
b4ce94de
CM
2250/*
2251 * This releases any locks held in the path starting at level and
2252 * going all the way up to the root.
2253 *
2254 * btrfs_search_slot will keep the lock held on higher nodes in a few
2255 * corner cases, such as COW of the block at slot zero in the node. This
2256 * ignores those rules, and it should only be called when there are no
2257 * more updates to be done higher up in the tree.
2258 */
2259noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2260{
2261 int i;
2262
09a2a8f9 2263 if (path->keep_locks)
b4ce94de
CM
2264 return;
2265
2266 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2267 if (!path->nodes[i])
12f4dacc 2268 continue;
b4ce94de 2269 if (!path->locks[i])
12f4dacc 2270 continue;
bd681513 2271 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
b4ce94de
CM
2272 path->locks[i] = 0;
2273 }
2274}
2275
c8c42864
CM
2276/*
2277 * helper function for btrfs_search_slot. The goal is to find a block
2278 * in cache without setting the path to blocking. If we find the block
2279 * we return zero and the path is unchanged.
2280 *
2281 * If we can't find the block, we set the path blocking and do some
2282 * reada. -EAGAIN is returned and the search must be repeated.
2283 */
2284static int
2285read_block_for_search(struct btrfs_trans_handle *trans,
2286 struct btrfs_root *root, struct btrfs_path *p,
2287 struct extent_buffer **eb_ret, int level, int slot,
5d9e75c4 2288 struct btrfs_key *key, u64 time_seq)
c8c42864
CM
2289{
2290 u64 blocknr;
2291 u64 gen;
2292 u32 blocksize;
2293 struct extent_buffer *b = *eb_ret;
2294 struct extent_buffer *tmp;
76a05b35 2295 int ret;
c8c42864
CM
2296
2297 blocknr = btrfs_node_blockptr(b, slot);
2298 gen = btrfs_node_ptr_generation(b, slot);
2299 blocksize = btrfs_level_size(root, level - 1);
2300
2301 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
cb44921a 2302 if (tmp) {
b9fab919 2303 /* first we do an atomic uptodate check */
bdf7c00e
JB
2304 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2305 *eb_ret = tmp;
2306 return 0;
2307 }
2308
2309 /* the pages were up to date, but we failed
2310 * the generation number check. Do a full
2311 * read for the generation number that is correct.
2312 * We must do this without dropping locks so
2313 * we can trust our generation number
2314 */
2315 btrfs_set_path_blocking(p);
2316
2317 /* now we're allowed to do a blocking uptodate check */
2318 ret = btrfs_read_buffer(tmp, gen);
2319 if (!ret) {
2320 *eb_ret = tmp;
2321 return 0;
cb44921a 2322 }
bdf7c00e
JB
2323 free_extent_buffer(tmp);
2324 btrfs_release_path(p);
2325 return -EIO;
c8c42864
CM
2326 }
2327
2328 /*
2329 * reduce lock contention at high levels
2330 * of the btree by dropping locks before
76a05b35
CM
2331 * we read. Don't release the lock on the current
2332 * level because we need to walk this node to figure
2333 * out which blocks to read.
c8c42864 2334 */
8c594ea8
CM
2335 btrfs_unlock_up_safe(p, level + 1);
2336 btrfs_set_path_blocking(p);
2337
cb44921a 2338 free_extent_buffer(tmp);
c8c42864
CM
2339 if (p->reada)
2340 reada_for_search(root, p, level, slot, key->objectid);
2341
b3b4aa74 2342 btrfs_release_path(p);
76a05b35
CM
2343
2344 ret = -EAGAIN;
5bdd3536 2345 tmp = read_tree_block(root, blocknr, blocksize, 0);
76a05b35
CM
2346 if (tmp) {
2347 /*
2348 * If the read above didn't mark this buffer up to date,
2349 * it will never end up being up to date. Set ret to EIO now
2350 * and give up so that our caller doesn't loop forever
2351 * on our EAGAINs.
2352 */
b9fab919 2353 if (!btrfs_buffer_uptodate(tmp, 0, 0))
76a05b35 2354 ret = -EIO;
c8c42864 2355 free_extent_buffer(tmp);
76a05b35
CM
2356 }
2357 return ret;
c8c42864
CM
2358}
2359
2360/*
2361 * helper function for btrfs_search_slot. This does all of the checks
2362 * for node-level blocks and does any balancing required based on
2363 * the ins_len.
2364 *
2365 * If no extra work was required, zero is returned. If we had to
2366 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2367 * start over
2368 */
2369static int
2370setup_nodes_for_search(struct btrfs_trans_handle *trans,
2371 struct btrfs_root *root, struct btrfs_path *p,
bd681513
CM
2372 struct extent_buffer *b, int level, int ins_len,
2373 int *write_lock_level)
c8c42864
CM
2374{
2375 int ret;
2376 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2377 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2378 int sret;
2379
bd681513
CM
2380 if (*write_lock_level < level + 1) {
2381 *write_lock_level = level + 1;
2382 btrfs_release_path(p);
2383 goto again;
2384 }
2385
c8c42864 2386 btrfs_set_path_blocking(p);
0b08851f 2387 reada_for_balance(root, p, level);
c8c42864 2388 sret = split_node(trans, root, p, level);
bd681513 2389 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2390
2391 BUG_ON(sret > 0);
2392 if (sret) {
2393 ret = sret;
2394 goto done;
2395 }
2396 b = p->nodes[level];
2397 } else if (ins_len < 0 && btrfs_header_nritems(b) <
cfbb9308 2398 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
c8c42864
CM
2399 int sret;
2400
bd681513
CM
2401 if (*write_lock_level < level + 1) {
2402 *write_lock_level = level + 1;
2403 btrfs_release_path(p);
2404 goto again;
2405 }
2406
c8c42864 2407 btrfs_set_path_blocking(p);
0b08851f 2408 reada_for_balance(root, p, level);
c8c42864 2409 sret = balance_level(trans, root, p, level);
bd681513 2410 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
2411
2412 if (sret) {
2413 ret = sret;
2414 goto done;
2415 }
2416 b = p->nodes[level];
2417 if (!b) {
b3b4aa74 2418 btrfs_release_path(p);
c8c42864
CM
2419 goto again;
2420 }
2421 BUG_ON(btrfs_header_nritems(b) == 1);
2422 }
2423 return 0;
2424
2425again:
2426 ret = -EAGAIN;
2427done:
2428 return ret;
2429}
2430
74123bd7
CM
2431/*
2432 * look for key in the tree. path is filled in with nodes along the way
2433 * if key is found, we return zero and you can find the item in the leaf
2434 * level of the path (level 0)
2435 *
2436 * If the key isn't found, the path points to the slot where it should
aa5d6bed
CM
2437 * be inserted, and 1 is returned. If there are other errors during the
2438 * search a negative error number is returned.
97571fd0
CM
2439 *
2440 * if ins_len > 0, nodes and leaves will be split as we walk down the
2441 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2442 * possible)
74123bd7 2443 */
e089f05c
CM
2444int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2445 *root, struct btrfs_key *key, struct btrfs_path *p, int
2446 ins_len, int cow)
be0e5c09 2447{
5f39d397 2448 struct extent_buffer *b;
be0e5c09
CM
2449 int slot;
2450 int ret;
33c66f43 2451 int err;
be0e5c09 2452 int level;
925baedd 2453 int lowest_unlock = 1;
bd681513
CM
2454 int root_lock;
2455 /* everything at write_lock_level or lower must be write locked */
2456 int write_lock_level = 0;
9f3a7427 2457 u8 lowest_level = 0;
f7c79f30 2458 int min_write_lock_level;
9f3a7427 2459
6702ed49 2460 lowest_level = p->lowest_level;
323ac95b 2461 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 2462 WARN_ON(p->nodes[0] != NULL);
25179201 2463
bd681513 2464 if (ins_len < 0) {
925baedd 2465 lowest_unlock = 2;
65b51a00 2466
bd681513
CM
2467 /* when we are removing items, we might have to go up to level
2468 * two as we update tree pointers Make sure we keep write
2469 * for those levels as well
2470 */
2471 write_lock_level = 2;
2472 } else if (ins_len > 0) {
2473 /*
2474 * for inserting items, make sure we have a write lock on
2475 * level 1 so we can update keys
2476 */
2477 write_lock_level = 1;
2478 }
2479
2480 if (!cow)
2481 write_lock_level = -1;
2482
09a2a8f9 2483 if (cow && (p->keep_locks || p->lowest_level))
bd681513
CM
2484 write_lock_level = BTRFS_MAX_LEVEL;
2485
f7c79f30
CM
2486 min_write_lock_level = write_lock_level;
2487
bb803951 2488again:
bd681513
CM
2489 /*
2490 * we try very hard to do read locks on the root
2491 */
2492 root_lock = BTRFS_READ_LOCK;
2493 level = 0;
5d4f98a2 2494 if (p->search_commit_root) {
bd681513
CM
2495 /*
2496 * the commit roots are read only
2497 * so we always do read locks
2498 */
5d4f98a2
YZ
2499 b = root->commit_root;
2500 extent_buffer_get(b);
bd681513 2501 level = btrfs_header_level(b);
5d4f98a2 2502 if (!p->skip_locking)
bd681513 2503 btrfs_tree_read_lock(b);
5d4f98a2 2504 } else {
bd681513 2505 if (p->skip_locking) {
5d4f98a2 2506 b = btrfs_root_node(root);
bd681513
CM
2507 level = btrfs_header_level(b);
2508 } else {
2509 /* we don't know the level of the root node
2510 * until we actually have it read locked
2511 */
2512 b = btrfs_read_lock_root_node(root);
2513 level = btrfs_header_level(b);
2514 if (level <= write_lock_level) {
2515 /* whoops, must trade for write lock */
2516 btrfs_tree_read_unlock(b);
2517 free_extent_buffer(b);
2518 b = btrfs_lock_root_node(root);
2519 root_lock = BTRFS_WRITE_LOCK;
2520
2521 /* the level might have changed, check again */
2522 level = btrfs_header_level(b);
2523 }
2524 }
5d4f98a2 2525 }
bd681513
CM
2526 p->nodes[level] = b;
2527 if (!p->skip_locking)
2528 p->locks[level] = root_lock;
925baedd 2529
eb60ceac 2530 while (b) {
5f39d397 2531 level = btrfs_header_level(b);
65b51a00
CM
2532
2533 /*
2534 * setup the path here so we can release it under lock
2535 * contention with the cow code
2536 */
02217ed2 2537 if (cow) {
c8c42864
CM
2538 /*
2539 * if we don't really need to cow this block
2540 * then we don't want to set the path blocking,
2541 * so we test it here
2542 */
5d4f98a2 2543 if (!should_cow_block(trans, root, b))
65b51a00 2544 goto cow_done;
5d4f98a2 2545
b4ce94de
CM
2546 btrfs_set_path_blocking(p);
2547
bd681513
CM
2548 /*
2549 * must have write locks on this node and the
2550 * parent
2551 */
5124e00e
JB
2552 if (level > write_lock_level ||
2553 (level + 1 > write_lock_level &&
2554 level + 1 < BTRFS_MAX_LEVEL &&
2555 p->nodes[level + 1])) {
bd681513
CM
2556 write_lock_level = level + 1;
2557 btrfs_release_path(p);
2558 goto again;
2559 }
2560
33c66f43
YZ
2561 err = btrfs_cow_block(trans, root, b,
2562 p->nodes[level + 1],
2563 p->slots[level + 1], &b);
2564 if (err) {
33c66f43 2565 ret = err;
65b51a00 2566 goto done;
54aa1f4d 2567 }
02217ed2 2568 }
65b51a00 2569cow_done:
02217ed2 2570 BUG_ON(!cow && ins_len);
65b51a00 2571
eb60ceac 2572 p->nodes[level] = b;
bd681513 2573 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de
CM
2574
2575 /*
2576 * we have a lock on b and as long as we aren't changing
2577 * the tree, there is no way to for the items in b to change.
2578 * It is safe to drop the lock on our parent before we
2579 * go through the expensive btree search on b.
2580 *
2581 * If cow is true, then we might be changing slot zero,
2582 * which may require changing the parent. So, we can't
2583 * drop the lock until after we know which slot we're
2584 * operating on.
2585 */
2586 if (!cow)
2587 btrfs_unlock_up_safe(p, level + 1);
2588
5f39d397 2589 ret = bin_search(b, key, level, &slot);
b4ce94de 2590
5f39d397 2591 if (level != 0) {
33c66f43
YZ
2592 int dec = 0;
2593 if (ret && slot > 0) {
2594 dec = 1;
be0e5c09 2595 slot -= 1;
33c66f43 2596 }
be0e5c09 2597 p->slots[level] = slot;
33c66f43 2598 err = setup_nodes_for_search(trans, root, p, b, level,
bd681513 2599 ins_len, &write_lock_level);
33c66f43 2600 if (err == -EAGAIN)
c8c42864 2601 goto again;
33c66f43
YZ
2602 if (err) {
2603 ret = err;
c8c42864 2604 goto done;
33c66f43 2605 }
c8c42864
CM
2606 b = p->nodes[level];
2607 slot = p->slots[level];
b4ce94de 2608
bd681513
CM
2609 /*
2610 * slot 0 is special, if we change the key
2611 * we have to update the parent pointer
2612 * which means we must have a write lock
2613 * on the parent
2614 */
2615 if (slot == 0 && cow &&
2616 write_lock_level < level + 1) {
2617 write_lock_level = level + 1;
2618 btrfs_release_path(p);
2619 goto again;
2620 }
2621
f7c79f30
CM
2622 unlock_up(p, level, lowest_unlock,
2623 min_write_lock_level, &write_lock_level);
f9efa9c7 2624
925baedd 2625 if (level == lowest_level) {
33c66f43
YZ
2626 if (dec)
2627 p->slots[level]++;
5b21f2ed 2628 goto done;
925baedd 2629 }
ca7a79ad 2630
33c66f43 2631 err = read_block_for_search(trans, root, p,
5d9e75c4 2632 &b, level, slot, key, 0);
33c66f43 2633 if (err == -EAGAIN)
c8c42864 2634 goto again;
33c66f43
YZ
2635 if (err) {
2636 ret = err;
76a05b35 2637 goto done;
33c66f43 2638 }
76a05b35 2639
b4ce94de 2640 if (!p->skip_locking) {
bd681513
CM
2641 level = btrfs_header_level(b);
2642 if (level <= write_lock_level) {
2643 err = btrfs_try_tree_write_lock(b);
2644 if (!err) {
2645 btrfs_set_path_blocking(p);
2646 btrfs_tree_lock(b);
2647 btrfs_clear_path_blocking(p, b,
2648 BTRFS_WRITE_LOCK);
2649 }
2650 p->locks[level] = BTRFS_WRITE_LOCK;
2651 } else {
2652 err = btrfs_try_tree_read_lock(b);
2653 if (!err) {
2654 btrfs_set_path_blocking(p);
2655 btrfs_tree_read_lock(b);
2656 btrfs_clear_path_blocking(p, b,
2657 BTRFS_READ_LOCK);
2658 }
2659 p->locks[level] = BTRFS_READ_LOCK;
b4ce94de 2660 }
bd681513 2661 p->nodes[level] = b;
b4ce94de 2662 }
be0e5c09
CM
2663 } else {
2664 p->slots[level] = slot;
87b29b20
YZ
2665 if (ins_len > 0 &&
2666 btrfs_leaf_free_space(root, b) < ins_len) {
bd681513
CM
2667 if (write_lock_level < 1) {
2668 write_lock_level = 1;
2669 btrfs_release_path(p);
2670 goto again;
2671 }
2672
b4ce94de 2673 btrfs_set_path_blocking(p);
33c66f43
YZ
2674 err = split_leaf(trans, root, key,
2675 p, ins_len, ret == 0);
bd681513 2676 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de 2677
33c66f43
YZ
2678 BUG_ON(err > 0);
2679 if (err) {
2680 ret = err;
65b51a00
CM
2681 goto done;
2682 }
5c680ed6 2683 }
459931ec 2684 if (!p->search_for_split)
f7c79f30
CM
2685 unlock_up(p, level, lowest_unlock,
2686 min_write_lock_level, &write_lock_level);
65b51a00 2687 goto done;
be0e5c09
CM
2688 }
2689 }
65b51a00
CM
2690 ret = 1;
2691done:
b4ce94de
CM
2692 /*
2693 * we don't really know what they plan on doing with the path
2694 * from here on, so for now just mark it as blocking
2695 */
b9473439
CM
2696 if (!p->leave_spinning)
2697 btrfs_set_path_blocking(p);
76a05b35 2698 if (ret < 0)
b3b4aa74 2699 btrfs_release_path(p);
65b51a00 2700 return ret;
be0e5c09
CM
2701}
2702
5d9e75c4
JS
2703/*
2704 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2705 * current state of the tree together with the operations recorded in the tree
2706 * modification log to search for the key in a previous version of this tree, as
2707 * denoted by the time_seq parameter.
2708 *
2709 * Naturally, there is no support for insert, delete or cow operations.
2710 *
2711 * The resulting path and return value will be set up as if we called
2712 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2713 */
2714int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2715 struct btrfs_path *p, u64 time_seq)
2716{
2717 struct extent_buffer *b;
2718 int slot;
2719 int ret;
2720 int err;
2721 int level;
2722 int lowest_unlock = 1;
2723 u8 lowest_level = 0;
2724
2725 lowest_level = p->lowest_level;
2726 WARN_ON(p->nodes[0] != NULL);
2727
2728 if (p->search_commit_root) {
2729 BUG_ON(time_seq);
2730 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2731 }
2732
2733again:
5d9e75c4 2734 b = get_old_root(root, time_seq);
5d9e75c4 2735 level = btrfs_header_level(b);
5d9e75c4
JS
2736 p->locks[level] = BTRFS_READ_LOCK;
2737
2738 while (b) {
2739 level = btrfs_header_level(b);
2740 p->nodes[level] = b;
2741 btrfs_clear_path_blocking(p, NULL, 0);
2742
2743 /*
2744 * we have a lock on b and as long as we aren't changing
2745 * the tree, there is no way to for the items in b to change.
2746 * It is safe to drop the lock on our parent before we
2747 * go through the expensive btree search on b.
2748 */
2749 btrfs_unlock_up_safe(p, level + 1);
2750
2751 ret = bin_search(b, key, level, &slot);
2752
2753 if (level != 0) {
2754 int dec = 0;
2755 if (ret && slot > 0) {
2756 dec = 1;
2757 slot -= 1;
2758 }
2759 p->slots[level] = slot;
2760 unlock_up(p, level, lowest_unlock, 0, NULL);
2761
2762 if (level == lowest_level) {
2763 if (dec)
2764 p->slots[level]++;
2765 goto done;
2766 }
2767
2768 err = read_block_for_search(NULL, root, p, &b, level,
2769 slot, key, time_seq);
2770 if (err == -EAGAIN)
2771 goto again;
2772 if (err) {
2773 ret = err;
2774 goto done;
2775 }
2776
2777 level = btrfs_header_level(b);
2778 err = btrfs_try_tree_read_lock(b);
2779 if (!err) {
2780 btrfs_set_path_blocking(p);
2781 btrfs_tree_read_lock(b);
2782 btrfs_clear_path_blocking(p, b,
2783 BTRFS_READ_LOCK);
2784 }
9ec72677 2785 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
db7f3436
JB
2786 if (!b) {
2787 ret = -ENOMEM;
2788 goto done;
2789 }
5d9e75c4
JS
2790 p->locks[level] = BTRFS_READ_LOCK;
2791 p->nodes[level] = b;
5d9e75c4
JS
2792 } else {
2793 p->slots[level] = slot;
2794 unlock_up(p, level, lowest_unlock, 0, NULL);
2795 goto done;
2796 }
2797 }
2798 ret = 1;
2799done:
2800 if (!p->leave_spinning)
2801 btrfs_set_path_blocking(p);
2802 if (ret < 0)
2803 btrfs_release_path(p);
2804
2805 return ret;
2806}
2807
2f38b3e1
AJ
2808/*
2809 * helper to use instead of search slot if no exact match is needed but
2810 * instead the next or previous item should be returned.
2811 * When find_higher is true, the next higher item is returned, the next lower
2812 * otherwise.
2813 * When return_any and find_higher are both true, and no higher item is found,
2814 * return the next lower instead.
2815 * When return_any is true and find_higher is false, and no lower item is found,
2816 * return the next higher instead.
2817 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2818 * < 0 on error
2819 */
2820int btrfs_search_slot_for_read(struct btrfs_root *root,
2821 struct btrfs_key *key, struct btrfs_path *p,
2822 int find_higher, int return_any)
2823{
2824 int ret;
2825 struct extent_buffer *leaf;
2826
2827again:
2828 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2829 if (ret <= 0)
2830 return ret;
2831 /*
2832 * a return value of 1 means the path is at the position where the
2833 * item should be inserted. Normally this is the next bigger item,
2834 * but in case the previous item is the last in a leaf, path points
2835 * to the first free slot in the previous leaf, i.e. at an invalid
2836 * item.
2837 */
2838 leaf = p->nodes[0];
2839
2840 if (find_higher) {
2841 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2842 ret = btrfs_next_leaf(root, p);
2843 if (ret <= 0)
2844 return ret;
2845 if (!return_any)
2846 return 1;
2847 /*
2848 * no higher item found, return the next
2849 * lower instead
2850 */
2851 return_any = 0;
2852 find_higher = 0;
2853 btrfs_release_path(p);
2854 goto again;
2855 }
2856 } else {
e6793769
AJ
2857 if (p->slots[0] == 0) {
2858 ret = btrfs_prev_leaf(root, p);
2859 if (ret < 0)
2860 return ret;
2861 if (!ret) {
2862 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2863 return 0;
2f38b3e1 2864 }
e6793769
AJ
2865 if (!return_any)
2866 return 1;
2867 /*
2868 * no lower item found, return the next
2869 * higher instead
2870 */
2871 return_any = 0;
2872 find_higher = 1;
2873 btrfs_release_path(p);
2874 goto again;
2875 } else {
2f38b3e1
AJ
2876 --p->slots[0];
2877 }
2878 }
2879 return 0;
2880}
2881
74123bd7
CM
2882/*
2883 * adjust the pointers going up the tree, starting at level
2884 * making sure the right key of each node is points to 'key'.
2885 * This is used after shifting pointers to the left, so it stops
2886 * fixing up pointers when a given leaf/node is not in slot 0 of the
2887 * higher levels
aa5d6bed 2888 *
74123bd7 2889 */
d6a0a126 2890static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
143bede5 2891 struct btrfs_disk_key *key, int level)
be0e5c09
CM
2892{
2893 int i;
5f39d397
CM
2894 struct extent_buffer *t;
2895
234b63a0 2896 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 2897 int tslot = path->slots[i];
eb60ceac 2898 if (!path->nodes[i])
be0e5c09 2899 break;
5f39d397 2900 t = path->nodes[i];
32adf090 2901 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
5f39d397 2902 btrfs_set_node_key(t, key, tslot);
d6025579 2903 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
2904 if (tslot != 0)
2905 break;
2906 }
2907}
2908
31840ae1
ZY
2909/*
2910 * update item key.
2911 *
2912 * This function isn't completely safe. It's the caller's responsibility
2913 * that the new key won't break the order
2914 */
afe5fea7 2915void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
143bede5 2916 struct btrfs_key *new_key)
31840ae1
ZY
2917{
2918 struct btrfs_disk_key disk_key;
2919 struct extent_buffer *eb;
2920 int slot;
2921
2922 eb = path->nodes[0];
2923 slot = path->slots[0];
2924 if (slot > 0) {
2925 btrfs_item_key(eb, &disk_key, slot - 1);
143bede5 2926 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
31840ae1
ZY
2927 }
2928 if (slot < btrfs_header_nritems(eb) - 1) {
2929 btrfs_item_key(eb, &disk_key, slot + 1);
143bede5 2930 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
31840ae1
ZY
2931 }
2932
2933 btrfs_cpu_key_to_disk(&disk_key, new_key);
2934 btrfs_set_item_key(eb, &disk_key, slot);
2935 btrfs_mark_buffer_dirty(eb);
2936 if (slot == 0)
d6a0a126 2937 fixup_low_keys(root, path, &disk_key, 1);
31840ae1
ZY
2938}
2939
74123bd7
CM
2940/*
2941 * try to push data from one node into the next node left in the
79f95c82 2942 * tree.
aa5d6bed
CM
2943 *
2944 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2945 * error, and > 0 if there was no room in the left hand block.
74123bd7 2946 */
98ed5174
CM
2947static int push_node_left(struct btrfs_trans_handle *trans,
2948 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 2949 struct extent_buffer *src, int empty)
be0e5c09 2950{
be0e5c09 2951 int push_items = 0;
bb803951
CM
2952 int src_nritems;
2953 int dst_nritems;
aa5d6bed 2954 int ret = 0;
be0e5c09 2955
5f39d397
CM
2956 src_nritems = btrfs_header_nritems(src);
2957 dst_nritems = btrfs_header_nritems(dst);
123abc88 2958 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
2959 WARN_ON(btrfs_header_generation(src) != trans->transid);
2960 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 2961
bce4eae9 2962 if (!empty && src_nritems <= 8)
971a1f66
CM
2963 return 1;
2964
d397712b 2965 if (push_items <= 0)
be0e5c09
CM
2966 return 1;
2967
bce4eae9 2968 if (empty) {
971a1f66 2969 push_items = min(src_nritems, push_items);
bce4eae9
CM
2970 if (push_items < src_nritems) {
2971 /* leave at least 8 pointers in the node if
2972 * we aren't going to empty it
2973 */
2974 if (src_nritems - push_items < 8) {
2975 if (push_items <= 8)
2976 return 1;
2977 push_items -= 8;
2978 }
2979 }
2980 } else
2981 push_items = min(src_nritems - 8, push_items);
79f95c82 2982
f230475e 2983 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
90f8d62e 2984 push_items);
5f39d397
CM
2985 copy_extent_buffer(dst, src,
2986 btrfs_node_key_ptr_offset(dst_nritems),
2987 btrfs_node_key_ptr_offset(0),
d397712b 2988 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 2989
bb803951 2990 if (push_items < src_nritems) {
57911b8b
JS
2991 /*
2992 * don't call tree_mod_log_eb_move here, key removal was already
2993 * fully logged by tree_mod_log_eb_copy above.
2994 */
5f39d397
CM
2995 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2996 btrfs_node_key_ptr_offset(push_items),
2997 (src_nritems - push_items) *
2998 sizeof(struct btrfs_key_ptr));
2999 }
3000 btrfs_set_header_nritems(src, src_nritems - push_items);
3001 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3002 btrfs_mark_buffer_dirty(src);
3003 btrfs_mark_buffer_dirty(dst);
31840ae1 3004
79f95c82
CM
3005 return ret;
3006}
3007
3008/*
3009 * try to push data from one node into the next node right in the
3010 * tree.
3011 *
3012 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3013 * error, and > 0 if there was no room in the right hand block.
3014 *
3015 * this will only push up to 1/2 the contents of the left node over
3016 */
5f39d397
CM
3017static int balance_node_right(struct btrfs_trans_handle *trans,
3018 struct btrfs_root *root,
3019 struct extent_buffer *dst,
3020 struct extent_buffer *src)
79f95c82 3021{
79f95c82
CM
3022 int push_items = 0;
3023 int max_push;
3024 int src_nritems;
3025 int dst_nritems;
3026 int ret = 0;
79f95c82 3027
7bb86316
CM
3028 WARN_ON(btrfs_header_generation(src) != trans->transid);
3029 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3030
5f39d397
CM
3031 src_nritems = btrfs_header_nritems(src);
3032 dst_nritems = btrfs_header_nritems(dst);
123abc88 3033 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 3034 if (push_items <= 0)
79f95c82 3035 return 1;
bce4eae9 3036
d397712b 3037 if (src_nritems < 4)
bce4eae9 3038 return 1;
79f95c82
CM
3039
3040 max_push = src_nritems / 2 + 1;
3041 /* don't try to empty the node */
d397712b 3042 if (max_push >= src_nritems)
79f95c82 3043 return 1;
252c38f0 3044
79f95c82
CM
3045 if (max_push < push_items)
3046 push_items = max_push;
3047
f230475e 3048 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
5f39d397
CM
3049 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3050 btrfs_node_key_ptr_offset(0),
3051 (dst_nritems) *
3052 sizeof(struct btrfs_key_ptr));
d6025579 3053
f230475e 3054 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
90f8d62e 3055 src_nritems - push_items, push_items);
5f39d397
CM
3056 copy_extent_buffer(dst, src,
3057 btrfs_node_key_ptr_offset(0),
3058 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 3059 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 3060
5f39d397
CM
3061 btrfs_set_header_nritems(src, src_nritems - push_items);
3062 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 3063
5f39d397
CM
3064 btrfs_mark_buffer_dirty(src);
3065 btrfs_mark_buffer_dirty(dst);
31840ae1 3066
aa5d6bed 3067 return ret;
be0e5c09
CM
3068}
3069
97571fd0
CM
3070/*
3071 * helper function to insert a new root level in the tree.
3072 * A new node is allocated, and a single item is inserted to
3073 * point to the existing root
aa5d6bed
CM
3074 *
3075 * returns zero on success or < 0 on failure.
97571fd0 3076 */
d397712b 3077static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397 3078 struct btrfs_root *root,
fdd99c72 3079 struct btrfs_path *path, int level)
5c680ed6 3080{
7bb86316 3081 u64 lower_gen;
5f39d397
CM
3082 struct extent_buffer *lower;
3083 struct extent_buffer *c;
925baedd 3084 struct extent_buffer *old;
5f39d397 3085 struct btrfs_disk_key lower_key;
5c680ed6
CM
3086
3087 BUG_ON(path->nodes[level]);
3088 BUG_ON(path->nodes[level-1] != root->node);
3089
7bb86316
CM
3090 lower = path->nodes[level-1];
3091 if (level == 1)
3092 btrfs_item_key(lower, &lower_key, 0);
3093 else
3094 btrfs_node_key(lower, &lower_key, 0);
3095
31840ae1 3096 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 3097 root->root_key.objectid, &lower_key,
5581a51a 3098 level, root->node->start, 0);
5f39d397
CM
3099 if (IS_ERR(c))
3100 return PTR_ERR(c);
925baedd 3101
f0486c68
YZ
3102 root_add_used(root, root->nodesize);
3103
5d4f98a2 3104 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
3105 btrfs_set_header_nritems(c, 1);
3106 btrfs_set_header_level(c, level);
db94535d 3107 btrfs_set_header_bytenr(c, c->start);
5f39d397 3108 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 3109 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 3110 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
3111
3112 write_extent_buffer(c, root->fs_info->fsid,
3113 (unsigned long)btrfs_header_fsid(c),
3114 BTRFS_FSID_SIZE);
e17cade2
CM
3115
3116 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3117 (unsigned long)btrfs_header_chunk_tree_uuid(c),
3118 BTRFS_UUID_SIZE);
3119
5f39d397 3120 btrfs_set_node_key(c, &lower_key, 0);
db94535d 3121 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 3122 lower_gen = btrfs_header_generation(lower);
31840ae1 3123 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
3124
3125 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 3126
5f39d397 3127 btrfs_mark_buffer_dirty(c);
d5719762 3128
925baedd 3129 old = root->node;
fdd99c72 3130 tree_mod_log_set_root_pointer(root, c, 0);
240f62c8 3131 rcu_assign_pointer(root->node, c);
925baedd
CM
3132
3133 /* the super has an extra ref to root->node */
3134 free_extent_buffer(old);
3135
0b86a832 3136 add_root_to_dirty_list(root);
5f39d397
CM
3137 extent_buffer_get(c);
3138 path->nodes[level] = c;
bd681513 3139 path->locks[level] = BTRFS_WRITE_LOCK;
5c680ed6
CM
3140 path->slots[level] = 0;
3141 return 0;
3142}
3143
74123bd7
CM
3144/*
3145 * worker function to insert a single pointer in a node.
3146 * the node should have enough room for the pointer already
97571fd0 3147 *
74123bd7
CM
3148 * slot and level indicate where you want the key to go, and
3149 * blocknr is the block the key points to.
3150 */
143bede5
JM
3151static void insert_ptr(struct btrfs_trans_handle *trans,
3152 struct btrfs_root *root, struct btrfs_path *path,
3153 struct btrfs_disk_key *key, u64 bytenr,
c3e06965 3154 int slot, int level)
74123bd7 3155{
5f39d397 3156 struct extent_buffer *lower;
74123bd7 3157 int nritems;
f3ea38da 3158 int ret;
5c680ed6
CM
3159
3160 BUG_ON(!path->nodes[level]);
f0486c68 3161 btrfs_assert_tree_locked(path->nodes[level]);
5f39d397
CM
3162 lower = path->nodes[level];
3163 nritems = btrfs_header_nritems(lower);
c293498b 3164 BUG_ON(slot > nritems);
143bede5 3165 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
74123bd7 3166 if (slot != nritems) {
c3e06965 3167 if (level)
f3ea38da
JS
3168 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3169 slot, nritems - slot);
5f39d397
CM
3170 memmove_extent_buffer(lower,
3171 btrfs_node_key_ptr_offset(slot + 1),
3172 btrfs_node_key_ptr_offset(slot),
d6025579 3173 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 3174 }
c3e06965 3175 if (level) {
f3ea38da 3176 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
c8cc6341 3177 MOD_LOG_KEY_ADD, GFP_NOFS);
f3ea38da
JS
3178 BUG_ON(ret < 0);
3179 }
5f39d397 3180 btrfs_set_node_key(lower, key, slot);
db94535d 3181 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
3182 WARN_ON(trans->transid == 0);
3183 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
3184 btrfs_set_header_nritems(lower, nritems + 1);
3185 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
3186}
3187
97571fd0
CM
3188/*
3189 * split the node at the specified level in path in two.
3190 * The path is corrected to point to the appropriate node after the split
3191 *
3192 * Before splitting this tries to make some room in the node by pushing
3193 * left and right, if either one works, it returns right away.
aa5d6bed
CM
3194 *
3195 * returns 0 on success and < 0 on failure
97571fd0 3196 */
e02119d5
CM
3197static noinline int split_node(struct btrfs_trans_handle *trans,
3198 struct btrfs_root *root,
3199 struct btrfs_path *path, int level)
be0e5c09 3200{
5f39d397
CM
3201 struct extent_buffer *c;
3202 struct extent_buffer *split;
3203 struct btrfs_disk_key disk_key;
be0e5c09 3204 int mid;
5c680ed6 3205 int ret;
7518a238 3206 u32 c_nritems;
eb60ceac 3207
5f39d397 3208 c = path->nodes[level];
7bb86316 3209 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 3210 if (c == root->node) {
d9abbf1c 3211 /*
90f8d62e
JS
3212 * trying to split the root, lets make a new one
3213 *
fdd99c72 3214 * tree mod log: We don't log_removal old root in
90f8d62e
JS
3215 * insert_new_root, because that root buffer will be kept as a
3216 * normal node. We are going to log removal of half of the
3217 * elements below with tree_mod_log_eb_copy. We're holding a
3218 * tree lock on the buffer, which is why we cannot race with
3219 * other tree_mod_log users.
d9abbf1c 3220 */
fdd99c72 3221 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
3222 if (ret)
3223 return ret;
b3612421 3224 } else {
e66f709b 3225 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
3226 c = path->nodes[level];
3227 if (!ret && btrfs_header_nritems(c) <
c448acf0 3228 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
e66f709b 3229 return 0;
54aa1f4d
CM
3230 if (ret < 0)
3231 return ret;
be0e5c09 3232 }
e66f709b 3233
5f39d397 3234 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
3235 mid = (c_nritems + 1) / 2;
3236 btrfs_node_key(c, &disk_key, mid);
7bb86316 3237
5d4f98a2 3238 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
31840ae1 3239 root->root_key.objectid,
5581a51a 3240 &disk_key, level, c->start, 0);
5f39d397
CM
3241 if (IS_ERR(split))
3242 return PTR_ERR(split);
3243
f0486c68
YZ
3244 root_add_used(root, root->nodesize);
3245
5d4f98a2 3246 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
5f39d397 3247 btrfs_set_header_level(split, btrfs_header_level(c));
db94535d 3248 btrfs_set_header_bytenr(split, split->start);
5f39d397 3249 btrfs_set_header_generation(split, trans->transid);
5d4f98a2 3250 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
3251 btrfs_set_header_owner(split, root->root_key.objectid);
3252 write_extent_buffer(split, root->fs_info->fsid,
3253 (unsigned long)btrfs_header_fsid(split),
3254 BTRFS_FSID_SIZE);
e17cade2
CM
3255 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3256 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3257 BTRFS_UUID_SIZE);
54aa1f4d 3258
90f8d62e 3259 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
5f39d397
CM
3260 copy_extent_buffer(split, c,
3261 btrfs_node_key_ptr_offset(0),
3262 btrfs_node_key_ptr_offset(mid),
3263 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3264 btrfs_set_header_nritems(split, c_nritems - mid);
3265 btrfs_set_header_nritems(c, mid);
aa5d6bed
CM
3266 ret = 0;
3267
5f39d397
CM
3268 btrfs_mark_buffer_dirty(c);
3269 btrfs_mark_buffer_dirty(split);
3270
143bede5 3271 insert_ptr(trans, root, path, &disk_key, split->start,
c3e06965 3272 path->slots[level + 1] + 1, level + 1);
aa5d6bed 3273
5de08d7d 3274 if (path->slots[level] >= mid) {
5c680ed6 3275 path->slots[level] -= mid;
925baedd 3276 btrfs_tree_unlock(c);
5f39d397
CM
3277 free_extent_buffer(c);
3278 path->nodes[level] = split;
5c680ed6
CM
3279 path->slots[level + 1] += 1;
3280 } else {
925baedd 3281 btrfs_tree_unlock(split);
5f39d397 3282 free_extent_buffer(split);
be0e5c09 3283 }
aa5d6bed 3284 return ret;
be0e5c09
CM
3285}
3286
74123bd7
CM
3287/*
3288 * how many bytes are required to store the items in a leaf. start
3289 * and nr indicate which items in the leaf to check. This totals up the
3290 * space used both by the item structs and the item data
3291 */
5f39d397 3292static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09 3293{
41be1f3b
JB
3294 struct btrfs_item *start_item;
3295 struct btrfs_item *end_item;
3296 struct btrfs_map_token token;
be0e5c09 3297 int data_len;
5f39d397 3298 int nritems = btrfs_header_nritems(l);
d4dbff95 3299 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
3300
3301 if (!nr)
3302 return 0;
41be1f3b
JB
3303 btrfs_init_map_token(&token);
3304 start_item = btrfs_item_nr(l, start);
3305 end_item = btrfs_item_nr(l, end);
3306 data_len = btrfs_token_item_offset(l, start_item, &token) +
3307 btrfs_token_item_size(l, start_item, &token);
3308 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
0783fcfc 3309 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 3310 WARN_ON(data_len < 0);
be0e5c09
CM
3311 return data_len;
3312}
3313
d4dbff95
CM
3314/*
3315 * The space between the end of the leaf items and
3316 * the start of the leaf data. IOW, how much room
3317 * the leaf has left for both items and data
3318 */
d397712b 3319noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 3320 struct extent_buffer *leaf)
d4dbff95 3321{
5f39d397
CM
3322 int nritems = btrfs_header_nritems(leaf);
3323 int ret;
3324 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3325 if (ret < 0) {
d397712b
CM
3326 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3327 "used %d nritems %d\n",
ae2f5411 3328 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
3329 leaf_space_used(leaf, 0, nritems), nritems);
3330 }
3331 return ret;
d4dbff95
CM
3332}
3333
99d8f83c
CM
3334/*
3335 * min slot controls the lowest index we're willing to push to the
3336 * right. We'll push up to and including min_slot, but no lower
3337 */
44871b1b
CM
3338static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3339 struct btrfs_root *root,
3340 struct btrfs_path *path,
3341 int data_size, int empty,
3342 struct extent_buffer *right,
99d8f83c
CM
3343 int free_space, u32 left_nritems,
3344 u32 min_slot)
00ec4c51 3345{
5f39d397 3346 struct extent_buffer *left = path->nodes[0];
44871b1b 3347 struct extent_buffer *upper = path->nodes[1];
cfed81a0 3348 struct btrfs_map_token token;
5f39d397 3349 struct btrfs_disk_key disk_key;
00ec4c51 3350 int slot;
34a38218 3351 u32 i;
00ec4c51
CM
3352 int push_space = 0;
3353 int push_items = 0;
0783fcfc 3354 struct btrfs_item *item;
34a38218 3355 u32 nr;
7518a238 3356 u32 right_nritems;
5f39d397 3357 u32 data_end;
db94535d 3358 u32 this_item_size;
00ec4c51 3359
cfed81a0
CM
3360 btrfs_init_map_token(&token);
3361
34a38218
CM
3362 if (empty)
3363 nr = 0;
3364 else
99d8f83c 3365 nr = max_t(u32, 1, min_slot);
34a38218 3366
31840ae1 3367 if (path->slots[0] >= left_nritems)
87b29b20 3368 push_space += data_size;
31840ae1 3369
44871b1b 3370 slot = path->slots[1];
34a38218
CM
3371 i = left_nritems - 1;
3372 while (i >= nr) {
5f39d397 3373 item = btrfs_item_nr(left, i);
db94535d 3374
31840ae1
ZY
3375 if (!empty && push_items > 0) {
3376 if (path->slots[0] > i)
3377 break;
3378 if (path->slots[0] == i) {
3379 int space = btrfs_leaf_free_space(root, left);
3380 if (space + push_space * 2 > free_space)
3381 break;
3382 }
3383 }
3384
00ec4c51 3385 if (path->slots[0] == i)
87b29b20 3386 push_space += data_size;
db94535d 3387
db94535d
CM
3388 this_item_size = btrfs_item_size(left, item);
3389 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 3390 break;
31840ae1 3391
00ec4c51 3392 push_items++;
db94535d 3393 push_space += this_item_size + sizeof(*item);
34a38218
CM
3394 if (i == 0)
3395 break;
3396 i--;
db94535d 3397 }
5f39d397 3398
925baedd
CM
3399 if (push_items == 0)
3400 goto out_unlock;
5f39d397 3401
6c1500f2 3402 WARN_ON(!empty && push_items == left_nritems);
5f39d397 3403
00ec4c51 3404 /* push left to right */
5f39d397 3405 right_nritems = btrfs_header_nritems(right);
34a38218 3406
5f39d397 3407 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
123abc88 3408 push_space -= leaf_data_end(root, left);
5f39d397 3409
00ec4c51 3410 /* make room in the right data area */
5f39d397
CM
3411 data_end = leaf_data_end(root, right);
3412 memmove_extent_buffer(right,
3413 btrfs_leaf_data(right) + data_end - push_space,
3414 btrfs_leaf_data(right) + data_end,
3415 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3416
00ec4c51 3417 /* copy from the left data area */
5f39d397 3418 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
d6025579
CM
3419 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3420 btrfs_leaf_data(left) + leaf_data_end(root, left),
3421 push_space);
5f39d397
CM
3422
3423 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3424 btrfs_item_nr_offset(0),
3425 right_nritems * sizeof(struct btrfs_item));
3426
00ec4c51 3427 /* copy the items from left to right */
5f39d397
CM
3428 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3429 btrfs_item_nr_offset(left_nritems - push_items),
3430 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
3431
3432 /* update the item pointers */
7518a238 3433 right_nritems += push_items;
5f39d397 3434 btrfs_set_header_nritems(right, right_nritems);
123abc88 3435 push_space = BTRFS_LEAF_DATA_SIZE(root);
7518a238 3436 for (i = 0; i < right_nritems; i++) {
5f39d397 3437 item = btrfs_item_nr(right, i);
cfed81a0
CM
3438 push_space -= btrfs_token_item_size(right, item, &token);
3439 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d
CM
3440 }
3441
7518a238 3442 left_nritems -= push_items;
5f39d397 3443 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 3444
34a38218
CM
3445 if (left_nritems)
3446 btrfs_mark_buffer_dirty(left);
f0486c68
YZ
3447 else
3448 clean_tree_block(trans, root, left);
3449
5f39d397 3450 btrfs_mark_buffer_dirty(right);
a429e513 3451
5f39d397
CM
3452 btrfs_item_key(right, &disk_key, 0);
3453 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 3454 btrfs_mark_buffer_dirty(upper);
02217ed2 3455
00ec4c51 3456 /* then fixup the leaf pointer in the path */
7518a238
CM
3457 if (path->slots[0] >= left_nritems) {
3458 path->slots[0] -= left_nritems;
925baedd
CM
3459 if (btrfs_header_nritems(path->nodes[0]) == 0)
3460 clean_tree_block(trans, root, path->nodes[0]);
3461 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3462 free_extent_buffer(path->nodes[0]);
3463 path->nodes[0] = right;
00ec4c51
CM
3464 path->slots[1] += 1;
3465 } else {
925baedd 3466 btrfs_tree_unlock(right);
5f39d397 3467 free_extent_buffer(right);
00ec4c51
CM
3468 }
3469 return 0;
925baedd
CM
3470
3471out_unlock:
3472 btrfs_tree_unlock(right);
3473 free_extent_buffer(right);
3474 return 1;
00ec4c51 3475}
925baedd 3476
44871b1b
CM
3477/*
3478 * push some data in the path leaf to the right, trying to free up at
3479 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3480 *
3481 * returns 1 if the push failed because the other node didn't have enough
3482 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
3483 *
3484 * this will push starting from min_slot to the end of the leaf. It won't
3485 * push any slot lower than min_slot
44871b1b
CM
3486 */
3487static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3488 *root, struct btrfs_path *path,
3489 int min_data_size, int data_size,
3490 int empty, u32 min_slot)
44871b1b
CM
3491{
3492 struct extent_buffer *left = path->nodes[0];
3493 struct extent_buffer *right;
3494 struct extent_buffer *upper;
3495 int slot;
3496 int free_space;
3497 u32 left_nritems;
3498 int ret;
3499
3500 if (!path->nodes[1])
3501 return 1;
3502
3503 slot = path->slots[1];
3504 upper = path->nodes[1];
3505 if (slot >= btrfs_header_nritems(upper) - 1)
3506 return 1;
3507
3508 btrfs_assert_tree_locked(path->nodes[1]);
3509
3510 right = read_node_slot(root, upper, slot + 1);
91ca338d
TI
3511 if (right == NULL)
3512 return 1;
3513
44871b1b
CM
3514 btrfs_tree_lock(right);
3515 btrfs_set_lock_blocking(right);
3516
3517 free_space = btrfs_leaf_free_space(root, right);
3518 if (free_space < data_size)
3519 goto out_unlock;
3520
3521 /* cow and double check */
3522 ret = btrfs_cow_block(trans, root, right, upper,
3523 slot + 1, &right);
3524 if (ret)
3525 goto out_unlock;
3526
3527 free_space = btrfs_leaf_free_space(root, right);
3528 if (free_space < data_size)
3529 goto out_unlock;
3530
3531 left_nritems = btrfs_header_nritems(left);
3532 if (left_nritems == 0)
3533 goto out_unlock;
3534
99d8f83c
CM
3535 return __push_leaf_right(trans, root, path, min_data_size, empty,
3536 right, free_space, left_nritems, min_slot);
44871b1b
CM
3537out_unlock:
3538 btrfs_tree_unlock(right);
3539 free_extent_buffer(right);
3540 return 1;
3541}
3542
74123bd7
CM
3543/*
3544 * push some data in the path leaf to the left, trying to free up at
3545 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3546 *
3547 * max_slot can put a limit on how far into the leaf we'll push items. The
3548 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3549 * items
74123bd7 3550 */
44871b1b
CM
3551static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3552 struct btrfs_root *root,
3553 struct btrfs_path *path, int data_size,
3554 int empty, struct extent_buffer *left,
99d8f83c
CM
3555 int free_space, u32 right_nritems,
3556 u32 max_slot)
be0e5c09 3557{
5f39d397
CM
3558 struct btrfs_disk_key disk_key;
3559 struct extent_buffer *right = path->nodes[0];
be0e5c09 3560 int i;
be0e5c09
CM
3561 int push_space = 0;
3562 int push_items = 0;
0783fcfc 3563 struct btrfs_item *item;
7518a238 3564 u32 old_left_nritems;
34a38218 3565 u32 nr;
aa5d6bed 3566 int ret = 0;
db94535d
CM
3567 u32 this_item_size;
3568 u32 old_left_item_size;
cfed81a0
CM
3569 struct btrfs_map_token token;
3570
3571 btrfs_init_map_token(&token);
be0e5c09 3572
34a38218 3573 if (empty)
99d8f83c 3574 nr = min(right_nritems, max_slot);
34a38218 3575 else
99d8f83c 3576 nr = min(right_nritems - 1, max_slot);
34a38218
CM
3577
3578 for (i = 0; i < nr; i++) {
5f39d397 3579 item = btrfs_item_nr(right, i);
db94535d 3580
31840ae1
ZY
3581 if (!empty && push_items > 0) {
3582 if (path->slots[0] < i)
3583 break;
3584 if (path->slots[0] == i) {
3585 int space = btrfs_leaf_free_space(root, right);
3586 if (space + push_space * 2 > free_space)
3587 break;
3588 }
3589 }
3590
be0e5c09 3591 if (path->slots[0] == i)
87b29b20 3592 push_space += data_size;
db94535d
CM
3593
3594 this_item_size = btrfs_item_size(right, item);
3595 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 3596 break;
db94535d 3597
be0e5c09 3598 push_items++;
db94535d
CM
3599 push_space += this_item_size + sizeof(*item);
3600 }
3601
be0e5c09 3602 if (push_items == 0) {
925baedd
CM
3603 ret = 1;
3604 goto out;
be0e5c09 3605 }
34a38218 3606 if (!empty && push_items == btrfs_header_nritems(right))
a429e513 3607 WARN_ON(1);
5f39d397 3608
be0e5c09 3609 /* push data from right to left */
5f39d397
CM
3610 copy_extent_buffer(left, right,
3611 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3612 btrfs_item_nr_offset(0),
3613 push_items * sizeof(struct btrfs_item));
3614
123abc88 3615 push_space = BTRFS_LEAF_DATA_SIZE(root) -
d397712b 3616 btrfs_item_offset_nr(right, push_items - 1);
5f39d397
CM
3617
3618 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
d6025579
CM
3619 leaf_data_end(root, left) - push_space,
3620 btrfs_leaf_data(right) +
5f39d397 3621 btrfs_item_offset_nr(right, push_items - 1),
d6025579 3622 push_space);
5f39d397 3623 old_left_nritems = btrfs_header_nritems(left);
87b29b20 3624 BUG_ON(old_left_nritems <= 0);
eb60ceac 3625
db94535d 3626 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 3627 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 3628 u32 ioff;
db94535d 3629
5f39d397 3630 item = btrfs_item_nr(left, i);
db94535d 3631
cfed81a0
CM
3632 ioff = btrfs_token_item_offset(left, item, &token);
3633 btrfs_set_token_item_offset(left, item,
3634 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3635 &token);
be0e5c09 3636 }
5f39d397 3637 btrfs_set_header_nritems(left, old_left_nritems + push_items);
be0e5c09
CM
3638
3639 /* fixup right node */
31b1a2bd
JL
3640 if (push_items > right_nritems)
3641 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
d397712b 3642 right_nritems);
34a38218
CM
3643
3644 if (push_items < right_nritems) {
3645 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3646 leaf_data_end(root, right);
3647 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3648 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3649 btrfs_leaf_data(right) +
3650 leaf_data_end(root, right), push_space);
3651
3652 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
3653 btrfs_item_nr_offset(push_items),
3654 (btrfs_header_nritems(right) - push_items) *
3655 sizeof(struct btrfs_item));
34a38218 3656 }
eef1c494
Y
3657 right_nritems -= push_items;
3658 btrfs_set_header_nritems(right, right_nritems);
123abc88 3659 push_space = BTRFS_LEAF_DATA_SIZE(root);
5f39d397
CM
3660 for (i = 0; i < right_nritems; i++) {
3661 item = btrfs_item_nr(right, i);
db94535d 3662
cfed81a0
CM
3663 push_space = push_space - btrfs_token_item_size(right,
3664 item, &token);
3665 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d 3666 }
eb60ceac 3667
5f39d397 3668 btrfs_mark_buffer_dirty(left);
34a38218
CM
3669 if (right_nritems)
3670 btrfs_mark_buffer_dirty(right);
f0486c68
YZ
3671 else
3672 clean_tree_block(trans, root, right);
098f59c2 3673
5f39d397 3674 btrfs_item_key(right, &disk_key, 0);
d6a0a126 3675 fixup_low_keys(root, path, &disk_key, 1);
be0e5c09
CM
3676
3677 /* then fixup the leaf pointer in the path */
3678 if (path->slots[0] < push_items) {
3679 path->slots[0] += old_left_nritems;
925baedd 3680 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3681 free_extent_buffer(path->nodes[0]);
3682 path->nodes[0] = left;
be0e5c09
CM
3683 path->slots[1] -= 1;
3684 } else {
925baedd 3685 btrfs_tree_unlock(left);
5f39d397 3686 free_extent_buffer(left);
be0e5c09
CM
3687 path->slots[0] -= push_items;
3688 }
eb60ceac 3689 BUG_ON(path->slots[0] < 0);
aa5d6bed 3690 return ret;
925baedd
CM
3691out:
3692 btrfs_tree_unlock(left);
3693 free_extent_buffer(left);
3694 return ret;
be0e5c09
CM
3695}
3696
44871b1b
CM
3697/*
3698 * push some data in the path leaf to the left, trying to free up at
3699 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3700 *
3701 * max_slot can put a limit on how far into the leaf we'll push items. The
3702 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3703 * items
44871b1b
CM
3704 */
3705static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3706 *root, struct btrfs_path *path, int min_data_size,
3707 int data_size, int empty, u32 max_slot)
44871b1b
CM
3708{
3709 struct extent_buffer *right = path->nodes[0];
3710 struct extent_buffer *left;
3711 int slot;
3712 int free_space;
3713 u32 right_nritems;
3714 int ret = 0;
3715
3716 slot = path->slots[1];
3717 if (slot == 0)
3718 return 1;
3719 if (!path->nodes[1])
3720 return 1;
3721
3722 right_nritems = btrfs_header_nritems(right);
3723 if (right_nritems == 0)
3724 return 1;
3725
3726 btrfs_assert_tree_locked(path->nodes[1]);
3727
3728 left = read_node_slot(root, path->nodes[1], slot - 1);
91ca338d
TI
3729 if (left == NULL)
3730 return 1;
3731
44871b1b
CM
3732 btrfs_tree_lock(left);
3733 btrfs_set_lock_blocking(left);
3734
3735 free_space = btrfs_leaf_free_space(root, left);
3736 if (free_space < data_size) {
3737 ret = 1;
3738 goto out;
3739 }
3740
3741 /* cow and double check */
3742 ret = btrfs_cow_block(trans, root, left,
3743 path->nodes[1], slot - 1, &left);
3744 if (ret) {
3745 /* we hit -ENOSPC, but it isn't fatal here */
79787eaa
JM
3746 if (ret == -ENOSPC)
3747 ret = 1;
44871b1b
CM
3748 goto out;
3749 }
3750
3751 free_space = btrfs_leaf_free_space(root, left);
3752 if (free_space < data_size) {
3753 ret = 1;
3754 goto out;
3755 }
3756
99d8f83c
CM
3757 return __push_leaf_left(trans, root, path, min_data_size,
3758 empty, left, free_space, right_nritems,
3759 max_slot);
44871b1b
CM
3760out:
3761 btrfs_tree_unlock(left);
3762 free_extent_buffer(left);
3763 return ret;
3764}
3765
3766/*
3767 * split the path's leaf in two, making sure there is at least data_size
3768 * available for the resulting leaf level of the path.
44871b1b 3769 */
143bede5
JM
3770static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3771 struct btrfs_root *root,
3772 struct btrfs_path *path,
3773 struct extent_buffer *l,
3774 struct extent_buffer *right,
3775 int slot, int mid, int nritems)
44871b1b
CM
3776{
3777 int data_copy_size;
3778 int rt_data_off;
3779 int i;
44871b1b 3780 struct btrfs_disk_key disk_key;
cfed81a0
CM
3781 struct btrfs_map_token token;
3782
3783 btrfs_init_map_token(&token);
44871b1b
CM
3784
3785 nritems = nritems - mid;
3786 btrfs_set_header_nritems(right, nritems);
3787 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3788
3789 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3790 btrfs_item_nr_offset(mid),
3791 nritems * sizeof(struct btrfs_item));
3792
3793 copy_extent_buffer(right, l,
3794 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3795 data_copy_size, btrfs_leaf_data(l) +
3796 leaf_data_end(root, l), data_copy_size);
3797
3798 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3799 btrfs_item_end_nr(l, mid);
3800
3801 for (i = 0; i < nritems; i++) {
3802 struct btrfs_item *item = btrfs_item_nr(right, i);
3803 u32 ioff;
3804
cfed81a0
CM
3805 ioff = btrfs_token_item_offset(right, item, &token);
3806 btrfs_set_token_item_offset(right, item,
3807 ioff + rt_data_off, &token);
44871b1b
CM
3808 }
3809
44871b1b 3810 btrfs_set_header_nritems(l, mid);
44871b1b 3811 btrfs_item_key(right, &disk_key, 0);
143bede5 3812 insert_ptr(trans, root, path, &disk_key, right->start,
c3e06965 3813 path->slots[1] + 1, 1);
44871b1b
CM
3814
3815 btrfs_mark_buffer_dirty(right);
3816 btrfs_mark_buffer_dirty(l);
3817 BUG_ON(path->slots[0] != slot);
3818
44871b1b
CM
3819 if (mid <= slot) {
3820 btrfs_tree_unlock(path->nodes[0]);
3821 free_extent_buffer(path->nodes[0]);
3822 path->nodes[0] = right;
3823 path->slots[0] -= mid;
3824 path->slots[1] += 1;
3825 } else {
3826 btrfs_tree_unlock(right);
3827 free_extent_buffer(right);
3828 }
3829
3830 BUG_ON(path->slots[0] < 0);
44871b1b
CM
3831}
3832
99d8f83c
CM
3833/*
3834 * double splits happen when we need to insert a big item in the middle
3835 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3836 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3837 * A B C
3838 *
3839 * We avoid this by trying to push the items on either side of our target
3840 * into the adjacent leaves. If all goes well we can avoid the double split
3841 * completely.
3842 */
3843static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3844 struct btrfs_root *root,
3845 struct btrfs_path *path,
3846 int data_size)
3847{
3848 int ret;
3849 int progress = 0;
3850 int slot;
3851 u32 nritems;
3852
3853 slot = path->slots[0];
3854
3855 /*
3856 * try to push all the items after our slot into the
3857 * right leaf
3858 */
3859 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3860 if (ret < 0)
3861 return ret;
3862
3863 if (ret == 0)
3864 progress++;
3865
3866 nritems = btrfs_header_nritems(path->nodes[0]);
3867 /*
3868 * our goal is to get our slot at the start or end of a leaf. If
3869 * we've done so we're done
3870 */
3871 if (path->slots[0] == 0 || path->slots[0] == nritems)
3872 return 0;
3873
3874 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3875 return 0;
3876
3877 /* try to push all the items before our slot into the next leaf */
3878 slot = path->slots[0];
3879 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3880 if (ret < 0)
3881 return ret;
3882
3883 if (ret == 0)
3884 progress++;
3885
3886 if (progress)
3887 return 0;
3888 return 1;
3889}
3890
74123bd7
CM
3891/*
3892 * split the path's leaf in two, making sure there is at least data_size
3893 * available for the resulting leaf level of the path.
aa5d6bed
CM
3894 *
3895 * returns 0 if all went well and < 0 on failure.
74123bd7 3896 */
e02119d5
CM
3897static noinline int split_leaf(struct btrfs_trans_handle *trans,
3898 struct btrfs_root *root,
3899 struct btrfs_key *ins_key,
3900 struct btrfs_path *path, int data_size,
3901 int extend)
be0e5c09 3902{
5d4f98a2 3903 struct btrfs_disk_key disk_key;
5f39d397 3904 struct extent_buffer *l;
7518a238 3905 u32 nritems;
eb60ceac
CM
3906 int mid;
3907 int slot;
5f39d397 3908 struct extent_buffer *right;
d4dbff95 3909 int ret = 0;
aa5d6bed 3910 int wret;
5d4f98a2 3911 int split;
cc0c5538 3912 int num_doubles = 0;
99d8f83c 3913 int tried_avoid_double = 0;
aa5d6bed 3914
a5719521
YZ
3915 l = path->nodes[0];
3916 slot = path->slots[0];
3917 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3918 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3919 return -EOVERFLOW;
3920
40689478 3921 /* first try to make some room by pushing left and right */
33157e05 3922 if (data_size && path->nodes[1]) {
99d8f83c
CM
3923 wret = push_leaf_right(trans, root, path, data_size,
3924 data_size, 0, 0);
d397712b 3925 if (wret < 0)
eaee50e8 3926 return wret;
3685f791 3927 if (wret) {
99d8f83c
CM
3928 wret = push_leaf_left(trans, root, path, data_size,
3929 data_size, 0, (u32)-1);
3685f791
CM
3930 if (wret < 0)
3931 return wret;
3932 }
3933 l = path->nodes[0];
aa5d6bed 3934
3685f791 3935 /* did the pushes work? */
87b29b20 3936 if (btrfs_leaf_free_space(root, l) >= data_size)
3685f791 3937 return 0;
3326d1b0 3938 }
aa5d6bed 3939
5c680ed6 3940 if (!path->nodes[1]) {
fdd99c72 3941 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
3942 if (ret)
3943 return ret;
3944 }
cc0c5538 3945again:
5d4f98a2 3946 split = 1;
cc0c5538 3947 l = path->nodes[0];
eb60ceac 3948 slot = path->slots[0];
5f39d397 3949 nritems = btrfs_header_nritems(l);
d397712b 3950 mid = (nritems + 1) / 2;
54aa1f4d 3951
5d4f98a2
YZ
3952 if (mid <= slot) {
3953 if (nritems == 1 ||
3954 leaf_space_used(l, mid, nritems - mid) + data_size >
3955 BTRFS_LEAF_DATA_SIZE(root)) {
3956 if (slot >= nritems) {
3957 split = 0;
3958 } else {
3959 mid = slot;
3960 if (mid != nritems &&
3961 leaf_space_used(l, mid, nritems - mid) +
3962 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
3963 if (data_size && !tried_avoid_double)
3964 goto push_for_double;
5d4f98a2
YZ
3965 split = 2;
3966 }
3967 }
3968 }
3969 } else {
3970 if (leaf_space_used(l, 0, mid) + data_size >
3971 BTRFS_LEAF_DATA_SIZE(root)) {
3972 if (!extend && data_size && slot == 0) {
3973 split = 0;
3974 } else if ((extend || !data_size) && slot == 0) {
3975 mid = 1;
3976 } else {
3977 mid = slot;
3978 if (mid != nritems &&
3979 leaf_space_used(l, mid, nritems - mid) +
3980 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
3981 if (data_size && !tried_avoid_double)
3982 goto push_for_double;
5d4f98a2
YZ
3983 split = 2 ;
3984 }
3985 }
3986 }
3987 }
3988
3989 if (split == 0)
3990 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3991 else
3992 btrfs_item_key(l, &disk_key, mid);
3993
3994 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
31840ae1 3995 root->root_key.objectid,
5581a51a 3996 &disk_key, 0, l->start, 0);
f0486c68 3997 if (IS_ERR(right))
5f39d397 3998 return PTR_ERR(right);
f0486c68
YZ
3999
4000 root_add_used(root, root->leafsize);
5f39d397
CM
4001
4002 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
db94535d 4003 btrfs_set_header_bytenr(right, right->start);
5f39d397 4004 btrfs_set_header_generation(right, trans->transid);
5d4f98a2 4005 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
4006 btrfs_set_header_owner(right, root->root_key.objectid);
4007 btrfs_set_header_level(right, 0);
4008 write_extent_buffer(right, root->fs_info->fsid,
4009 (unsigned long)btrfs_header_fsid(right),
4010 BTRFS_FSID_SIZE);
e17cade2
CM
4011
4012 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4013 (unsigned long)btrfs_header_chunk_tree_uuid(right),
4014 BTRFS_UUID_SIZE);
44871b1b 4015
5d4f98a2
YZ
4016 if (split == 0) {
4017 if (mid <= slot) {
4018 btrfs_set_header_nritems(right, 0);
143bede5 4019 insert_ptr(trans, root, path, &disk_key, right->start,
c3e06965 4020 path->slots[1] + 1, 1);
5d4f98a2
YZ
4021 btrfs_tree_unlock(path->nodes[0]);
4022 free_extent_buffer(path->nodes[0]);
4023 path->nodes[0] = right;
4024 path->slots[0] = 0;
4025 path->slots[1] += 1;
4026 } else {
4027 btrfs_set_header_nritems(right, 0);
143bede5 4028 insert_ptr(trans, root, path, &disk_key, right->start,
c3e06965 4029 path->slots[1], 1);
5d4f98a2
YZ
4030 btrfs_tree_unlock(path->nodes[0]);
4031 free_extent_buffer(path->nodes[0]);
4032 path->nodes[0] = right;
4033 path->slots[0] = 0;
143bede5 4034 if (path->slots[1] == 0)
d6a0a126 4035 fixup_low_keys(root, path, &disk_key, 1);
d4dbff95 4036 }
5d4f98a2
YZ
4037 btrfs_mark_buffer_dirty(right);
4038 return ret;
d4dbff95 4039 }
74123bd7 4040
143bede5 4041 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
31840ae1 4042
5d4f98a2 4043 if (split == 2) {
cc0c5538
CM
4044 BUG_ON(num_doubles != 0);
4045 num_doubles++;
4046 goto again;
a429e513 4047 }
44871b1b 4048
143bede5 4049 return 0;
99d8f83c
CM
4050
4051push_for_double:
4052 push_for_double_split(trans, root, path, data_size);
4053 tried_avoid_double = 1;
4054 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4055 return 0;
4056 goto again;
be0e5c09
CM
4057}
4058
ad48fd75
YZ
4059static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4060 struct btrfs_root *root,
4061 struct btrfs_path *path, int ins_len)
459931ec 4062{
ad48fd75 4063 struct btrfs_key key;
459931ec 4064 struct extent_buffer *leaf;
ad48fd75
YZ
4065 struct btrfs_file_extent_item *fi;
4066 u64 extent_len = 0;
4067 u32 item_size;
4068 int ret;
459931ec
CM
4069
4070 leaf = path->nodes[0];
ad48fd75
YZ
4071 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4072
4073 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4074 key.type != BTRFS_EXTENT_CSUM_KEY);
4075
4076 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4077 return 0;
459931ec
CM
4078
4079 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ad48fd75
YZ
4080 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4081 fi = btrfs_item_ptr(leaf, path->slots[0],
4082 struct btrfs_file_extent_item);
4083 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4084 }
b3b4aa74 4085 btrfs_release_path(path);
459931ec 4086
459931ec 4087 path->keep_locks = 1;
ad48fd75
YZ
4088 path->search_for_split = 1;
4089 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
459931ec 4090 path->search_for_split = 0;
ad48fd75
YZ
4091 if (ret < 0)
4092 goto err;
459931ec 4093
ad48fd75
YZ
4094 ret = -EAGAIN;
4095 leaf = path->nodes[0];
459931ec 4096 /* if our item isn't there or got smaller, return now */
ad48fd75
YZ
4097 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4098 goto err;
4099
109f6aef
CM
4100 /* the leaf has changed, it now has room. return now */
4101 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4102 goto err;
4103
ad48fd75
YZ
4104 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4105 fi = btrfs_item_ptr(leaf, path->slots[0],
4106 struct btrfs_file_extent_item);
4107 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4108 goto err;
459931ec
CM
4109 }
4110
b9473439 4111 btrfs_set_path_blocking(path);
ad48fd75 4112 ret = split_leaf(trans, root, &key, path, ins_len, 1);
f0486c68
YZ
4113 if (ret)
4114 goto err;
459931ec 4115
ad48fd75 4116 path->keep_locks = 0;
b9473439 4117 btrfs_unlock_up_safe(path, 1);
ad48fd75
YZ
4118 return 0;
4119err:
4120 path->keep_locks = 0;
4121 return ret;
4122}
4123
4124static noinline int split_item(struct btrfs_trans_handle *trans,
4125 struct btrfs_root *root,
4126 struct btrfs_path *path,
4127 struct btrfs_key *new_key,
4128 unsigned long split_offset)
4129{
4130 struct extent_buffer *leaf;
4131 struct btrfs_item *item;
4132 struct btrfs_item *new_item;
4133 int slot;
4134 char *buf;
4135 u32 nritems;
4136 u32 item_size;
4137 u32 orig_offset;
4138 struct btrfs_disk_key disk_key;
4139
b9473439
CM
4140 leaf = path->nodes[0];
4141 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4142
b4ce94de
CM
4143 btrfs_set_path_blocking(path);
4144
459931ec
CM
4145 item = btrfs_item_nr(leaf, path->slots[0]);
4146 orig_offset = btrfs_item_offset(leaf, item);
4147 item_size = btrfs_item_size(leaf, item);
4148
459931ec 4149 buf = kmalloc(item_size, GFP_NOFS);
ad48fd75
YZ
4150 if (!buf)
4151 return -ENOMEM;
4152
459931ec
CM
4153 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4154 path->slots[0]), item_size);
459931ec 4155
ad48fd75 4156 slot = path->slots[0] + 1;
459931ec 4157 nritems = btrfs_header_nritems(leaf);
459931ec
CM
4158 if (slot != nritems) {
4159 /* shift the items */
4160 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
ad48fd75
YZ
4161 btrfs_item_nr_offset(slot),
4162 (nritems - slot) * sizeof(struct btrfs_item));
459931ec
CM
4163 }
4164
4165 btrfs_cpu_key_to_disk(&disk_key, new_key);
4166 btrfs_set_item_key(leaf, &disk_key, slot);
4167
4168 new_item = btrfs_item_nr(leaf, slot);
4169
4170 btrfs_set_item_offset(leaf, new_item, orig_offset);
4171 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4172
4173 btrfs_set_item_offset(leaf, item,
4174 orig_offset + item_size - split_offset);
4175 btrfs_set_item_size(leaf, item, split_offset);
4176
4177 btrfs_set_header_nritems(leaf, nritems + 1);
4178
4179 /* write the data for the start of the original item */
4180 write_extent_buffer(leaf, buf,
4181 btrfs_item_ptr_offset(leaf, path->slots[0]),
4182 split_offset);
4183
4184 /* write the data for the new item */
4185 write_extent_buffer(leaf, buf + split_offset,
4186 btrfs_item_ptr_offset(leaf, slot),
4187 item_size - split_offset);
4188 btrfs_mark_buffer_dirty(leaf);
4189
ad48fd75 4190 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
459931ec 4191 kfree(buf);
ad48fd75
YZ
4192 return 0;
4193}
4194
4195/*
4196 * This function splits a single item into two items,
4197 * giving 'new_key' to the new item and splitting the
4198 * old one at split_offset (from the start of the item).
4199 *
4200 * The path may be released by this operation. After
4201 * the split, the path is pointing to the old item. The
4202 * new item is going to be in the same node as the old one.
4203 *
4204 * Note, the item being split must be smaller enough to live alone on
4205 * a tree block with room for one extra struct btrfs_item
4206 *
4207 * This allows us to split the item in place, keeping a lock on the
4208 * leaf the entire time.
4209 */
4210int btrfs_split_item(struct btrfs_trans_handle *trans,
4211 struct btrfs_root *root,
4212 struct btrfs_path *path,
4213 struct btrfs_key *new_key,
4214 unsigned long split_offset)
4215{
4216 int ret;
4217 ret = setup_leaf_for_split(trans, root, path,
4218 sizeof(struct btrfs_item));
4219 if (ret)
4220 return ret;
4221
4222 ret = split_item(trans, root, path, new_key, split_offset);
459931ec
CM
4223 return ret;
4224}
4225
ad48fd75
YZ
4226/*
4227 * This function duplicate a item, giving 'new_key' to the new item.
4228 * It guarantees both items live in the same tree leaf and the new item
4229 * is contiguous with the original item.
4230 *
4231 * This allows us to split file extent in place, keeping a lock on the
4232 * leaf the entire time.
4233 */
4234int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4235 struct btrfs_root *root,
4236 struct btrfs_path *path,
4237 struct btrfs_key *new_key)
4238{
4239 struct extent_buffer *leaf;
4240 int ret;
4241 u32 item_size;
4242
4243 leaf = path->nodes[0];
4244 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4245 ret = setup_leaf_for_split(trans, root, path,
4246 item_size + sizeof(struct btrfs_item));
4247 if (ret)
4248 return ret;
4249
4250 path->slots[0]++;
afe5fea7 4251 setup_items_for_insert(root, path, new_key, &item_size,
143bede5
JM
4252 item_size, item_size +
4253 sizeof(struct btrfs_item), 1);
ad48fd75
YZ
4254 leaf = path->nodes[0];
4255 memcpy_extent_buffer(leaf,
4256 btrfs_item_ptr_offset(leaf, path->slots[0]),
4257 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4258 item_size);
4259 return 0;
4260}
4261
d352ac68
CM
4262/*
4263 * make the item pointed to by the path smaller. new_size indicates
4264 * how small to make it, and from_end tells us if we just chop bytes
4265 * off the end of the item or if we shift the item to chop bytes off
4266 * the front.
4267 */
afe5fea7 4268void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
143bede5 4269 u32 new_size, int from_end)
b18c6685 4270{
b18c6685 4271 int slot;
5f39d397
CM
4272 struct extent_buffer *leaf;
4273 struct btrfs_item *item;
b18c6685
CM
4274 u32 nritems;
4275 unsigned int data_end;
4276 unsigned int old_data_start;
4277 unsigned int old_size;
4278 unsigned int size_diff;
4279 int i;
cfed81a0
CM
4280 struct btrfs_map_token token;
4281
4282 btrfs_init_map_token(&token);
b18c6685 4283
5f39d397 4284 leaf = path->nodes[0];
179e29e4
CM
4285 slot = path->slots[0];
4286
4287 old_size = btrfs_item_size_nr(leaf, slot);
4288 if (old_size == new_size)
143bede5 4289 return;
b18c6685 4290
5f39d397 4291 nritems = btrfs_header_nritems(leaf);
b18c6685
CM
4292 data_end = leaf_data_end(root, leaf);
4293
5f39d397 4294 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 4295
b18c6685
CM
4296 size_diff = old_size - new_size;
4297
4298 BUG_ON(slot < 0);
4299 BUG_ON(slot >= nritems);
4300
4301 /*
4302 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4303 */
4304 /* first correct the data pointers */
4305 for (i = slot; i < nritems; i++) {
5f39d397
CM
4306 u32 ioff;
4307 item = btrfs_item_nr(leaf, i);
db94535d 4308
cfed81a0
CM
4309 ioff = btrfs_token_item_offset(leaf, item, &token);
4310 btrfs_set_token_item_offset(leaf, item,
4311 ioff + size_diff, &token);
b18c6685 4312 }
db94535d 4313
b18c6685 4314 /* shift the data */
179e29e4
CM
4315 if (from_end) {
4316 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4317 data_end + size_diff, btrfs_leaf_data(leaf) +
4318 data_end, old_data_start + new_size - data_end);
4319 } else {
4320 struct btrfs_disk_key disk_key;
4321 u64 offset;
4322
4323 btrfs_item_key(leaf, &disk_key, slot);
4324
4325 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4326 unsigned long ptr;
4327 struct btrfs_file_extent_item *fi;
4328
4329 fi = btrfs_item_ptr(leaf, slot,
4330 struct btrfs_file_extent_item);
4331 fi = (struct btrfs_file_extent_item *)(
4332 (unsigned long)fi - size_diff);
4333
4334 if (btrfs_file_extent_type(leaf, fi) ==
4335 BTRFS_FILE_EXTENT_INLINE) {
4336 ptr = btrfs_item_ptr_offset(leaf, slot);
4337 memmove_extent_buffer(leaf, ptr,
d397712b
CM
4338 (unsigned long)fi,
4339 offsetof(struct btrfs_file_extent_item,
179e29e4
CM
4340 disk_bytenr));
4341 }
4342 }
4343
4344 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4345 data_end + size_diff, btrfs_leaf_data(leaf) +
4346 data_end, old_data_start - data_end);
4347
4348 offset = btrfs_disk_key_offset(&disk_key);
4349 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4350 btrfs_set_item_key(leaf, &disk_key, slot);
4351 if (slot == 0)
d6a0a126 4352 fixup_low_keys(root, path, &disk_key, 1);
179e29e4 4353 }
5f39d397
CM
4354
4355 item = btrfs_item_nr(leaf, slot);
4356 btrfs_set_item_size(leaf, item, new_size);
4357 btrfs_mark_buffer_dirty(leaf);
b18c6685 4358
5f39d397
CM
4359 if (btrfs_leaf_free_space(root, leaf) < 0) {
4360 btrfs_print_leaf(root, leaf);
b18c6685 4361 BUG();
5f39d397 4362 }
b18c6685
CM
4363}
4364
d352ac68 4365/*
8f69dbd2 4366 * make the item pointed to by the path bigger, data_size is the added size.
d352ac68 4367 */
4b90c680 4368void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
143bede5 4369 u32 data_size)
6567e837 4370{
6567e837 4371 int slot;
5f39d397
CM
4372 struct extent_buffer *leaf;
4373 struct btrfs_item *item;
6567e837
CM
4374 u32 nritems;
4375 unsigned int data_end;
4376 unsigned int old_data;
4377 unsigned int old_size;
4378 int i;
cfed81a0
CM
4379 struct btrfs_map_token token;
4380
4381 btrfs_init_map_token(&token);
6567e837 4382
5f39d397 4383 leaf = path->nodes[0];
6567e837 4384
5f39d397 4385 nritems = btrfs_header_nritems(leaf);
6567e837
CM
4386 data_end = leaf_data_end(root, leaf);
4387
5f39d397
CM
4388 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4389 btrfs_print_leaf(root, leaf);
6567e837 4390 BUG();
5f39d397 4391 }
6567e837 4392 slot = path->slots[0];
5f39d397 4393 old_data = btrfs_item_end_nr(leaf, slot);
6567e837
CM
4394
4395 BUG_ON(slot < 0);
3326d1b0
CM
4396 if (slot >= nritems) {
4397 btrfs_print_leaf(root, leaf);
d397712b
CM
4398 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4399 slot, nritems);
3326d1b0
CM
4400 BUG_ON(1);
4401 }
6567e837
CM
4402
4403 /*
4404 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4405 */
4406 /* first correct the data pointers */
4407 for (i = slot; i < nritems; i++) {
5f39d397
CM
4408 u32 ioff;
4409 item = btrfs_item_nr(leaf, i);
db94535d 4410
cfed81a0
CM
4411 ioff = btrfs_token_item_offset(leaf, item, &token);
4412 btrfs_set_token_item_offset(leaf, item,
4413 ioff - data_size, &token);
6567e837 4414 }
5f39d397 4415
6567e837 4416 /* shift the data */
5f39d397 4417 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
6567e837
CM
4418 data_end - data_size, btrfs_leaf_data(leaf) +
4419 data_end, old_data - data_end);
5f39d397 4420
6567e837 4421 data_end = old_data;
5f39d397
CM
4422 old_size = btrfs_item_size_nr(leaf, slot);
4423 item = btrfs_item_nr(leaf, slot);
4424 btrfs_set_item_size(leaf, item, old_size + data_size);
4425 btrfs_mark_buffer_dirty(leaf);
6567e837 4426
5f39d397
CM
4427 if (btrfs_leaf_free_space(root, leaf) < 0) {
4428 btrfs_print_leaf(root, leaf);
6567e837 4429 BUG();
5f39d397 4430 }
6567e837
CM
4431}
4432
74123bd7 4433/*
44871b1b
CM
4434 * this is a helper for btrfs_insert_empty_items, the main goal here is
4435 * to save stack depth by doing the bulk of the work in a function
4436 * that doesn't call btrfs_search_slot
74123bd7 4437 */
afe5fea7 4438void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
143bede5
JM
4439 struct btrfs_key *cpu_key, u32 *data_size,
4440 u32 total_data, u32 total_size, int nr)
be0e5c09 4441{
5f39d397 4442 struct btrfs_item *item;
9c58309d 4443 int i;
7518a238 4444 u32 nritems;
be0e5c09 4445 unsigned int data_end;
e2fa7227 4446 struct btrfs_disk_key disk_key;
44871b1b
CM
4447 struct extent_buffer *leaf;
4448 int slot;
cfed81a0
CM
4449 struct btrfs_map_token token;
4450
4451 btrfs_init_map_token(&token);
e2fa7227 4452
5f39d397 4453 leaf = path->nodes[0];
44871b1b 4454 slot = path->slots[0];
74123bd7 4455
5f39d397 4456 nritems = btrfs_header_nritems(leaf);
123abc88 4457 data_end = leaf_data_end(root, leaf);
eb60ceac 4458
f25956cc 4459 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3326d1b0 4460 btrfs_print_leaf(root, leaf);
d397712b 4461 printk(KERN_CRIT "not enough freespace need %u have %d\n",
9c58309d 4462 total_size, btrfs_leaf_free_space(root, leaf));
be0e5c09 4463 BUG();
d4dbff95 4464 }
5f39d397 4465
be0e5c09 4466 if (slot != nritems) {
5f39d397 4467 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
be0e5c09 4468
5f39d397
CM
4469 if (old_data < data_end) {
4470 btrfs_print_leaf(root, leaf);
d397712b 4471 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
5f39d397
CM
4472 slot, old_data, data_end);
4473 BUG_ON(1);
4474 }
be0e5c09
CM
4475 /*
4476 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4477 */
4478 /* first correct the data pointers */
0783fcfc 4479 for (i = slot; i < nritems; i++) {
5f39d397 4480 u32 ioff;
db94535d 4481
5f39d397 4482 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4483 ioff = btrfs_token_item_offset(leaf, item, &token);
4484 btrfs_set_token_item_offset(leaf, item,
4485 ioff - total_data, &token);
0783fcfc 4486 }
be0e5c09 4487 /* shift the items */
9c58309d 4488 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
5f39d397 4489 btrfs_item_nr_offset(slot),
d6025579 4490 (nritems - slot) * sizeof(struct btrfs_item));
be0e5c09
CM
4491
4492 /* shift the data */
5f39d397 4493 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
9c58309d 4494 data_end - total_data, btrfs_leaf_data(leaf) +
d6025579 4495 data_end, old_data - data_end);
be0e5c09
CM
4496 data_end = old_data;
4497 }
5f39d397 4498
62e2749e 4499 /* setup the item for the new data */
9c58309d
CM
4500 for (i = 0; i < nr; i++) {
4501 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4502 btrfs_set_item_key(leaf, &disk_key, slot + i);
4503 item = btrfs_item_nr(leaf, slot + i);
cfed81a0
CM
4504 btrfs_set_token_item_offset(leaf, item,
4505 data_end - data_size[i], &token);
9c58309d 4506 data_end -= data_size[i];
cfed81a0 4507 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
9c58309d 4508 }
44871b1b 4509
9c58309d 4510 btrfs_set_header_nritems(leaf, nritems + nr);
aa5d6bed 4511
5a01a2e3
CM
4512 if (slot == 0) {
4513 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
d6a0a126 4514 fixup_low_keys(root, path, &disk_key, 1);
5a01a2e3 4515 }
b9473439
CM
4516 btrfs_unlock_up_safe(path, 1);
4517 btrfs_mark_buffer_dirty(leaf);
aa5d6bed 4518
5f39d397
CM
4519 if (btrfs_leaf_free_space(root, leaf) < 0) {
4520 btrfs_print_leaf(root, leaf);
be0e5c09 4521 BUG();
5f39d397 4522 }
44871b1b
CM
4523}
4524
4525/*
4526 * Given a key and some data, insert items into the tree.
4527 * This does all the path init required, making room in the tree if needed.
4528 */
4529int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4530 struct btrfs_root *root,
4531 struct btrfs_path *path,
4532 struct btrfs_key *cpu_key, u32 *data_size,
4533 int nr)
4534{
44871b1b
CM
4535 int ret = 0;
4536 int slot;
4537 int i;
4538 u32 total_size = 0;
4539 u32 total_data = 0;
4540
4541 for (i = 0; i < nr; i++)
4542 total_data += data_size[i];
4543
4544 total_size = total_data + (nr * sizeof(struct btrfs_item));
4545 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4546 if (ret == 0)
4547 return -EEXIST;
4548 if (ret < 0)
143bede5 4549 return ret;
44871b1b 4550
44871b1b
CM
4551 slot = path->slots[0];
4552 BUG_ON(slot < 0);
4553
afe5fea7 4554 setup_items_for_insert(root, path, cpu_key, data_size,
44871b1b 4555 total_data, total_size, nr);
143bede5 4556 return 0;
62e2749e
CM
4557}
4558
4559/*
4560 * Given a key and some data, insert an item into the tree.
4561 * This does all the path init required, making room in the tree if needed.
4562 */
e089f05c
CM
4563int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4564 *root, struct btrfs_key *cpu_key, void *data, u32
4565 data_size)
62e2749e
CM
4566{
4567 int ret = 0;
2c90e5d6 4568 struct btrfs_path *path;
5f39d397
CM
4569 struct extent_buffer *leaf;
4570 unsigned long ptr;
62e2749e 4571
2c90e5d6 4572 path = btrfs_alloc_path();
db5b493a
TI
4573 if (!path)
4574 return -ENOMEM;
2c90e5d6 4575 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 4576 if (!ret) {
5f39d397
CM
4577 leaf = path->nodes[0];
4578 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4579 write_extent_buffer(leaf, data, ptr, data_size);
4580 btrfs_mark_buffer_dirty(leaf);
62e2749e 4581 }
2c90e5d6 4582 btrfs_free_path(path);
aa5d6bed 4583 return ret;
be0e5c09
CM
4584}
4585
74123bd7 4586/*
5de08d7d 4587 * delete the pointer from a given node.
74123bd7 4588 *
d352ac68
CM
4589 * the tree should have been previously balanced so the deletion does not
4590 * empty a node.
74123bd7 4591 */
afe5fea7
TI
4592static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4593 int level, int slot)
be0e5c09 4594{
5f39d397 4595 struct extent_buffer *parent = path->nodes[level];
7518a238 4596 u32 nritems;
f3ea38da 4597 int ret;
be0e5c09 4598
5f39d397 4599 nritems = btrfs_header_nritems(parent);
d397712b 4600 if (slot != nritems - 1) {
0e411ece 4601 if (level)
f3ea38da
JS
4602 tree_mod_log_eb_move(root->fs_info, parent, slot,
4603 slot + 1, nritems - slot - 1);
5f39d397
CM
4604 memmove_extent_buffer(parent,
4605 btrfs_node_key_ptr_offset(slot),
4606 btrfs_node_key_ptr_offset(slot + 1),
d6025579
CM
4607 sizeof(struct btrfs_key_ptr) *
4608 (nritems - slot - 1));
57ba86c0
CM
4609 } else if (level) {
4610 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
c8cc6341 4611 MOD_LOG_KEY_REMOVE, GFP_NOFS);
57ba86c0 4612 BUG_ON(ret < 0);
bb803951 4613 }
f3ea38da 4614
7518a238 4615 nritems--;
5f39d397 4616 btrfs_set_header_nritems(parent, nritems);
7518a238 4617 if (nritems == 0 && parent == root->node) {
5f39d397 4618 BUG_ON(btrfs_header_level(root->node) != 1);
bb803951 4619 /* just turn the root into a leaf and break */
5f39d397 4620 btrfs_set_header_level(root->node, 0);
bb803951 4621 } else if (slot == 0) {
5f39d397
CM
4622 struct btrfs_disk_key disk_key;
4623
4624 btrfs_node_key(parent, &disk_key, 0);
d6a0a126 4625 fixup_low_keys(root, path, &disk_key, level + 1);
be0e5c09 4626 }
d6025579 4627 btrfs_mark_buffer_dirty(parent);
be0e5c09
CM
4628}
4629
323ac95b
CM
4630/*
4631 * a helper function to delete the leaf pointed to by path->slots[1] and
5d4f98a2 4632 * path->nodes[1].
323ac95b
CM
4633 *
4634 * This deletes the pointer in path->nodes[1] and frees the leaf
4635 * block extent. zero is returned if it all worked out, < 0 otherwise.
4636 *
4637 * The path must have already been setup for deleting the leaf, including
4638 * all the proper balancing. path->nodes[1] must be locked.
4639 */
143bede5
JM
4640static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4641 struct btrfs_root *root,
4642 struct btrfs_path *path,
4643 struct extent_buffer *leaf)
323ac95b 4644{
5d4f98a2 4645 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
afe5fea7 4646 del_ptr(root, path, 1, path->slots[1]);
323ac95b 4647
4d081c41
CM
4648 /*
4649 * btrfs_free_extent is expensive, we want to make sure we
4650 * aren't holding any locks when we call it
4651 */
4652 btrfs_unlock_up_safe(path, 0);
4653
f0486c68
YZ
4654 root_sub_used(root, leaf->len);
4655
3083ee2e 4656 extent_buffer_get(leaf);
5581a51a 4657 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3083ee2e 4658 free_extent_buffer_stale(leaf);
323ac95b 4659}
74123bd7
CM
4660/*
4661 * delete the item at the leaf level in path. If that empties
4662 * the leaf, remove it from the tree
4663 */
85e21bac
CM
4664int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4665 struct btrfs_path *path, int slot, int nr)
be0e5c09 4666{
5f39d397
CM
4667 struct extent_buffer *leaf;
4668 struct btrfs_item *item;
85e21bac
CM
4669 int last_off;
4670 int dsize = 0;
aa5d6bed
CM
4671 int ret = 0;
4672 int wret;
85e21bac 4673 int i;
7518a238 4674 u32 nritems;
cfed81a0
CM
4675 struct btrfs_map_token token;
4676
4677 btrfs_init_map_token(&token);
be0e5c09 4678
5f39d397 4679 leaf = path->nodes[0];
85e21bac
CM
4680 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4681
4682 for (i = 0; i < nr; i++)
4683 dsize += btrfs_item_size_nr(leaf, slot + i);
4684
5f39d397 4685 nritems = btrfs_header_nritems(leaf);
be0e5c09 4686
85e21bac 4687 if (slot + nr != nritems) {
123abc88 4688 int data_end = leaf_data_end(root, leaf);
5f39d397
CM
4689
4690 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
d6025579
CM
4691 data_end + dsize,
4692 btrfs_leaf_data(leaf) + data_end,
85e21bac 4693 last_off - data_end);
5f39d397 4694
85e21bac 4695 for (i = slot + nr; i < nritems; i++) {
5f39d397 4696 u32 ioff;
db94535d 4697
5f39d397 4698 item = btrfs_item_nr(leaf, i);
cfed81a0
CM
4699 ioff = btrfs_token_item_offset(leaf, item, &token);
4700 btrfs_set_token_item_offset(leaf, item,
4701 ioff + dsize, &token);
0783fcfc 4702 }
db94535d 4703
5f39d397 4704 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
85e21bac 4705 btrfs_item_nr_offset(slot + nr),
d6025579 4706 sizeof(struct btrfs_item) *
85e21bac 4707 (nritems - slot - nr));
be0e5c09 4708 }
85e21bac
CM
4709 btrfs_set_header_nritems(leaf, nritems - nr);
4710 nritems -= nr;
5f39d397 4711
74123bd7 4712 /* delete the leaf if we've emptied it */
7518a238 4713 if (nritems == 0) {
5f39d397
CM
4714 if (leaf == root->node) {
4715 btrfs_set_header_level(leaf, 0);
9a8dd150 4716 } else {
f0486c68
YZ
4717 btrfs_set_path_blocking(path);
4718 clean_tree_block(trans, root, leaf);
143bede5 4719 btrfs_del_leaf(trans, root, path, leaf);
9a8dd150 4720 }
be0e5c09 4721 } else {
7518a238 4722 int used = leaf_space_used(leaf, 0, nritems);
aa5d6bed 4723 if (slot == 0) {
5f39d397
CM
4724 struct btrfs_disk_key disk_key;
4725
4726 btrfs_item_key(leaf, &disk_key, 0);
d6a0a126 4727 fixup_low_keys(root, path, &disk_key, 1);
aa5d6bed 4728 }
aa5d6bed 4729
74123bd7 4730 /* delete the leaf if it is mostly empty */
d717aa1d 4731 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
be0e5c09
CM
4732 /* push_leaf_left fixes the path.
4733 * make sure the path still points to our leaf
4734 * for possible call to del_ptr below
4735 */
4920c9ac 4736 slot = path->slots[1];
5f39d397
CM
4737 extent_buffer_get(leaf);
4738
b9473439 4739 btrfs_set_path_blocking(path);
99d8f83c
CM
4740 wret = push_leaf_left(trans, root, path, 1, 1,
4741 1, (u32)-1);
54aa1f4d 4742 if (wret < 0 && wret != -ENOSPC)
aa5d6bed 4743 ret = wret;
5f39d397
CM
4744
4745 if (path->nodes[0] == leaf &&
4746 btrfs_header_nritems(leaf)) {
99d8f83c
CM
4747 wret = push_leaf_right(trans, root, path, 1,
4748 1, 1, 0);
54aa1f4d 4749 if (wret < 0 && wret != -ENOSPC)
aa5d6bed
CM
4750 ret = wret;
4751 }
5f39d397
CM
4752
4753 if (btrfs_header_nritems(leaf) == 0) {
323ac95b 4754 path->slots[1] = slot;
143bede5 4755 btrfs_del_leaf(trans, root, path, leaf);
5f39d397 4756 free_extent_buffer(leaf);
143bede5 4757 ret = 0;
5de08d7d 4758 } else {
925baedd
CM
4759 /* if we're still in the path, make sure
4760 * we're dirty. Otherwise, one of the
4761 * push_leaf functions must have already
4762 * dirtied this buffer
4763 */
4764 if (path->nodes[0] == leaf)
4765 btrfs_mark_buffer_dirty(leaf);
5f39d397 4766 free_extent_buffer(leaf);
be0e5c09 4767 }
d5719762 4768 } else {
5f39d397 4769 btrfs_mark_buffer_dirty(leaf);
be0e5c09
CM
4770 }
4771 }
aa5d6bed 4772 return ret;
be0e5c09
CM
4773}
4774
7bb86316 4775/*
925baedd 4776 * search the tree again to find a leaf with lesser keys
7bb86316
CM
4777 * returns 0 if it found something or 1 if there are no lesser leaves.
4778 * returns < 0 on io errors.
d352ac68
CM
4779 *
4780 * This may release the path, and so you may lose any locks held at the
4781 * time you call it.
7bb86316 4782 */
35a3621b 4783static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
7bb86316 4784{
925baedd
CM
4785 struct btrfs_key key;
4786 struct btrfs_disk_key found_key;
4787 int ret;
7bb86316 4788
925baedd 4789 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 4790
925baedd
CM
4791 if (key.offset > 0)
4792 key.offset--;
4793 else if (key.type > 0)
4794 key.type--;
4795 else if (key.objectid > 0)
4796 key.objectid--;
4797 else
4798 return 1;
7bb86316 4799
b3b4aa74 4800 btrfs_release_path(path);
925baedd
CM
4801 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4802 if (ret < 0)
4803 return ret;
4804 btrfs_item_key(path->nodes[0], &found_key, 0);
4805 ret = comp_keys(&found_key, &key);
4806 if (ret < 0)
4807 return 0;
4808 return 1;
7bb86316
CM
4809}
4810
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through. Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	/* restart from the (read-locked) root after any path release */
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	/* if even the root predates min_trans, nothing below can qualify */
	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		/* inexact match: step back to the pointer covering min_key */
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				/* found a new starting key: restart descent */
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		/* keep only the locks on the path to the new child */
		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
4932
7069830a
AB
4933static void tree_move_down(struct btrfs_root *root,
4934 struct btrfs_path *path,
4935 int *level, int root_level)
4936{
74dd17fb 4937 BUG_ON(*level == 0);
7069830a
AB
4938 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4939 path->slots[*level]);
4940 path->slots[*level - 1] = 0;
4941 (*level)--;
4942}
4943
4944static int tree_move_next_or_upnext(struct btrfs_root *root,
4945 struct btrfs_path *path,
4946 int *level, int root_level)
4947{
4948 int ret = 0;
4949 int nritems;
4950 nritems = btrfs_header_nritems(path->nodes[*level]);
4951
4952 path->slots[*level]++;
4953
74dd17fb 4954 while (path->slots[*level] >= nritems) {
7069830a
AB
4955 if (*level == root_level)
4956 return -1;
4957
4958 /* move upnext */
4959 path->slots[*level] = 0;
4960 free_extent_buffer(path->nodes[*level]);
4961 path->nodes[*level] = NULL;
4962 (*level)++;
4963 path->slots[*level]++;
4964
4965 nritems = btrfs_header_nritems(path->nodes[*level]);
4966 ret = 1;
4967 }
4968 return ret;
4969}
4970
4971/*
4972 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
4973 * or down.
4974 */
4975static int tree_advance(struct btrfs_root *root,
4976 struct btrfs_path *path,
4977 int *level, int root_level,
4978 int allow_down,
4979 struct btrfs_key *key)
4980{
4981 int ret;
4982
4983 if (*level == 0 || !allow_down) {
4984 ret = tree_move_next_or_upnext(root, path, level, root_level);
4985 } else {
4986 tree_move_down(root, path, level, root_level);
4987 ret = 0;
4988 }
4989 if (ret >= 0) {
4990 if (*level == 0)
4991 btrfs_item_key_to_cpu(path->nodes[*level], key,
4992 path->slots[*level]);
4993 else
4994 btrfs_node_key_to_cpu(path->nodes[*level], key,
4995 path->slots[*level]);
4996 }
4997 return ret;
4998}
4999
5000static int tree_compare_item(struct btrfs_root *left_root,
5001 struct btrfs_path *left_path,
5002 struct btrfs_path *right_path,
5003 char *tmp_buf)
5004{
5005 int cmp;
5006 int len1, len2;
5007 unsigned long off1, off2;
5008
5009 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5010 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5011 if (len1 != len2)
5012 return 1;
5013
5014 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5015 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5016 right_path->slots[0]);
5017
5018 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5019
5020 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5021 if (cmp)
5022 return 1;
5023 return 0;
5024}
5025
/* advance codes handed to tree_advance(): full advance vs. next-slot only */
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;		/* scratch for tree_compare_item() */
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_start_ctransid;	/* ctransid snapshots used to detect */
	u64 right_start_ctransid;	/* concurrent tree modification */
	u64 ctransid;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	/* one leaf worth of scratch space for deep item compares */
	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	/* commit roots are read only, so no locking needed */
	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	spin_lock(&left_root->root_item_lock);
	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
	spin_unlock(&left_root->root_item_lock);

	spin_lock(&right_root->root_item_lock);
	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
	spin_unlock(&right_root->root_item_lock);

	trans = btrfs_join_transaction(left_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leafs
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leafs
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);

	/* prime the starting keys for both trees */
	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		/*
		 * We need to make sure the transaction does not get committed
		 * while we do anything on commit roots. This means, we need to
		 * join and leave transactions for every item that we process.
		 */
		if (trans && btrfs_should_end_transaction(trans, left_root)) {
			btrfs_release_path(left_path);
			btrfs_release_path(right_path);

			ret = btrfs_end_transaction(trans, left_root);
			trans = NULL;
			if (ret < 0)
				goto out;
		}
		/* now rejoin the transaction */
		if (!trans) {
			trans = btrfs_join_transaction(left_root);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				trans = NULL;
				goto out;
			}

			/*
			 * if either root's ctransid moved since we started,
			 * the tree was modified underneath us: bail out.
			 */
			spin_lock(&left_root->root_item_lock);
			ctransid = btrfs_root_ctransid(&left_root->root_item);
			spin_unlock(&left_root->root_item_lock);
			if (ctransid != left_start_ctransid)
				left_start_ctransid = 0;

			spin_lock(&right_root->root_item_lock);
			ctransid = btrfs_root_ctransid(&right_root->root_item);
			spin_unlock(&right_root->root_item_lock);
			if (ctransid != right_start_ctransid)
				right_start_ctransid = 0;

			if (!left_start_ctransid || !right_start_ctransid) {
				WARN(1, KERN_WARNING
					"btrfs: btrfs_compare_tree detected "
					"a change in one of the trees while "
					"iterating. This is probably a "
					"bug.\n");
				ret = -EIO;
				goto out;
			}

			/*
			 * the commit root may have changed, so start again
			 * where we stopped
			 */
			left_path->lowest_level = left_level;
			right_path->lowest_level = right_level;
			ret = btrfs_search_slot(NULL, left_root,
					&left_key, left_path, 0, 0);
			if (ret < 0)
				goto out;
			ret = btrfs_search_slot(NULL, right_root,
					&right_key, right_path, 0, 0);
			if (ret < 0)
				goto out;
		}

		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			/* left is done: everything left on the right was deleted */
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			/* right is done: everything left on the left is new */
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				/* same key on both sides: deep-compare contents */
				enum btrfs_compare_tree_result cmp;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_root, left_path,
						right_path, tmp_buf);
				if (ret)
					cmp = BTRFS_COMPARE_TREE_CHANGED;
				else
					cmp = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_root, right_root,
						 left_path, right_path,
						 &left_key, cmp, ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kfree(tmp_buf);

	if (trans) {
		if (!ret)
			ret = btrfs_end_transaction(trans, left_root);
		else
			btrfs_end_transaction(trans, left_root);
	}

	return ret;
}
5361
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path. It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		/* look one slot past the current position at this level */
		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			/*
			 * parent is locked: safe to just climb and look at
			 * its next slot directly
			 */
			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			/*
			 * parent unlocked: re-search from the last key at
			 * this level so we hold a consistent path again
			 */
			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			/* exact match: step past the key we already had */
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			/* skip node pointers older than min_trans */
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
5437
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	/* time_seq == 0: operate on the current tree, no historical replay */
	return btrfs_next_old_leaf(root, path, 0);
}
5447
/*
 * Walk to the leaf that follows the current one in key order, optionally
 * replaying the tree as it was at @time_seq (0 means the current tree).
 *
 * Returns 0 when the path now points at the next leaf, 1 when there is no
 * greater leaf, and < 0 on I/O errors. May drop and re-take locks; the
 * whole walk restarts via the "again" label whenever that race loses.
 */
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	/* remember the last key of this leaf so we can re-find our spot */
	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks. A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block. So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	/* climb until we find a level with a slot to the right */
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/* drop the block from a previous iteration of this loop */
		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				/* try-lock failed: block and wait for it */
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	/* now descend along slot 0 back down to the leaf */
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
0b86a832 5602
3f157a2f
CM
5603/*
5604 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5605 * searching until it gets past min_objectid or finds an item of 'type'
5606 *
5607 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5608 */
0b86a832
CM
5609int btrfs_previous_item(struct btrfs_root *root,
5610 struct btrfs_path *path, u64 min_objectid,
5611 int type)
5612{
5613 struct btrfs_key found_key;
5614 struct extent_buffer *leaf;
e02119d5 5615 u32 nritems;
0b86a832
CM
5616 int ret;
5617
d397712b 5618 while (1) {
0b86a832 5619 if (path->slots[0] == 0) {
b4ce94de 5620 btrfs_set_path_blocking(path);
0b86a832
CM
5621 ret = btrfs_prev_leaf(root, path);
5622 if (ret != 0)
5623 return ret;
5624 } else {
5625 path->slots[0]--;
5626 }
5627 leaf = path->nodes[0];
e02119d5
CM
5628 nritems = btrfs_header_nritems(leaf);
5629 if (nritems == 0)
5630 return 1;
5631 if (path->slots[0] == nritems)
5632 path->slots[0]--;
5633
0b86a832 5634 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
e02119d5
CM
5635 if (found_key.objectid < min_objectid)
5636 break;
0a4eefbb
YZ
5637 if (found_key.type == type)
5638 return 0;
e02119d5
CM
5639 if (found_key.objectid == min_objectid &&
5640 found_key.type < type)
5641 break;
0b86a832
CM
5642 }
5643 return 1;
5644}