/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

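/*
 * Lockless peek at the caching state.  The smp_mb() here presumably pairs
 * with the ->cached updates made under cache->lock in cache_block_group()
 * and caching_kthread(), so that a polling reader sees BTRFS_CACHE_FINISHED
 * only after the caching results have been published.
 */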
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called from the block group caching code (caching_kthread).
 * Since we could have freed extents, we need to check the pinned_extents
 * tree for any extents that can't be used yet, because their free space
 * will only be released when the transaction commits.
 */
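/*
 * Worked example (hypothetical numbers): caching the range [0, 100) while
 * [30, 50) is still pinned adds [0, 30) and [50, 100) to the free space
 * cache and returns 80 bytes added; the pinned 20 bytes become usable only
 * after the transaction commits and the range is unpinned.
 */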
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_readonly += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from the on-disk cache during a commit since
	 * we need to have the normal tree locking.
	 */
	if (!trans->transaction->in_commit) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1)
			return 0;
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}
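
/*
 * To summarize the states driven by cache_block_group(): a block group
 * starts out BTRFS_CACHE_NO; outside of a commit we first try to load the
 * free space cache from disk, which on success moves the group straight to
 * BTRFS_CACHE_FINISHED.  Otherwise the group is marked BTRFS_CACHE_STARTED
 * and caching_kthread() scans the extent tree (in the commit root) and sets
 * BTRFS_CACHE_FINISHED once the whole range has been added to the free
 * space cache.
 */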

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
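
/*
 * Example: div_factor(1024, 9) == 921 (1024 * 9 = 9216, 9216 / 10 = 921),
 * so the factor-9 pass in btrfs_find_block_group() below accepts a group
 * only if less than ~90% of it is used, pinned or reserved.
 */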

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all the cases where implicit back refs are used.  Their major
 * shortcoming is the overhead: every time a tree block gets COWed, we have
 * to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */
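
/*
 * A concrete, hypothetical example of the key layouts described above:
 * a data extent at bytenr 4096 with length 8192, referenced by inode 257
 * at file offset 0 in subvolume 5, is described by the extent item
 *
 *     (4096, BTRFS_EXTENT_ITEM_KEY, 8192)
 *
 * and its implicit back ref by
 *
 *     (4096, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * If that ref is later converted to a full back ref, the key becomes
 *
 *     (4096, BTRFS_SHARED_DATA_REF_KEY, <bytenr of the referencing leaf>)
 */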

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
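
/*
 * Note the shift by 31 (not 32) bits above: the resulting value is stored
 * as the key offset of extent data ref items, i.e. it is part of the
 * on-disk format and cannot be changed without breaking existing refs.
 */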

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
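
/*
 * extent_ref_type() picks one of the four back ref item types from two
 * bits of information: tree block vs. data extent (owner below or above
 * BTRFS_FIRST_FREE_OBJECTID) and full vs. implicit ref (parent set or not):
 *
 *                   parent > 0                    parent == 0
 *   tree block      BTRFS_SHARED_BLOCK_REF_KEY    BTRFS_TREE_BLOCK_REF_KEY
 *   data extent     BTRFS_SHARED_DATA_REF_KEY     BTRFS_EXTENT_DATA_REF_KEY
 */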

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

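/*
 * The >> 9 below converts byte offsets and lengths to 512-byte sectors,
 * the unit blkdev_issue_discard() operates in.
 */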
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
			     BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
						 parent, root_objectid,
						 (int)owner,
						 BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
						 parent, root_objectid,
						 owner, offset,
						 BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
2024 int insert_reserved)
2025 {
2026 int ret;
2027 if (btrfs_delayed_ref_is_head(node)) {
2028 struct btrfs_delayed_ref_head *head;
2029 /*
2030 * we've hit the end of the chain and we were supposed
2031 * to insert this extent into the tree. But it got
2032 * deleted before we ever needed to insert it, so all
2033 * we have to do is clean up the accounting
2034 */
2035 BUG_ON(extent_op);
2036 head = btrfs_delayed_node_to_head(node);
2037 if (insert_reserved) {
2038 btrfs_pin_extent(root, node->bytenr,
2039 node->num_bytes, 1);
2040 if (head->is_data) {
2041 ret = btrfs_del_csums(trans, root,
2042 node->bytenr,
2043 node->num_bytes);
2044 BUG_ON(ret);
2045 }
2046 }
2047 mutex_unlock(&head->mutex);
2048 return 0;
2049 }
2050
2051 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2052 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2053 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2054 insert_reserved);
2055 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2056 node->type == BTRFS_SHARED_DATA_REF_KEY)
2057 ret = run_delayed_data_ref(trans, root, node, extent_op,
2058 insert_reserved);
2059 else
2060 BUG();
2061 return ret;
2062 }
2063
2064 static noinline struct btrfs_delayed_ref_node *
2065 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2066 {
2067 struct rb_node *node;
2068 struct btrfs_delayed_ref_node *ref;
2069 int action = BTRFS_ADD_DELAYED_REF;
2070 again:
2071 /*
2072 * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2073 * this prevents the ref count from going down to zero while
2074 * there are still pending delayed refs.
2075 */
2076 node = rb_prev(&head->node.rb_node);
2077 while (1) {
2078 if (!node)
2079 break;
2080 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2081 rb_node);
2082 if (ref->bytenr != head->node.bytenr)
2083 break;
2084 if (ref->action == action)
2085 return ref;
2086 node = rb_prev(node);
2087 }
2088 if (action == BTRFS_ADD_DELAYED_REF) {
2089 action = BTRFS_DROP_DELAYED_REF;
2090 goto again;
2091 }
2092 return NULL;
2093 }
2094
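/*
 * run the delayed refs for a cluster of ref heads.  Returns the number
 * of refs processed.  Expects delayed_refs->lock to be held on entry;
 * the lock is dropped around each actual ref update and retaken before
 * the next iteration.
 */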
2095 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2096 struct btrfs_root *root,
2097 struct list_head *cluster)
2098 {
2099 struct btrfs_delayed_ref_root *delayed_refs;
2100 struct btrfs_delayed_ref_node *ref;
2101 struct btrfs_delayed_ref_head *locked_ref = NULL;
2102 struct btrfs_delayed_extent_op *extent_op;
2103 int ret;
2104 int count = 0;
2105 int must_insert_reserved = 0;
2106
2107 delayed_refs = &trans->transaction->delayed_refs;
2108 while (1) {
2109 if (!locked_ref) {
2110 /* pick a new head ref from the cluster list */
2111 if (list_empty(cluster))
2112 break;
2113
2114 locked_ref = list_entry(cluster->next,
2115 struct btrfs_delayed_ref_head, cluster);
2116
2117 /* grab the lock that says we are going to process
2118 * all the refs for this head */
2119 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2120
2121 /*
2122 * we may have dropped the spin lock to get the head
2123 * mutex lock, and that might have given someone else
2124 * time to free the head. If that's true, it has been
2125 * removed from our list and we can move on.
2126 */
2127 if (ret == -EAGAIN) {
2128 locked_ref = NULL;
2129 count++;
2130 continue;
2131 }
2132 }
2133
2134 /*
2135 * record the must insert reserved flag before we
2136 * drop the spin lock.
2137 */
2138 must_insert_reserved = locked_ref->must_insert_reserved;
2139 locked_ref->must_insert_reserved = 0;
2140
2141 extent_op = locked_ref->extent_op;
2142 locked_ref->extent_op = NULL;
2143
2144 /*
2145 * locked_ref is the head node, so we have to go one
2146 * node back for any delayed ref updates
2147 */
2148 ref = select_delayed_ref(locked_ref);
2149 if (!ref) {
2150 /* All delayed refs have been processed, go ahead
2151 * and send the head node to run_one_delayed_ref,
2152 * so that any accounting fixes can happen
2153 */
2154 ref = &locked_ref->node;
2155
2156 if (extent_op && must_insert_reserved) {
2157 kfree(extent_op);
2158 extent_op = NULL;
2159 }
2160
2161 if (extent_op) {
2162 spin_unlock(&delayed_refs->lock);
2163
2164 ret = run_delayed_extent_op(trans, root,
2165 ref, extent_op);
2166 BUG_ON(ret);
2167 kfree(extent_op);
2168
2169 cond_resched();
2170 spin_lock(&delayed_refs->lock);
2171 continue;
2172 }
2173
2174 list_del_init(&locked_ref->cluster);
2175 locked_ref = NULL;
2176 }
2177
2178 ref->in_tree = 0;
2179 rb_erase(&ref->rb_node, &delayed_refs->root);
2180 delayed_refs->num_entries--;
2181
2182 spin_unlock(&delayed_refs->lock);
2183
2184 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2185 must_insert_reserved);
2186 BUG_ON(ret);
2187
2188 btrfs_put_delayed_ref(ref);
2189 kfree(extent_op);
2190 count++;
2191
2192 cond_resched();
2193 spin_lock(&delayed_refs->lock);
2194 }
2195 return count;
2196 }
2197
2198 /*
2199 * this starts processing the delayed reference count updates and
2200 * extent insertions we have queued up so far. count can be
2201 * 0, which means to process everything in the tree at the start
2202 * of the run (but not newly added entries), or it can be some target
2203 * number you'd like to process.
2204 */
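/*
 * For example, a flush at commit time would look roughly like this
 * (a sketch, not a literal call site):
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 * where (unsigned long)-1 asks for the "run_all" behaviour below.
 */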
2205 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2206 struct btrfs_root *root, unsigned long count)
2207 {
2208 struct rb_node *node;
2209 struct btrfs_delayed_ref_root *delayed_refs;
2210 struct btrfs_delayed_ref_node *ref;
2211 struct list_head cluster;
2212 int ret;
2213 int run_all = count == (unsigned long)-1;
2214 int run_most = 0;
2215
2216 if (root == root->fs_info->extent_root)
2217 root = root->fs_info->tree_root;
2218
2219 delayed_refs = &trans->transaction->delayed_refs;
2220 INIT_LIST_HEAD(&cluster);
2221 again:
2222 spin_lock(&delayed_refs->lock);
2223 if (count == 0) {
2224 count = delayed_refs->num_entries * 2;
2225 run_most = 1;
2226 }
2227 while (1) {
2228 if (!(run_all || run_most) &&
2229 delayed_refs->num_heads_ready < 64)
2230 break;
2231
2232 /*
2233 * go find something we can process in the rbtree. We start at
2234 * the beginning of the tree, and then build a cluster
2235 * of refs to process starting at the first one we are able to
2236 * lock
2237 */
2238 ret = btrfs_find_ref_cluster(trans, &cluster,
2239 delayed_refs->run_delayed_start);
2240 if (ret)
2241 break;
2242
2243 ret = run_clustered_refs(trans, root, &cluster);
2244 BUG_ON(ret < 0);
2245
2246 count -= min_t(unsigned long, ret, count);
2247
2248 if (count == 0)
2249 break;
2250 }
2251
2252 if (run_all) {
2253 node = rb_first(&delayed_refs->root);
2254 if (!node)
2255 goto out;
2256 count = (unsigned long)-1;
2257
2258 while (node) {
2259 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2260 rb_node);
2261 if (btrfs_delayed_ref_is_head(ref)) {
2262 struct btrfs_delayed_ref_head *head;
2263
2264 head = btrfs_delayed_node_to_head(ref);
2265 atomic_inc(&ref->refs);
2266
2267 spin_unlock(&delayed_refs->lock);
2268 mutex_lock(&head->mutex);
2269 mutex_unlock(&head->mutex);
2270
2271 btrfs_put_delayed_ref(ref);
2272 cond_resched();
2273 goto again;
2274 }
2275 node = rb_next(node);
2276 }
2277 spin_unlock(&delayed_refs->lock);
2278 schedule_timeout(1);
2279 goto again;
2280 }
2281 out:
2282 spin_unlock(&delayed_refs->lock);
2283 return 0;
2284 }
2285
2286 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2287 struct btrfs_root *root,
2288 u64 bytenr, u64 num_bytes, u64 flags,
2289 int is_data)
2290 {
2291 struct btrfs_delayed_extent_op *extent_op;
2292 int ret;
2293
2294 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2295 if (!extent_op)
2296 return -ENOMEM;
2297
2298 extent_op->flags_to_set = flags;
2299 extent_op->update_flags = 1;
2300 extent_op->update_key = 0;
2301 extent_op->is_data = is_data ? 1 : 0;
2302
2303 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2304 if (ret)
2305 kfree(extent_op);
2306 return ret;
2307 }
2308
2309 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2310 struct btrfs_root *root,
2311 struct btrfs_path *path,
2312 u64 objectid, u64 offset, u64 bytenr)
2313 {
2314 struct btrfs_delayed_ref_head *head;
2315 struct btrfs_delayed_ref_node *ref;
2316 struct btrfs_delayed_data_ref *data_ref;
2317 struct btrfs_delayed_ref_root *delayed_refs;
2318 struct rb_node *node;
2319 int ret = 0;
2320
2321 ret = -ENOENT;
2322 delayed_refs = &trans->transaction->delayed_refs;
2323 spin_lock(&delayed_refs->lock);
2324 head = btrfs_find_delayed_ref_head(trans, bytenr);
2325 if (!head)
2326 goto out;
2327
2328 if (!mutex_trylock(&head->mutex)) {
2329 atomic_inc(&head->node.refs);
2330 spin_unlock(&delayed_refs->lock);
2331
2332 btrfs_release_path(root->fs_info->extent_root, path);
2333
2334 mutex_lock(&head->mutex);
2335 mutex_unlock(&head->mutex);
2336 btrfs_put_delayed_ref(&head->node);
2337 return -EAGAIN;
2338 }
2339
2340 node = rb_prev(&head->node.rb_node);
2341 if (!node)
2342 goto out_unlock;
2343
2344 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2345
2346 if (ref->bytenr != bytenr)
2347 goto out_unlock;
2348
2349 ret = 1;
2350 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2351 goto out_unlock;
2352
2353 data_ref = btrfs_delayed_node_to_data_ref(ref);
2354
2355 node = rb_prev(node);
2356 if (node) {
2357 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2358 if (ref->bytenr == bytenr)
2359 goto out_unlock;
2360 }
2361
2362 if (data_ref->root != root->root_key.objectid ||
2363 data_ref->objectid != objectid || data_ref->offset != offset)
2364 goto out_unlock;
2365
2366 ret = 0;
2367 out_unlock:
2368 mutex_unlock(&head->mutex);
2369 out:
2370 spin_unlock(&delayed_refs->lock);
2371 return ret;
2372 }
2373
2374 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2375 struct btrfs_root *root,
2376 struct btrfs_path *path,
2377 u64 objectid, u64 offset, u64 bytenr)
2378 {
2379 struct btrfs_root *extent_root = root->fs_info->extent_root;
2380 struct extent_buffer *leaf;
2381 struct btrfs_extent_data_ref *ref;
2382 struct btrfs_extent_inline_ref *iref;
2383 struct btrfs_extent_item *ei;
2384 struct btrfs_key key;
2385 u32 item_size;
2386 int ret;
2387
2388 key.objectid = bytenr;
2389 key.offset = (u64)-1;
2390 key.type = BTRFS_EXTENT_ITEM_KEY;
2391
2392 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2393 if (ret < 0)
2394 goto out;
2395 BUG_ON(ret == 0);
2396
2397 ret = -ENOENT;
2398 if (path->slots[0] == 0)
2399 goto out;
2400
2401 path->slots[0]--;
2402 leaf = path->nodes[0];
2403 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2404
2405 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2406 goto out;
2407
2408 ret = 1;
2409 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2410 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2411 if (item_size < sizeof(*ei)) {
2412 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2413 goto out;
2414 }
2415 #endif
2416 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2417
2418 if (item_size != sizeof(*ei) +
2419 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2420 goto out;
2421
2422 if (btrfs_extent_generation(leaf, ei) <=
2423 btrfs_root_last_snapshot(&root->root_item))
2424 goto out;
2425
2426 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2427 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2428 BTRFS_EXTENT_DATA_REF_KEY)
2429 goto out;
2430
2431 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2432 if (btrfs_extent_refs(leaf, ei) !=
2433 btrfs_extent_data_ref_count(leaf, ref) ||
2434 btrfs_extent_data_ref_root(leaf, ref) !=
2435 root->root_key.objectid ||
2436 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2437 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2438 goto out;
2439
2440 ret = 0;
2441 out:
2442 return ret;
2443 }
2444
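/*
 * Returns 0 when at least one of the two checks proves the data extent
 * at @bytenr is referenced only by (@root, @objectid, @offset).  Any
 * nonzero return (1, -ENOENT or another error) means the caller must
 * assume the extent is shared.
 */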
2445 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2446 struct btrfs_root *root,
2447 u64 objectid, u64 offset, u64 bytenr)
2448 {
2449 struct btrfs_path *path;
2450 int ret;
2451 int ret2;
2452
2453 path = btrfs_alloc_path();
2454 if (!path)
2455 return -ENOMEM;
2456
2457 do {
2458 ret = check_committed_ref(trans, root, path, objectid,
2459 offset, bytenr);
2460 if (ret && ret != -ENOENT)
2461 goto out;
2462
2463 ret2 = check_delayed_ref(trans, root, path, objectid,
2464 offset, bytenr);
2465 } while (ret2 == -EAGAIN);
2466
2467 if (ret2 && ret2 != -ENOENT) {
2468 ret = ret2;
2469 goto out;
2470 }
2471
2472 if (ret != -ENOENT || ret2 != -ENOENT)
2473 ret = 0;
2474 out:
2475 btrfs_free_path(path);
2476 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2477 WARN_ON(ret > 0);
2478 return ret;
2479 }
2480
2481 #if 0
2482 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2483 struct extent_buffer *buf, u32 nr_extents)
2484 {
2485 struct btrfs_key key;
2486 struct btrfs_file_extent_item *fi;
2487 u64 root_gen;
2488 u32 nritems;
2489 int i;
2490 int level;
2491 int ret = 0;
2492 int shared = 0;
2493
2494 if (!root->ref_cows)
2495 return 0;
2496
2497 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2498 shared = 0;
2499 root_gen = root->root_key.offset;
2500 } else {
2501 shared = 1;
2502 root_gen = trans->transid - 1;
2503 }
2504
2505 level = btrfs_header_level(buf);
2506 nritems = btrfs_header_nritems(buf);
2507
2508 if (level == 0) {
2509 struct btrfs_leaf_ref *ref;
2510 struct btrfs_extent_info *info;
2511
2512 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2513 if (!ref) {
2514 ret = -ENOMEM;
2515 goto out;
2516 }
2517
2518 ref->root_gen = root_gen;
2519 ref->bytenr = buf->start;
2520 ref->owner = btrfs_header_owner(buf);
2521 ref->generation = btrfs_header_generation(buf);
2522 ref->nritems = nr_extents;
2523 info = ref->extents;
2524
2525 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2526 u64 disk_bytenr;
2527 btrfs_item_key_to_cpu(buf, &key, i);
2528 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2529 continue;
2530 fi = btrfs_item_ptr(buf, i,
2531 struct btrfs_file_extent_item);
2532 if (btrfs_file_extent_type(buf, fi) ==
2533 BTRFS_FILE_EXTENT_INLINE)
2534 continue;
2535 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2536 if (disk_bytenr == 0)
2537 continue;
2538
2539 info->bytenr = disk_bytenr;
2540 info->num_bytes =
2541 btrfs_file_extent_disk_num_bytes(buf, fi);
2542 info->objectid = key.objectid;
2543 info->offset = key.offset;
2544 info++;
2545 }
2546
2547 ret = btrfs_add_leaf_ref(root, ref, shared);
2548 if (ret == -EEXIST && shared) {
2549 struct btrfs_leaf_ref *old;
2550 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2551 BUG_ON(!old);
2552 btrfs_remove_leaf_ref(root, old);
2553 btrfs_free_leaf_ref(root, old);
2554 ret = btrfs_add_leaf_ref(root, ref, shared);
2555 }
2556 WARN_ON(ret);
2557 btrfs_free_leaf_ref(root, ref);
2558 }
2559 out:
2560 return ret;
2561 }
2562
2563 /* when a block goes through cow, we update the reference counts of
2564 * everything that block points to. The internal pointers of the block
2565 * can be in just about any order, and it is likely to have clusters of
2566 * things that are close together and clusters of things that are not.
2567 *
2568 * To help reduce the seeks that come with updating all of these reference
2569 * counts, sort them by byte number before actual updates are done.
2570 *
2571 * struct refsort is used to match byte number to slot in the btree block.
2572 * we sort based on the byte number and then use the slot to actually
2573 * find the item.
2574 *
2575 * struct refsort is smaller than struct btrfs_item and smaller than
2576 * struct btrfs_key_ptr. Since we're currently limited to the page size
2577 * for a btree block, there's no way for a kmalloc of refsorts for a
2578 * single node to be bigger than a page.
2579 */
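/*
 * A minimal usage sketch (hypothetical; "sorted" and "nritems" are
 * illustrative names): fill an array of refsort entries from the
 * block, then
 *
 *	sort(sorted, nritems, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * and walk the array in bytenr order, using ->slot to locate each item.
 */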
2580 struct refsort {
2581 u64 bytenr;
2582 u32 slot;
2583 };
2584
2585 /*
2586 * for passing into sort()
2587 */
2588 static int refsort_cmp(const void *a_void, const void *b_void)
2589 {
2590 const struct refsort *a = a_void;
2591 const struct refsort *b = b_void;
2592
2593 if (a->bytenr < b->bytenr)
2594 return -1;
2595 if (a->bytenr > b->bytenr)
2596 return 1;
2597 return 0;
2598 }
2599 #endif
2600
2601 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2602 struct btrfs_root *root,
2603 struct extent_buffer *buf,
2604 int full_backref, int inc)
2605 {
2606 u64 bytenr;
2607 u64 num_bytes;
2608 u64 parent;
2609 u64 ref_root;
2610 u32 nritems;
2611 struct btrfs_key key;
2612 struct btrfs_file_extent_item *fi;
2613 int i;
2614 int level;
2615 int ret = 0;
2616 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2617 u64, u64, u64, u64, u64, u64);
2618
2619 ref_root = btrfs_header_owner(buf);
2620 nritems = btrfs_header_nritems(buf);
2621 level = btrfs_header_level(buf);
2622
2623 if (!root->ref_cows && level == 0)
2624 return 0;
2625
2626 if (inc)
2627 process_func = btrfs_inc_extent_ref;
2628 else
2629 process_func = btrfs_free_extent;
2630
2631 if (full_backref)
2632 parent = buf->start;
2633 else
2634 parent = 0;
2635
2636 for (i = 0; i < nritems; i++) {
2637 if (level == 0) {
2638 btrfs_item_key_to_cpu(buf, &key, i);
2639 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2640 continue;
2641 fi = btrfs_item_ptr(buf, i,
2642 struct btrfs_file_extent_item);
2643 if (btrfs_file_extent_type(buf, fi) ==
2644 BTRFS_FILE_EXTENT_INLINE)
2645 continue;
2646 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2647 if (bytenr == 0)
2648 continue;
2649
2650 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2651 key.offset -= btrfs_file_extent_offset(buf, fi);
2652 ret = process_func(trans, root, bytenr, num_bytes,
2653 parent, ref_root, key.objectid,
2654 key.offset);
2655 if (ret)
2656 goto fail;
2657 } else {
2658 bytenr = btrfs_node_blockptr(buf, i);
2659 num_bytes = btrfs_level_size(root, level - 1);
2660 ret = process_func(trans, root, bytenr, num_bytes,
2661 parent, ref_root, level - 1, 0);
2662 if (ret)
2663 goto fail;
2664 }
2665 }
2666 return 0;
2667 fail:
2668 BUG();
2669 return ret;
2670 }
2671
2672 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2673 struct extent_buffer *buf, int full_backref)
2674 {
2675 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2676 }
2677
2678 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2679 struct extent_buffer *buf, int full_backref)
2680 {
2681 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2682 }
2683
2684 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2685 struct btrfs_root *root,
2686 struct btrfs_path *path,
2687 struct btrfs_block_group_cache *cache)
2688 {
2689 int ret;
2690 struct btrfs_root *extent_root = root->fs_info->extent_root;
2691 unsigned long bi;
2692 struct extent_buffer *leaf;
2693
2694 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2695 if (ret < 0)
2696 goto fail;
2697 BUG_ON(ret);
2698
2699 leaf = path->nodes[0];
2700 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2701 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2702 btrfs_mark_buffer_dirty(leaf);
2703 btrfs_release_path(extent_root, path);
2704 fail:
2705 if (ret)
2706 return ret;
2707 return 0;
2708
2709 }
2710
2711 static struct btrfs_block_group_cache *
2712 next_block_group(struct btrfs_root *root,
2713 struct btrfs_block_group_cache *cache)
2714 {
2715 struct rb_node *node;
2716 spin_lock(&root->fs_info->block_group_cache_lock);
2717 node = rb_next(&cache->cache_node);
2718 btrfs_put_block_group(cache);
2719 if (node) {
2720 cache = rb_entry(node, struct btrfs_block_group_cache,
2721 cache_node);
2722 btrfs_get_block_group(cache);
2723 } else
2724 cache = NULL;
2725 spin_unlock(&root->fs_info->block_group_cache_lock);
2726 return cache;
2727 }
2728
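/*
 * set up the free space cache inode for a block group: groups under
 * 100MB are marked BTRFS_DC_WRITTEN and skipped, otherwise the cache
 * inode is looked up (or created), truncated, and enough space is
 * preallocated to write the cache out later.  The group ends up in
 * BTRFS_DC_SETUP on success or BTRFS_DC_ERROR on failure.
 */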
2729 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2730 struct btrfs_trans_handle *trans,
2731 struct btrfs_path *path)
2732 {
2733 struct btrfs_root *root = block_group->fs_info->tree_root;
2734 struct inode *inode = NULL;
2735 u64 alloc_hint = 0;
2736 int num_pages = 0;
2737 int retries = 0;
2738 int ret = 0;
2739
2740 /*
2741 * If this block group is smaller than 100 megs, don't bother caching the
2742 * block group.
2743 */
2744 if (block_group->key.offset < (100 * 1024 * 1024)) {
2745 spin_lock(&block_group->lock);
2746 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2747 spin_unlock(&block_group->lock);
2748 return 0;
2749 }
2750
2751 again:
2752 inode = lookup_free_space_inode(root, block_group, path);
2753 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2754 ret = PTR_ERR(inode);
2755 btrfs_release_path(root, path);
2756 goto out;
2757 }
2758
2759 if (IS_ERR(inode)) {
2760 BUG_ON(retries);
2761 retries++;
2762
2763 if (block_group->ro)
2764 goto out_free;
2765
2766 ret = create_free_space_inode(root, trans, block_group, path);
2767 if (ret)
2768 goto out_free;
2769 goto again;
2770 }
2771
2772 /*
2773 * We want to set the generation to 0, that way if anything goes wrong
2774 * from here on out we know not to trust this cache when we load up next
2775 * time.
2776 */
2777 BTRFS_I(inode)->generation = 0;
2778 ret = btrfs_update_inode(trans, root, inode);
2779 WARN_ON(ret);
2780
2781 if (i_size_read(inode) > 0) {
2782 ret = btrfs_truncate_free_space_cache(root, trans, path,
2783 inode);
2784 if (ret)
2785 goto out_put;
2786 }
2787
2788 spin_lock(&block_group->lock);
2789 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2790 spin_unlock(&block_group->lock);
2791 goto out_put;
2792 }
2793 spin_unlock(&block_group->lock);
2794
2795 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2796 if (!num_pages)
2797 num_pages = 1;
2798
2799 /*
2800 * Just to make absolutely sure we have enough space, we're going to
2801 * preallocate 16 pages worth of space for each block group. In
2802 * practice we ought to use at most 8, but we need extra space so we can
2803 * add our header and have a terminator between the extents and the
2804 * bitmaps.
2805 */
2806 num_pages *= 16;
2807 num_pages *= PAGE_CACHE_SIZE;
2808
2809 ret = btrfs_check_data_free_space(inode, num_pages);
2810 if (ret)
2811 goto out_put;
2812
2813 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2814 num_pages, num_pages,
2815 &alloc_hint);
2816 btrfs_free_reserved_data_space(inode, num_pages);
2817 out_put:
2818 iput(inode);
2819 out_free:
2820 btrfs_release_path(root, path);
2821 out:
2822 spin_lock(&block_group->lock);
2823 if (ret)
2824 block_group->disk_cache_state = BTRFS_DC_ERROR;
2825 else
2826 block_group->disk_cache_state = BTRFS_DC_SETUP;
2827 spin_unlock(&block_group->lock);
2828
2829 return ret;
2830 }
2831
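/*
 * three passes over the block groups:
 *
 * 1) set up the free space cache inode for every group still in
 *    BTRFS_DC_CLEAR.
 * 2) write out the dirty block group items, moving groups from
 *    BTRFS_DC_SETUP to BTRFS_DC_NEED_WRITE.
 * 3) write out the free space caches for the BTRFS_DC_NEED_WRITE
 *    groups and mark them BTRFS_DC_WRITTEN.
 */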
2832 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2833 struct btrfs_root *root)
2834 {
2835 struct btrfs_block_group_cache *cache;
2836 int err = 0;
2837 struct btrfs_path *path;
2838 u64 last = 0;
2839
2840 path = btrfs_alloc_path();
2841 if (!path)
2842 return -ENOMEM;
2843
2844 again:
2845 while (1) {
2846 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2847 while (cache) {
2848 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2849 break;
2850 cache = next_block_group(root, cache);
2851 }
2852 if (!cache) {
2853 if (last == 0)
2854 break;
2855 last = 0;
2856 continue;
2857 }
2858 err = cache_save_setup(cache, trans, path);
2859 last = cache->key.objectid + cache->key.offset;
2860 btrfs_put_block_group(cache);
2861 }
2862
2863 while (1) {
2864 if (last == 0) {
2865 err = btrfs_run_delayed_refs(trans, root,
2866 (unsigned long)-1);
2867 BUG_ON(err);
2868 }
2869
2870 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2871 while (cache) {
2872 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2873 btrfs_put_block_group(cache);
2874 goto again;
2875 }
2876
2877 if (cache->dirty)
2878 break;
2879 cache = next_block_group(root, cache);
2880 }
2881 if (!cache) {
2882 if (last == 0)
2883 break;
2884 last = 0;
2885 continue;
2886 }
2887
2888 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2889 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2890 cache->dirty = 0;
2891 last = cache->key.objectid + cache->key.offset;
2892
2893 err = write_one_cache_group(trans, root, path, cache);
2894 BUG_ON(err);
2895 btrfs_put_block_group(cache);
2896 }
2897
2898 while (1) {
2899 /*
2900 * I don't think this is needed since we're just marking our
2901 * preallocated extent as written, but it can't hurt just in
2902 * case.
2903 */
2904 if (last == 0) {
2905 err = btrfs_run_delayed_refs(trans, root,
2906 (unsigned long)-1);
2907 BUG_ON(err);
2908 }
2909
2910 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2911 while (cache) {
2912 /*
2913 * Really this shouldn't happen, but it could if we
2914 * couldn't write the entire preallocated extent and
2915 * splitting the extent resulted in a new block.
2916 */
2917 if (cache->dirty) {
2918 btrfs_put_block_group(cache);
2919 goto again;
2920 }
2921 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2922 break;
2923 cache = next_block_group(root, cache);
2924 }
2925 if (!cache) {
2926 if (last == 0)
2927 break;
2928 last = 0;
2929 continue;
2930 }
2931
2932 btrfs_write_out_cache(root, trans, cache, path);
2933
2934 /*
2935 * If we didn't have an error then the cache state is still
2936 * NEED_WRITE, so we can set it to WRITTEN.
2937 */
2938 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2939 cache->disk_cache_state = BTRFS_DC_WRITTEN;
2940 last = cache->key.objectid + cache->key.offset;
2941 btrfs_put_block_group(cache);
2942 }
2943
2944 btrfs_free_path(path);
2945 return 0;
2946 }
2947
2948 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2949 {
2950 struct btrfs_block_group_cache *block_group;
2951 int readonly = 0;
2952
2953 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2954 if (!block_group || block_group->ro)
2955 readonly = 1;
2956 if (block_group)
2957 btrfs_put_block_group(block_group);
2958 return readonly;
2959 }
2960
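/*
 * find the space_info for @flags and add the new bytes to it, creating
 * the space_info if it doesn't exist yet.  disk_used is scaled by a
 * factor of two for DUP/RAID1/RAID10 since those profiles store two
 * copies of everything.
 */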
2961 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2962 u64 total_bytes, u64 bytes_used,
2963 struct btrfs_space_info **space_info)
2964 {
2965 struct btrfs_space_info *found;
2966 int i;
2967 int factor;
2968
2969 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2970 BTRFS_BLOCK_GROUP_RAID10))
2971 factor = 2;
2972 else
2973 factor = 1;
2974
2975 found = __find_space_info(info, flags);
2976 if (found) {
2977 spin_lock(&found->lock);
2978 found->total_bytes += total_bytes;
2979 found->bytes_used += bytes_used;
2980 found->disk_used += bytes_used * factor;
2981 found->full = 0;
2982 spin_unlock(&found->lock);
2983 *space_info = found;
2984 return 0;
2985 }
2986 found = kzalloc(sizeof(*found), GFP_NOFS);
2987 if (!found)
2988 return -ENOMEM;
2989
2990 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2991 INIT_LIST_HEAD(&found->block_groups[i]);
2992 init_rwsem(&found->groups_sem);
2993 spin_lock_init(&found->lock);
2994 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2995 BTRFS_BLOCK_GROUP_SYSTEM |
2996 BTRFS_BLOCK_GROUP_METADATA);
2997 found->total_bytes = total_bytes;
2998 found->bytes_used = bytes_used;
2999 found->disk_used = bytes_used * factor;
3000 found->bytes_pinned = 0;
3001 found->bytes_reserved = 0;
3002 found->bytes_readonly = 0;
3003 found->bytes_may_use = 0;
3004 found->full = 0;
3005 found->force_alloc = 0;
3006 *space_info = found;
3007 list_add_rcu(&found->list, &info->space_info);
3008 atomic_set(&found->caching_threads, 0);
3009 return 0;
3010 }
3011
3012 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3013 {
3014 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
3015 BTRFS_BLOCK_GROUP_RAID1 |
3016 BTRFS_BLOCK_GROUP_RAID10 |
3017 BTRFS_BLOCK_GROUP_DUP);
3018 if (extra_flags) {
3019 if (flags & BTRFS_BLOCK_GROUP_DATA)
3020 fs_info->avail_data_alloc_bits |= extra_flags;
3021 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3022 fs_info->avail_metadata_alloc_bits |= extra_flags;
3023 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3024 fs_info->avail_system_alloc_bits |= extra_flags;
3025 }
3026 }
3027
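/*
 * strip out allocation profiles that can't be satisfied by the current
 * number of writeable devices, then reduce redundant combinations so a
 * single profile remains, preferring RAID10 over RAID1 over DUP over
 * RAID0.
 */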
3028 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3029 {
3030 u64 num_devices = root->fs_info->fs_devices->rw_devices;
3031
3032 if (num_devices == 1)
3033 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3034 if (num_devices < 4)
3035 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3036
3037 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3038 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3039 BTRFS_BLOCK_GROUP_RAID10))) {
3040 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3041 }
3042
3043 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3044 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3045 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3046 }
3047
3048 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3049 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3050 (flags & BTRFS_BLOCK_GROUP_RAID10) |
3051 (flags & BTRFS_BLOCK_GROUP_DUP)))
3052 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3053 return flags;
3054 }
3055
3056 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3057 {
3058 if (flags & BTRFS_BLOCK_GROUP_DATA)
3059 flags |= root->fs_info->avail_data_alloc_bits &
3060 root->fs_info->data_alloc_profile;
3061 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3062 flags |= root->fs_info->avail_system_alloc_bits &
3063 root->fs_info->system_alloc_profile;
3064 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3065 flags |= root->fs_info->avail_metadata_alloc_bits &
3066 root->fs_info->metadata_alloc_profile;
3067 return btrfs_reduce_alloc_profile(root, flags);
3068 }
3069
3070 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3071 {
3072 u64 flags;
3073
3074 if (data)
3075 flags = BTRFS_BLOCK_GROUP_DATA;
3076 else if (root == root->fs_info->chunk_root)
3077 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3078 else
3079 flags = BTRFS_BLOCK_GROUP_METADATA;
3080
3081 return get_alloc_profile(root, flags);
3082 }
3083
3084 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3085 {
3086 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3087 BTRFS_BLOCK_GROUP_DATA);
3088 }
3089
3090 /*
3091 * This will check the space that the inode allocates from to make sure we
3092 * have enough space for the requested bytes.
3093 */
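/*
 * A typical caller pairs this with btrfs_free_reserved_data_space() on
 * the error path, roughly (a sketch; do_the_write() is hypothetical):
 *
 *	ret = btrfs_check_data_free_space(inode, bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(...);
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, bytes);
 */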
3094 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3095 {
3096 struct btrfs_space_info *data_sinfo;
3097 struct btrfs_root *root = BTRFS_I(inode)->root;
3098 u64 used;
3099 int ret = 0, committed = 0, alloc_chunk = 1;
3100
3101 /* make sure bytes are sectorsize aligned */
3102 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3103
3104 if (root == root->fs_info->tree_root) {
3105 alloc_chunk = 0;
3106 committed = 1;
3107 }
3108
3109 data_sinfo = BTRFS_I(inode)->space_info;
3110 if (!data_sinfo)
3111 goto alloc;
3112
3113 again:
3114 /* make sure we have enough space to handle the data first */
3115 spin_lock(&data_sinfo->lock);
3116 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3117 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3118 data_sinfo->bytes_may_use;
3119
3120 if (used + bytes > data_sinfo->total_bytes) {
3121 struct btrfs_trans_handle *trans;
3122
3123 /*
3124 * if we don't have enough free bytes in this space then we need
3125 * to alloc a new chunk.
3126 */
3127 if (!data_sinfo->full && alloc_chunk) {
3128 u64 alloc_target;
3129
3130 data_sinfo->force_alloc = 1;
3131 spin_unlock(&data_sinfo->lock);
3132 alloc:
3133 alloc_target = btrfs_get_alloc_profile(root, 1);
3134 trans = btrfs_join_transaction(root, 1);
3135 if (IS_ERR(trans))
3136 return PTR_ERR(trans);
3137
3138 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3139 bytes + 2 * 1024 * 1024,
3140 alloc_target, 0);
3141 btrfs_end_transaction(trans, root);
3142 if (ret < 0)
3143 return ret;
3144
3145 if (!data_sinfo) {
3146 btrfs_set_inode_space_info(root, inode);
3147 data_sinfo = BTRFS_I(inode)->space_info;
3148 }
3149 goto again;
3150 }
3151 spin_unlock(&data_sinfo->lock);
3152
3153 /* commit the current transaction and try again */
3154 if (!committed && !root->fs_info->open_ioctl_trans) {
3155 committed = 1;
3156 trans = btrfs_join_transaction(root, 1);
3157 if (IS_ERR(trans))
3158 return PTR_ERR(trans);
3159 ret = btrfs_commit_transaction(trans, root);
3160 if (ret)
3161 return ret;
3162 goto again;
3163 }
3164
3165 #if 0 /* I hope we never need this code again, just in case */
3166 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3167 "%llu bytes_reserved, " "%llu bytes_pinned, "
3168 "%llu bytes_readonly, %llu may use %llu total\n",
3169 (unsigned long long)bytes,
3170 (unsigned long long)data_sinfo->bytes_used,
3171 (unsigned long long)data_sinfo->bytes_reserved,
3172 (unsigned long long)data_sinfo->bytes_pinned,
3173 (unsigned long long)data_sinfo->bytes_readonly,
3174 (unsigned long long)data_sinfo->bytes_may_use,
3175 (unsigned long long)data_sinfo->total_bytes);
3176 #endif
3177 return -ENOSPC;
3178 }
3179 data_sinfo->bytes_may_use += bytes;
3180 BTRFS_I(inode)->reserved_bytes += bytes;
3181 spin_unlock(&data_sinfo->lock);
3182
3183 return 0;
3184 }
3185
3186 /*
3187 * called when we are clearing a delalloc extent from the
3188 * inode's io_tree or there was an error for whatever reason
3189 * after calling btrfs_check_data_free_space
3190 */
3191 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3192 {
3193 struct btrfs_root *root = BTRFS_I(inode)->root;
3194 struct btrfs_space_info *data_sinfo;
3195
3196 /* make sure bytes are sectorsize aligned */
3197 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3198
3199 data_sinfo = BTRFS_I(inode)->space_info;
3200 spin_lock(&data_sinfo->lock);
3201 data_sinfo->bytes_may_use -= bytes;
3202 BTRFS_I(inode)->reserved_bytes -= bytes;
3203 spin_unlock(&data_sinfo->lock);
3204 }
3205
3206 static void force_metadata_allocation(struct btrfs_fs_info *info)
3207 {
3208 struct list_head *head = &info->space_info;
3209 struct btrfs_space_info *found;
3210
3211 rcu_read_lock();
3212 list_for_each_entry_rcu(found, head, list) {
3213 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3214 found->force_alloc = 1;
3215 }
3216 rcu_read_unlock();
3217 }
3218
3219 static int should_alloc_chunk(struct btrfs_space_info *sinfo,
3220 u64 alloc_bytes)
3221 {
3222 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3223
3224 if (sinfo->bytes_used + sinfo->bytes_reserved +
3225 alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3226 return 0;
3227
3228 if (sinfo->bytes_used + sinfo->bytes_reserved +
3229 alloc_bytes < div_factor(num_bytes, 8))
3230 return 0;
3231
3232 return 1;
3233 }
3234
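/*
 * allocate a new chunk of type @flags if should_alloc_chunk() (or
 * @force / force_alloc) says we need one.  Returns 1 if a chunk was
 * allocated, 0 if allocation was skipped, and a negative error
 * otherwise; on error the space_info is marked full.
 */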
3235 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3236 struct btrfs_root *extent_root, u64 alloc_bytes,
3237 u64 flags, int force)
3238 {
3239 struct btrfs_space_info *space_info;
3240 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3241 int ret = 0;
3242
3243 mutex_lock(&fs_info->chunk_mutex);
3244
3245 flags = btrfs_reduce_alloc_profile(extent_root, flags);
3246
3247 space_info = __find_space_info(extent_root->fs_info, flags);
3248 if (!space_info) {
3249 ret = update_space_info(extent_root->fs_info, flags,
3250 0, 0, &space_info);
3251 BUG_ON(ret);
3252 }
3253 BUG_ON(!space_info);
3254
3255 spin_lock(&space_info->lock);
3256 if (space_info->force_alloc)
3257 force = 1;
3258 if (space_info->full) {
3259 spin_unlock(&space_info->lock);
3260 goto out;
3261 }
3262
3263 if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
3264 spin_unlock(&space_info->lock);
3265 goto out;
3266 }
3267 spin_unlock(&space_info->lock);
3268
3269 /*
3270 * If we have mixed data/metadata chunks we want to make sure we keep
3271 * allocating mixed chunks instead of individual chunks.
3272 */
3273 if (btrfs_mixed_space_info(space_info))
3274 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3275
3276 /*
3277 * if we're doing a data chunk, go ahead and make sure that
3278 * we keep a reasonable number of metadata chunks allocated in the
3279 * FS as well.
3280 */
3281 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3282 fs_info->data_chunk_allocations++;
3283 if (!(fs_info->data_chunk_allocations %
3284 fs_info->metadata_ratio))
3285 force_metadata_allocation(fs_info);
3286 }
3287
3288 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3289 spin_lock(&space_info->lock);
3290 if (ret)
3291 space_info->full = 1;
3292 else
3293 ret = 1;
3294 space_info->force_alloc = 0;
3295 spin_unlock(&space_info->lock);
3296 out:
3297 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3298 return ret;
3299 }
3300
3301 static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
3302 struct btrfs_root *root,
3303 struct btrfs_space_info *sinfo, u64 num_bytes)
3304 {
3305 int ret;
3306 int end_trans = 0;
3307
3308 if (sinfo->full)
3309 return 0;
3310
3311 spin_lock(&sinfo->lock);
3312 ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
3313 spin_unlock(&sinfo->lock);
3314 if (!ret)
3315 return 0;
3316
3317 if (!trans) {
3318 trans = btrfs_join_transaction(root, 1);
3319 BUG_ON(IS_ERR(trans));
3320 end_trans = 1;
3321 }
3322
3323 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3324 num_bytes + 2 * 1024 * 1024,
3325 get_alloc_profile(root, sinfo->flags), 0);
3326
3327 if (end_trans)
3328 btrfs_end_transaction(trans, root);
3329
3330 return ret == 1 ? 1 : 0;
3331 }
3332
3333 /*
3334 * shrink metadata reservation for delalloc
3335 */
3336 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3337 struct btrfs_root *root, u64 to_reclaim)
3338 {
3339 struct btrfs_block_rsv *block_rsv;
3340 u64 reserved;
3341 u64 max_reclaim;
3342 u64 reclaimed = 0;
3343 int pause = 1;
3344 int ret;
3345
3346 block_rsv = &root->fs_info->delalloc_block_rsv;
3347 spin_lock(&block_rsv->lock);
3348 reserved = block_rsv->reserved;
3349 spin_unlock(&block_rsv->lock);
3350
3351 if (reserved == 0)
3352 return 0;
3353
3354 max_reclaim = min(reserved, to_reclaim);
3355
3356 while (1) {
3357 ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
3358 if (!ret) {
3359 __set_current_state(TASK_INTERRUPTIBLE);
3360 schedule_timeout(pause);
3361 pause <<= 1;
3362 if (pause > HZ / 10)
3363 pause = HZ / 10;
3364 } else {
3365 pause = 1;
3366 }
3367
3368 spin_lock(&block_rsv->lock);
3369 if (reserved > block_rsv->reserved)
3370 reclaimed = reserved - block_rsv->reserved;
3371 reserved = block_rsv->reserved;
3372 spin_unlock(&block_rsv->lock);
3373
3374 if (reserved == 0 || reclaimed >= max_reclaim)
3375 break;
3376
3377 if (trans && trans->transaction->blocked)
3378 return -EAGAIN;
3379 }
3380 return reclaimed >= to_reclaim;
3381 }
3382
3383 static int should_retry_reserve(struct btrfs_trans_handle *trans,
3384 struct btrfs_root *root,
3385 struct btrfs_block_rsv *block_rsv,
3386 u64 num_bytes, int *retries)
3387 {
3388 struct btrfs_space_info *space_info = block_rsv->space_info;
3389 int ret;
3390
3391 if ((*retries) > 2)
3392 return -ENOSPC;
3393
3394 ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
3395 if (ret)
3396 return 1;
3397
3398 if (trans && trans->transaction->in_commit)
3399 return -ENOSPC;
3400
3401 ret = shrink_delalloc(trans, root, num_bytes);
3402 if (ret)
3403 return ret;
3404
3405 spin_lock(&space_info->lock);
3406 if (space_info->bytes_pinned < num_bytes)
3407 ret = 1;
3408 spin_unlock(&space_info->lock);
3409 if (ret)
3410 return -ENOSPC;
3411
3412 (*retries)++;
3413
3414 if (trans)
3415 return -EAGAIN;
3416
3417 trans = btrfs_join_transaction(root, 1);
3418 BUG_ON(IS_ERR(trans));
3419 ret = btrfs_commit_transaction(trans, root);
3420 BUG_ON(ret);
3421
3422 return 1;
3423 }
3424
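/*
 * try to carve @num_bytes out of the unused space in the reservation's
 * space_info.  High priority reservations (priority >= 10) succeed
 * whenever enough unused space exists; lower priority reservations are
 * only allowed to consume, roughly, a priority/10 share of what is
 * available.
 */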
3425 static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
3426 u64 num_bytes)
3427 {
3428 struct btrfs_space_info *space_info = block_rsv->space_info;
3429 u64 unused;
3430 int ret = -ENOSPC;
3431
3432 spin_lock(&space_info->lock);
3433 unused = space_info->bytes_used + space_info->bytes_reserved +
3434 space_info->bytes_pinned + space_info->bytes_readonly;
3435
3436 if (unused < space_info->total_bytes)
3437 unused = space_info->total_bytes - unused;
3438 else
3439 unused = 0;
3440
3441 if (unused >= num_bytes) {
3442 if (block_rsv->priority >= 10) {
3443 space_info->bytes_reserved += num_bytes;
3444 ret = 0;
3445 } else {
3446 if ((unused + block_rsv->reserved) *
3447 block_rsv->priority >=
3448 (num_bytes + block_rsv->reserved) * 10) {
3449 space_info->bytes_reserved += num_bytes;
3450 ret = 0;
3451 }
3452 }
3453 }
3454 spin_unlock(&space_info->lock);
3455
3456 return ret;
3457 }
3458
3459 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3460 struct btrfs_root *root)
3461 {
3462 struct btrfs_block_rsv *block_rsv;
3463 if (root->ref_cows)
3464 block_rsv = trans->block_rsv;
3465 else
3466 block_rsv = root->block_rsv;
3467
3468 if (!block_rsv)
3469 block_rsv = &root->fs_info->empty_block_rsv;
3470
3471 return block_rsv;
3472 }
3473
3474 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3475 u64 num_bytes)
3476 {
3477 int ret = -ENOSPC;
3478 spin_lock(&block_rsv->lock);
3479 if (block_rsv->reserved >= num_bytes) {
3480 block_rsv->reserved -= num_bytes;
3481 if (block_rsv->reserved < block_rsv->size)
3482 block_rsv->full = 0;
3483 ret = 0;
3484 }
3485 spin_unlock(&block_rsv->lock);
3486 return ret;
3487 }
3488
3489 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3490 u64 num_bytes, int update_size)
3491 {
3492 spin_lock(&block_rsv->lock);
3493 block_rsv->reserved += num_bytes;
3494 if (update_size)
3495 block_rsv->size += num_bytes;
3496 else if (block_rsv->reserved >= block_rsv->size)
3497 block_rsv->full = 1;
3498 spin_unlock(&block_rsv->lock);
3499 }
3500
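/*
 * shrink @block_rsv's size by @num_bytes ((u64)-1 means the whole
 * reservation).  Any reserved bytes freed up by the shrink are moved
 * to @dest if one is given, otherwise they are returned to the
 * space_info.
 */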
3501 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3502 struct btrfs_block_rsv *dest, u64 num_bytes)
3503 {
3504 struct btrfs_space_info *space_info = block_rsv->space_info;
3505
3506 spin_lock(&block_rsv->lock);
3507 if (num_bytes == (u64)-1)
3508 num_bytes = block_rsv->size;
3509 block_rsv->size -= num_bytes;
3510 if (block_rsv->reserved >= block_rsv->size) {
3511 num_bytes = block_rsv->reserved - block_rsv->size;
3512 block_rsv->reserved = block_rsv->size;
3513 block_rsv->full = 1;
3514 } else {
3515 num_bytes = 0;
3516 }
3517 spin_unlock(&block_rsv->lock);
3518
3519 if (num_bytes > 0) {
3520 if (dest) {
3521 block_rsv_add_bytes(dest, num_bytes, 0);
3522 } else {
3523 spin_lock(&space_info->lock);
3524 space_info->bytes_reserved -= num_bytes;
3525 spin_unlock(&space_info->lock);
3526 }
3527 }
3528 }
3529
3530 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3531 struct btrfs_block_rsv *dst, u64 num_bytes)
3532 {
3533 int ret;
3534
3535 ret = block_rsv_use_bytes(src, num_bytes);
3536 if (ret)
3537 return ret;
3538
3539 block_rsv_add_bytes(dst, num_bytes, 1);
3540 return 0;
3541 }
3542
3543 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3544 {
3545 memset(rsv, 0, sizeof(*rsv));
3546 spin_lock_init(&rsv->lock);
3547 atomic_set(&rsv->usage, 1);
3548 rsv->priority = 6;
3549 INIT_LIST_HEAD(&rsv->list);
3550 }
3551
3552 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3553 {
3554 struct btrfs_block_rsv *block_rsv;
3555 struct btrfs_fs_info *fs_info = root->fs_info;
3557
3558 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3559 if (!block_rsv)
3560 return NULL;
3561
3562 btrfs_init_block_rsv(block_rsv);
3563
3565 block_rsv->space_info = __find_space_info(fs_info,
3566 BTRFS_BLOCK_GROUP_METADATA);
3567
3568 return block_rsv;
3569 }
3570
3571 void btrfs_free_block_rsv(struct btrfs_root *root,
3572 struct btrfs_block_rsv *rsv)
3573 {
3574 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3575 btrfs_block_rsv_release(root, rsv, (u64)-1);
3576 if (!rsv->durable)
3577 kfree(rsv);
3578 }
3579 }
3580
3581 /*
3582 * make the block_rsv struct able to capture freed space.
3583 * the captured space will be re-added to the block_rsv struct
3584 * after transaction commit
3585 */
3586 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3587 struct btrfs_block_rsv *block_rsv)
3588 {
3589 block_rsv->durable = 1;
3590 mutex_lock(&fs_info->durable_block_rsv_mutex);
3591 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3592 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3593 }
3594
3595 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3596 struct btrfs_root *root,
3597 struct btrfs_block_rsv *block_rsv,
3598 u64 num_bytes, int *retries)
3599 {
3600 int ret;
3601
3602 if (num_bytes == 0)
3603 return 0;
3604 again:
3605 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3606 if (!ret) {
3607 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3608 return 0;
3609 }
3610
3611 ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries);
3612 if (ret > 0)
3613 goto again;
3614
3615 return ret;
3616 }
3617
3618 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3619 struct btrfs_root *root,
3620 struct btrfs_block_rsv *block_rsv,
3621 u64 min_reserved, int min_factor)
3622 {
3623 u64 num_bytes = 0;
3624 int commit_trans = 0;
3625 int ret = -ENOSPC;
3626
3627 if (!block_rsv)
3628 return 0;
3629
3630 spin_lock(&block_rsv->lock);
3631 if (min_factor > 0)
3632 num_bytes = div_factor(block_rsv->size, min_factor);
3633 if (min_reserved > num_bytes)
3634 num_bytes = min_reserved;
3635
3636 if (block_rsv->reserved >= num_bytes) {
3637 ret = 0;
3638 } else {
3639 num_bytes -= block_rsv->reserved;
3640 if (block_rsv->durable &&
3641 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3642 commit_trans = 1;
3643 }
3644 spin_unlock(&block_rsv->lock);
3645 if (!ret)
3646 return 0;
3647
3648 if (block_rsv->refill_used) {
3649 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3650 if (!ret) {
3651 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3652 return 0;
3653 }
3654 }
3655
3656 if (commit_trans) {
3657 if (trans)
3658 return -EAGAIN;
3659
3660 trans = btrfs_join_transaction(root, 1);
3661 BUG_ON(IS_ERR(trans));
3662 ret = btrfs_commit_transaction(trans, root);
3663 return 0;
3664 }
3665
3666 WARN_ON(1);
3667 printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
3668 block_rsv->size, block_rsv->reserved,
3669 block_rsv->freed[0], block_rsv->freed[1]);
3670
3671 return -ENOSPC;
3672 }
3673
3674 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3675 struct btrfs_block_rsv *dst_rsv,
3676 u64 num_bytes)
3677 {
3678 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3679 }
3680
3681 void btrfs_block_rsv_release(struct btrfs_root *root,
3682 struct btrfs_block_rsv *block_rsv,
3683 u64 num_bytes)
3684 {
3685 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3686 if (global_rsv->full || global_rsv == block_rsv ||
3687 block_rsv->space_info != global_rsv->space_info)
3688 global_rsv = NULL;
3689 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3690 }
3691
3692 /*
3693 * helper to calculate size of global block reservation.
3694 * the desired value is sum of space used by extent tree,
3695 * checksum tree and root tree
3696 */
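/*
 * For example (hypothetical numbers): with 4K blocks, a 4 byte csum,
 * 100GB of data and 10GB of metadata in use, the csum term is
 * (100GB / 4K) * 4 * 2 = 200MB and the 2% term is 110GB / 50 = 2.2GB,
 * so the reservation is ~2.4GB, under the meta_used / 3 cap.
 */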
3697 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3698 {
3699 struct btrfs_space_info *sinfo;
3700 u64 num_bytes;
3701 u64 meta_used;
3702 u64 data_used;
3703 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3704 #if 0
3705 /*
3706 * per-tree used space accounting can be inaccurate, so we
3707 * can't rely on it.
3708 */
3709 spin_lock(&fs_info->extent_root->accounting_lock);
3710 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3711 spin_unlock(&fs_info->extent_root->accounting_lock);
3712
3713 spin_lock(&fs_info->csum_root->accounting_lock);
3714 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3715 spin_unlock(&fs_info->csum_root->accounting_lock);
3716
3717 spin_lock(&fs_info->tree_root->accounting_lock);
3718 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3719 spin_unlock(&fs_info->tree_root->accounting_lock);
3720 #endif
3721 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3722 spin_lock(&sinfo->lock);
3723 data_used = sinfo->bytes_used;
3724 spin_unlock(&sinfo->lock);
3725
3726 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3727 spin_lock(&sinfo->lock);
3728 meta_used = sinfo->bytes_used;
3729 spin_unlock(&sinfo->lock);
3730
3731 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3732 csum_size * 2;
3733 num_bytes += div64_u64(data_used + meta_used, 50);
3734
3735 if (num_bytes * 3 > meta_used)
3736 num_bytes = div64_u64(meta_used, 3);
3737
3738 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3739 }
3740
3741 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3742 {
3743 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3744 struct btrfs_space_info *sinfo = block_rsv->space_info;
3745 u64 num_bytes;
3746
3747 num_bytes = calc_global_metadata_size(fs_info);
3748
3749 spin_lock(&block_rsv->lock);
3750 spin_lock(&sinfo->lock);
3751
3752 block_rsv->size = num_bytes;
3753
3754 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3755 sinfo->bytes_reserved + sinfo->bytes_readonly;
3756
3757 if (sinfo->total_bytes > num_bytes) {
3758 num_bytes = sinfo->total_bytes - num_bytes;
3759 block_rsv->reserved += num_bytes;
3760 sinfo->bytes_reserved += num_bytes;
3761 }
3762
3763 if (block_rsv->reserved >= block_rsv->size) {
3764 num_bytes = block_rsv->reserved - block_rsv->size;
3765 sinfo->bytes_reserved -= num_bytes;
3766 block_rsv->reserved = block_rsv->size;
3767 block_rsv->full = 1;
3768 }
3769 #if 0
3770 printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3771 block_rsv->size, block_rsv->reserved);
3772 #endif
3773 spin_unlock(&sinfo->lock);
3774 spin_unlock(&block_rsv->lock);
3775 }
3776
3777 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3778 {
3779 struct btrfs_space_info *space_info;
3780
3781 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3782 fs_info->chunk_block_rsv.space_info = space_info;
3783 fs_info->chunk_block_rsv.priority = 10;
3784
3785 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3786 fs_info->global_block_rsv.space_info = space_info;
3787 fs_info->global_block_rsv.priority = 10;
3788 fs_info->global_block_rsv.refill_used = 1;
3789 fs_info->delalloc_block_rsv.space_info = space_info;
3790 fs_info->trans_block_rsv.space_info = space_info;
3791 fs_info->empty_block_rsv.space_info = space_info;
3792 fs_info->empty_block_rsv.priority = 10;
3793
3794 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3795 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3796 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3797 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3798 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3799
3800 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3801
3802 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3803
3804 update_global_block_rsv(fs_info);
3805 }
3806
3807 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3808 {
3809 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3810 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3811 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3812 WARN_ON(fs_info->trans_block_rsv.size > 0);
3813 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3814 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3815 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3816 }
3817
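/*
 * worst case metadata cost of touching one item: a full-height path of
 * one leaf plus BTRFS_MAX_LEVEL - 1 nodes, tripled, presumably to
 * leave headroom for splits.  With 4K leaves and nodes and
 * BTRFS_MAX_LEVEL == 8 that is (4K + 7 * 4K) * 3 = 96K per item.
 */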
3818 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3819 {
3820 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3821 3 * num_items;
3822 }
3823
3824 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3825 struct btrfs_root *root,
3826 int num_items, int *retries)
3827 {
3828 u64 num_bytes;
3829 int ret;
3830
3831 if (num_items == 0 || root->fs_info->chunk_root == root)
3832 return 0;
3833
3834 num_bytes = calc_trans_metadata_size(root, num_items);
3835 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3836 num_bytes, retries);
3837 if (!ret) {
3838 trans->bytes_reserved += num_bytes;
3839 trans->block_rsv = &root->fs_info->trans_block_rsv;
3840 }
3841 return ret;
3842 }
3843
3844 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3845 struct btrfs_root *root)
3846 {
3847 if (!trans->bytes_reserved)
3848 return;
3849
3850 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3851 btrfs_block_rsv_release(root, trans->block_rsv,
3852 trans->bytes_reserved);
3853 trans->bytes_reserved = 0;
3854 }
3855
3856 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3857 struct inode *inode)
3858 {
3859 struct btrfs_root *root = BTRFS_I(inode)->root;
3860 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3861 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3862
3863 /*
3864 * one for deleting orphan item, one for updating inode and
3865 * two for calling btrfs_truncate_inode_items.
3866 *
3867 * btrfs_truncate_inode_items is a delete operation, it frees
3868 * more space than it uses in most cases. So two units of
3869 * metadata space should be enough for calling it many times.
3870 * If all of the metadata space is used, we can commit
3871 * transaction and use space it freed.
3872 */
3873 u64 num_bytes = calc_trans_metadata_size(root, 4);
3874 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3875 }
3876
3877 void btrfs_orphan_release_metadata(struct inode *inode)
3878 {
3879 struct btrfs_root *root = BTRFS_I(inode)->root;
3880 u64 num_bytes = calc_trans_metadata_size(root, 4);
3881 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3882 }
3883
3884 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3885 struct btrfs_pending_snapshot *pending)
3886 {
3887 struct btrfs_root *root = pending->root;
3888 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3889 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3890 /*
3891 * two for root back/forward refs, two for directory entries
3892 * and one for the root of the snapshot.
3893 */
3894 u64 num_bytes = calc_trans_metadata_size(root, 5);
3895 dst_rsv->space_info = src_rsv->space_info;
3896 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3897 }
3898
3899 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3900 {
3901 return num_bytes >> 3;
3902 }
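/*
 * i.e. one eighth of the data byte count is set aside for csum items:
 * a 1MiB delalloc range reserves 128KiB of metadata space.
 */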
3903
3904 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3905 {
3906 struct btrfs_root *root = BTRFS_I(inode)->root;
3907 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3908 u64 to_reserve;
3909 int nr_extents;
3910 int retries = 0;
3911 int ret;
3912
3913 if (btrfs_transaction_in_commit(root->fs_info))
3914 schedule_timeout(1);
3915
3916 num_bytes = ALIGN(num_bytes, root->sectorsize);
3917 again:
3918 spin_lock(&BTRFS_I(inode)->accounting_lock);
3919 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3920 if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3921 nr_extents -= BTRFS_I(inode)->reserved_extents;
3922 to_reserve = calc_trans_metadata_size(root, nr_extents);
3923 } else {
3924 nr_extents = 0;
3925 to_reserve = 0;
3926 }
3927
3928 to_reserve += calc_csum_metadata_size(inode, num_bytes);
3929 ret = reserve_metadata_bytes(block_rsv, to_reserve);
3930 if (ret) {
3931 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3932 ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
3933 &retries);
3934 if (ret > 0)
3935 goto again;
3936 return ret;
3937 }
3938
3939 BTRFS_I(inode)->reserved_extents += nr_extents;
3940 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3941 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3942
3943 block_rsv_add_bytes(block_rsv, to_reserve, 1);
3944
3945 if (block_rsv->size > 512 * 1024 * 1024)
3946 shrink_delalloc(NULL, root, to_reserve);
3947
3948 return 0;
3949 }
3950
3951 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3952 {
3953 struct btrfs_root *root = BTRFS_I(inode)->root;
3954 u64 to_free;
3955 int nr_extents;
3956
3957 num_bytes = ALIGN(num_bytes, root->sectorsize);
3958 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
3959
3960 spin_lock(&BTRFS_I(inode)->accounting_lock);
3961 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
3962 if (nr_extents < BTRFS_I(inode)->reserved_extents) {
3963 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
3964 BTRFS_I(inode)->reserved_extents -= nr_extents;
3965 } else {
3966 nr_extents = 0;
3967 }
3968 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3969
3970 to_free = calc_csum_metadata_size(inode, num_bytes);
3971 if (nr_extents > 0)
3972 to_free += calc_trans_metadata_size(root, nr_extents);
3973
3974 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
3975 to_free);
3976 }
3977
3978 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
3979 {
3980 int ret;
3981
3982 ret = btrfs_check_data_free_space(inode, num_bytes);
3983 if (ret)
3984 return ret;
3985
3986 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
3987 if (ret) {
3988 btrfs_free_reserved_data_space(inode, num_bytes);
3989 return ret;
3990 }
3991
3992 return 0;
3993 }
3994
3995 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
3996 {
3997 btrfs_delalloc_release_metadata(inode, num_bytes);
3998 btrfs_free_reserved_data_space(inode, num_bytes);
3999 }
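/*
 * a typical caller pairs the two helpers above around the actual write,
 * roughly like this (a sketch only; do_copy() stands in for whatever
 * fills the pages):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_copy(...);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 */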
4000
4001 static int update_block_group(struct btrfs_trans_handle *trans,
4002 struct btrfs_root *root,
4003 u64 bytenr, u64 num_bytes, int alloc)
4004 {
4005 struct btrfs_block_group_cache *cache = NULL;
4006 struct btrfs_fs_info *info = root->fs_info;
4007 u64 total = num_bytes;
4008 u64 old_val;
4009 u64 byte_in_group;
4010 int factor;
4011
4012 /* block accounting for super block */
4013 spin_lock(&info->delalloc_lock);
4014 old_val = btrfs_super_bytes_used(&info->super_copy);
4015 if (alloc)
4016 old_val += num_bytes;
4017 else
4018 old_val -= num_bytes;
4019 btrfs_set_super_bytes_used(&info->super_copy, old_val);
4020 spin_unlock(&info->delalloc_lock);
4021
4022 while (total) {
4023 cache = btrfs_lookup_block_group(info, bytenr);
4024 if (!cache)
4025 return -1;
4026 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4027 BTRFS_BLOCK_GROUP_RAID1 |
4028 BTRFS_BLOCK_GROUP_RAID10))
4029 factor = 2;
4030 else
4031 factor = 1;
4032 /*
4033 * If this block group has free space cache written out, we
4034 * need to make sure to load it if we are removing space. This
4035 * is because we need the unpinning stage to actually add the
4036 * space back to the block group, otherwise we will leak space.
4037 */
4038 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4039 cache_block_group(cache, trans, 1);
4040
4041 byte_in_group = bytenr - cache->key.objectid;
4042 WARN_ON(byte_in_group > cache->key.offset);
4043
4044 spin_lock(&cache->space_info->lock);
4045 spin_lock(&cache->lock);
4046
4047 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4048 cache->disk_cache_state < BTRFS_DC_CLEAR)
4049 cache->disk_cache_state = BTRFS_DC_CLEAR;
4050
4051 cache->dirty = 1;
4052 old_val = btrfs_block_group_used(&cache->item);
4053 num_bytes = min(total, cache->key.offset - byte_in_group);
4054 if (alloc) {
4055 old_val += num_bytes;
4056 btrfs_set_block_group_used(&cache->item, old_val);
4057 cache->reserved -= num_bytes;
4058 cache->space_info->bytes_reserved -= num_bytes;
4059 cache->space_info->bytes_used += num_bytes;
4060 cache->space_info->disk_used += num_bytes * factor;
4061 spin_unlock(&cache->lock);
4062 spin_unlock(&cache->space_info->lock);
4063 } else {
4064 old_val -= num_bytes;
4065 btrfs_set_block_group_used(&cache->item, old_val);
4066 cache->pinned += num_bytes;
4067 cache->space_info->bytes_pinned += num_bytes;
4068 cache->space_info->bytes_used -= num_bytes;
4069 cache->space_info->disk_used -= num_bytes * factor;
4070 spin_unlock(&cache->lock);
4071 spin_unlock(&cache->space_info->lock);
4072
4073 set_extent_dirty(info->pinned_extents,
4074 bytenr, bytenr + num_bytes - 1,
4075 GFP_NOFS | __GFP_NOFAIL);
4076 }
4077 btrfs_put_block_group(cache);
4078 total -= num_bytes;
4079 bytenr += num_bytes;
4080 }
4081 return 0;
4082 }
4083
4084 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4085 {
4086 struct btrfs_block_group_cache *cache;
4087 u64 bytenr;
4088
4089 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4090 if (!cache)
4091 return 0;
4092
4093 bytenr = cache->key.objectid;
4094 btrfs_put_block_group(cache);
4095
4096 return bytenr;
4097 }
4098
4099 static int pin_down_extent(struct btrfs_root *root,
4100 struct btrfs_block_group_cache *cache,
4101 u64 bytenr, u64 num_bytes, int reserved)
4102 {
4103 spin_lock(&cache->space_info->lock);
4104 spin_lock(&cache->lock);
4105 cache->pinned += num_bytes;
4106 cache->space_info->bytes_pinned += num_bytes;
4107 if (reserved) {
4108 cache->reserved -= num_bytes;
4109 cache->space_info->bytes_reserved -= num_bytes;
4110 }
4111 spin_unlock(&cache->lock);
4112 spin_unlock(&cache->space_info->lock);
4113
4114 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4115 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4116 return 0;
4117 }
4118
4119 /*
4120 * this function must be called within a transaction
4121 */
4122 int btrfs_pin_extent(struct btrfs_root *root,
4123 u64 bytenr, u64 num_bytes, int reserved)
4124 {
4125 struct btrfs_block_group_cache *cache;
4126
4127 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4128 BUG_ON(!cache);
4129
4130 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4131
4132 btrfs_put_block_group(cache);
4133 return 0;
4134 }
4135
4136 /*
4137 * update the size of reserved extents. Returns -EAGAIN only if the
4138 * block group is read-only: always when 'sinfo' is false, on reserve otherwise.
4139 */
4140 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4141 u64 num_bytes, int reserve, int sinfo)
4142 {
4143 int ret = 0;
4144 if (sinfo) {
4145 struct btrfs_space_info *space_info = cache->space_info;
4146 spin_lock(&space_info->lock);
4147 spin_lock(&cache->lock);
4148 if (reserve) {
4149 if (cache->ro) {
4150 ret = -EAGAIN;
4151 } else {
4152 cache->reserved += num_bytes;
4153 space_info->bytes_reserved += num_bytes;
4154 }
4155 } else {
4156 if (cache->ro)
4157 space_info->bytes_readonly += num_bytes;
4158 cache->reserved -= num_bytes;
4159 space_info->bytes_reserved -= num_bytes;
4160 }
4161 spin_unlock(&cache->lock);
4162 spin_unlock(&space_info->lock);
4163 } else {
4164 spin_lock(&cache->lock);
4165 if (cache->ro) {
4166 ret = -EAGAIN;
4167 } else {
4168 if (reserve)
4169 cache->reserved += num_bytes;
4170 else
4171 cache->reserved -= num_bytes;
4172 }
4173 spin_unlock(&cache->lock);
4174 }
4175 return ret;
4176 }
4177
4178 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4179 struct btrfs_root *root)
4180 {
4181 struct btrfs_fs_info *fs_info = root->fs_info;
4182 struct btrfs_caching_control *next;
4183 struct btrfs_caching_control *caching_ctl;
4184 struct btrfs_block_group_cache *cache;
4185
4186 down_write(&fs_info->extent_commit_sem);
4187
4188 list_for_each_entry_safe(caching_ctl, next,
4189 &fs_info->caching_block_groups, list) {
4190 cache = caching_ctl->block_group;
4191 if (block_group_cache_done(cache)) {
4192 cache->last_byte_to_unpin = (u64)-1;
4193 list_del_init(&caching_ctl->list);
4194 put_caching_control(caching_ctl);
4195 } else {
4196 cache->last_byte_to_unpin = caching_ctl->progress;
4197 }
4198 }
4199
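/*
 * flip which extent_io_tree collects newly pinned extents: the tree
 * that filled up during this transaction will be drained by
 * btrfs_finish_extent_commit(), while new pins land in the other one.
 */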
4200 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4201 fs_info->pinned_extents = &fs_info->freed_extents[1];
4202 else
4203 fs_info->pinned_extents = &fs_info->freed_extents[0];
4204
4205 up_write(&fs_info->extent_commit_sem);
4206
4207 update_global_block_rsv(fs_info);
4208 return 0;
4209 }
4210
4211 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4212 {
4213 struct btrfs_fs_info *fs_info = root->fs_info;
4214 struct btrfs_block_group_cache *cache = NULL;
4215 u64 len;
4216
4217 while (start <= end) {
4218 if (!cache ||
4219 start >= cache->key.objectid + cache->key.offset) {
4220 if (cache)
4221 btrfs_put_block_group(cache);
4222 cache = btrfs_lookup_block_group(fs_info, start);
4223 BUG_ON(!cache);
4224 }
4225
4226 len = cache->key.objectid + cache->key.offset - start;
4227 len = min(len, end + 1 - start);
4228
4229 if (start < cache->last_byte_to_unpin) {
4230 len = min(len, cache->last_byte_to_unpin - start);
4231 btrfs_add_free_space(cache, start, len);
4232 }
4233
4234 start += len;
4235
4236 spin_lock(&cache->space_info->lock);
4237 spin_lock(&cache->lock);
4238 cache->pinned -= len;
4239 cache->space_info->bytes_pinned -= len;
4240 if (cache->ro) {
4241 cache->space_info->bytes_readonly += len;
4242 } else if (cache->reserved_pinned > 0) {
4243 len = min(len, cache->reserved_pinned);
4244 cache->reserved_pinned -= len;
4245 cache->space_info->bytes_reserved += len;
4246 }
4247 spin_unlock(&cache->lock);
4248 spin_unlock(&cache->space_info->lock);
4249 }
4250
4251 if (cache)
4252 btrfs_put_block_group(cache);
4253 return 0;
4254 }
4255
4256 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4257 struct btrfs_root *root)
4258 {
4259 struct btrfs_fs_info *fs_info = root->fs_info;
4260 struct extent_io_tree *unpin;
4261 struct btrfs_block_rsv *block_rsv;
4262 struct btrfs_block_rsv *next_rsv;
4263 u64 start;
4264 u64 end;
4265 int idx;
4266 int ret;
4267
4268 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4269 unpin = &fs_info->freed_extents[1];
4270 else
4271 unpin = &fs_info->freed_extents[0];
4272
4273 while (1) {
4274 ret = find_first_extent_bit(unpin, 0, &start, &end,
4275 EXTENT_DIRTY);
4276 if (ret)
4277 break;
4278
4279 ret = btrfs_discard_extent(root, start, end + 1 - start);
4280
4281 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4282 unpin_extent_range(root, start, end);
4283 cond_resched();
4284 }
4285
4286 mutex_lock(&fs_info->durable_block_rsv_mutex);
4287 list_for_each_entry_safe(block_rsv, next_rsv,
4288 &fs_info->durable_block_rsv_list, list) {
4289
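/*
 * freed[] is indexed by transaction parity: bytes freed while this
 * transaction ran only become usable reserve once the commit is
 * done, which is now.
 */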
4290 idx = trans->transid & 0x1;
4291 if (block_rsv->freed[idx] > 0) {
4292 block_rsv_add_bytes(block_rsv,
4293 block_rsv->freed[idx], 0);
4294 block_rsv->freed[idx] = 0;
4295 }
4296 if (atomic_read(&block_rsv->usage) == 0) {
4297 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4298
4299 if (block_rsv->freed[0] == 0 &&
4300 block_rsv->freed[1] == 0) {
4301 list_del_init(&block_rsv->list);
4302 kfree(block_rsv);
4303 }
4304 } else {
4305 btrfs_block_rsv_release(root, block_rsv, 0);
4306 }
4307 }
4308 mutex_unlock(&fs_info->durable_block_rsv_mutex);
4309
4310 return 0;
4311 }
4312
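/*
 * drop refs_to_drop references to the given extent. When the last
 * reference goes away the extent item itself is deleted, csums are
 * removed for data extents, and the space is returned through
 * update_block_group().
 */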
4313 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4314 struct btrfs_root *root,
4315 u64 bytenr, u64 num_bytes, u64 parent,
4316 u64 root_objectid, u64 owner_objectid,
4317 u64 owner_offset, int refs_to_drop,
4318 struct btrfs_delayed_extent_op *extent_op)
4319 {
4320 struct btrfs_key key;
4321 struct btrfs_path *path;
4322 struct btrfs_fs_info *info = root->fs_info;
4323 struct btrfs_root *extent_root = info->extent_root;
4324 struct extent_buffer *leaf;
4325 struct btrfs_extent_item *ei;
4326 struct btrfs_extent_inline_ref *iref;
4327 int ret;
4328 int is_data;
4329 int extent_slot = 0;
4330 int found_extent = 0;
4331 int num_to_del = 1;
4332 u32 item_size;
4333 u64 refs;
4334
4335 path = btrfs_alloc_path();
4336 if (!path)
4337 return -ENOMEM;
4338
4339 path->reada = 1;
4340 path->leave_spinning = 1;
4341
4342 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4343 BUG_ON(!is_data && refs_to_drop != 1);
4344
4345 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4346 bytenr, num_bytes, parent,
4347 root_objectid, owner_objectid,
4348 owner_offset);
4349 if (ret == 0) {
4350 extent_slot = path->slots[0];
4351 while (extent_slot >= 0) {
4352 btrfs_item_key_to_cpu(path->nodes[0], &key,
4353 extent_slot);
4354 if (key.objectid != bytenr)
4355 break;
4356 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4357 key.offset == num_bytes) {
4358 found_extent = 1;
4359 break;
4360 }
4361 if (path->slots[0] - extent_slot > 5)
4362 break;
4363 extent_slot--;
4364 }
4365 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4366 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4367 if (found_extent && item_size < sizeof(*ei))
4368 found_extent = 0;
4369 #endif
4370 if (!found_extent) {
4371 BUG_ON(iref);
4372 ret = remove_extent_backref(trans, extent_root, path,
4373 NULL, refs_to_drop,
4374 is_data);
4375 BUG_ON(ret);
4376 btrfs_release_path(extent_root, path);
4377 path->leave_spinning = 1;
4378
4379 key.objectid = bytenr;
4380 key.type = BTRFS_EXTENT_ITEM_KEY;
4381 key.offset = num_bytes;
4382
4383 ret = btrfs_search_slot(trans, extent_root,
4384 &key, path, -1, 1);
4385 if (ret) {
4386 printk(KERN_ERR "umm, got %d back from search"
4387 ", was looking for %llu\n", ret,
4388 (unsigned long long)bytenr);
4389 btrfs_print_leaf(extent_root, path->nodes[0]);
4390 }
4391 BUG_ON(ret);
4392 extent_slot = path->slots[0];
4393 }
4394 } else {
4395 btrfs_print_leaf(extent_root, path->nodes[0]);
4396 WARN_ON(1);
4397 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4398 "parent %llu root %llu owner %llu offset %llu\n",
4399 (unsigned long long)bytenr,
4400 (unsigned long long)parent,
4401 (unsigned long long)root_objectid,
4402 (unsigned long long)owner_objectid,
4403 (unsigned long long)owner_offset);
4404 }
4405
4406 leaf = path->nodes[0];
4407 item_size = btrfs_item_size_nr(leaf, extent_slot);
4408 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4409 if (item_size < sizeof(*ei)) {
4410 BUG_ON(found_extent || extent_slot != path->slots[0]);
4411 ret = convert_extent_item_v0(trans, extent_root, path,
4412 owner_objectid, 0);
4413 BUG_ON(ret < 0);
4414
4415 btrfs_release_path(extent_root, path);
4416 path->leave_spinning = 1;
4417
4418 key.objectid = bytenr;
4419 key.type = BTRFS_EXTENT_ITEM_KEY;
4420 key.offset = num_bytes;
4421
4422 ret = btrfs_search_slot(trans, extent_root, &key, path,
4423 -1, 1);
4424 if (ret) {
4425 printk(KERN_ERR "umm, got %d back from search"
4426 ", was looking for %llu\n", ret,
4427 (unsigned long long)bytenr);
4428 btrfs_print_leaf(extent_root, path->nodes[0]);
4429 }
4430 BUG_ON(ret);
4431 extent_slot = path->slots[0];
4432 leaf = path->nodes[0];
4433 item_size = btrfs_item_size_nr(leaf, extent_slot);
4434 }
4435 #endif
4436 BUG_ON(item_size < sizeof(*ei));
4437 ei = btrfs_item_ptr(leaf, extent_slot,
4438 struct btrfs_extent_item);
4439 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4440 struct btrfs_tree_block_info *bi;
4441 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4442 bi = (struct btrfs_tree_block_info *)(ei + 1);
4443 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4444 }
4445
4446 refs = btrfs_extent_refs(leaf, ei);
4447 BUG_ON(refs < refs_to_drop);
4448 refs -= refs_to_drop;
4449
4450 if (refs > 0) {
4451 if (extent_op)
4452 __run_delayed_extent_op(extent_op, leaf, ei);
4453 /*
4454 * In the case of inline back ref, reference count will
4455 * be updated by remove_extent_backref
4456 */
4457 if (iref) {
4458 BUG_ON(!found_extent);
4459 } else {
4460 btrfs_set_extent_refs(leaf, ei, refs);
4461 btrfs_mark_buffer_dirty(leaf);
4462 }
4463 if (found_extent) {
4464 ret = remove_extent_backref(trans, extent_root, path,
4465 iref, refs_to_drop,
4466 is_data);
4467 BUG_ON(ret);
4468 }
4469 } else {
4470 if (found_extent) {
4471 BUG_ON(is_data && refs_to_drop !=
4472 extent_data_ref_count(root, path, iref));
4473 if (iref) {
4474 BUG_ON(path->slots[0] != extent_slot);
4475 } else {
4476 BUG_ON(path->slots[0] != extent_slot + 1);
4477 path->slots[0] = extent_slot;
4478 num_to_del = 2;
4479 }
4480 }
4481
4482 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4483 num_to_del);
4484 BUG_ON(ret);
4485 btrfs_release_path(extent_root, path);
4486
4487 if (is_data) {
4488 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4489 BUG_ON(ret);
4490 } else {
4491 invalidate_mapping_pages(info->btree_inode->i_mapping,
4492 bytenr >> PAGE_CACHE_SHIFT,
4493 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4494 }
4495
4496 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4497 BUG_ON(ret);
4498 }
4499 btrfs_free_path(path);
4500 return ret;
4501 }
4502
4503 /*
4504 * when we free a block, it is possible (and likely) that we free the last
4505 * delayed ref for that extent as well. This searches the delayed ref tree for
4506 * a given extent, and if there are no other delayed refs to be processed, it
4507 * removes it from the tree.
4508 */
4509 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4510 struct btrfs_root *root, u64 bytenr)
4511 {
4512 struct btrfs_delayed_ref_head *head;
4513 struct btrfs_delayed_ref_root *delayed_refs;
4514 struct btrfs_delayed_ref_node *ref;
4515 struct rb_node *node;
4516 int ret = 0;
4517
4518 delayed_refs = &trans->transaction->delayed_refs;
4519 spin_lock(&delayed_refs->lock);
4520 head = btrfs_find_delayed_ref_head(trans, bytenr);
4521 if (!head)
4522 goto out;
4523
4524 node = rb_prev(&head->node.rb_node);
4525 if (!node)
4526 goto out;
4527
4528 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4529
4530 /* there are still entries for this ref, we can't drop it */
4531 if (ref->bytenr == bytenr)
4532 goto out;
4533
4534 if (head->extent_op) {
4535 if (!head->must_insert_reserved)
4536 goto out;
4537 kfree(head->extent_op);
4538 head->extent_op = NULL;
4539 }
4540
4541 /*
4542 * waiting for the lock here would deadlock. If someone else has it
4543 * locked, they are already in the process of dropping it anyway
4544 */
4545 if (!mutex_trylock(&head->mutex))
4546 goto out;
4547
4548 /*
4549 * at this point we have a head with no other entries. Go
4550 * ahead and process it.
4551 */
4552 head->node.in_tree = 0;
4553 rb_erase(&head->node.rb_node, &delayed_refs->root);
4554
4555 delayed_refs->num_entries--;
4556
4557 /*
4558 * we don't take a ref on the node because we're removing it from the
4559 * tree, so we just steal the ref the tree was holding.
4560 */
4561 delayed_refs->num_heads--;
4562 if (list_empty(&head->cluster))
4563 delayed_refs->num_heads_ready--;
4564
4565 list_del_init(&head->cluster);
4566 spin_unlock(&delayed_refs->lock);
4567
4568 BUG_ON(head->extent_op);
4569 if (head->must_insert_reserved)
4570 ret = 1;
4571
4572 mutex_unlock(&head->mutex);
4573 btrfs_put_delayed_ref(&head->node);
4574 return ret;
4575 out:
4576 spin_unlock(&delayed_refs->lock);
4577 return 0;
4578 }
4579
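/*
 * free a tree block: queue a delayed ref drop and, if this was the last
 * reference, either hand the space straight back (block allocated and
 * freed in the same transaction, never written) or pin it until the
 * transaction commits.
 */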
4580 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4581 struct btrfs_root *root,
4582 struct extent_buffer *buf,
4583 u64 parent, int last_ref)
4584 {
4585 struct btrfs_block_rsv *block_rsv;
4586 struct btrfs_block_group_cache *cache = NULL;
4587 int ret;
4588
4589 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4590 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4591 parent, root->root_key.objectid,
4592 btrfs_header_level(buf),
4593 BTRFS_DROP_DELAYED_REF, NULL);
4594 BUG_ON(ret);
4595 }
4596
4597 if (!last_ref)
4598 return;
4599
4600 block_rsv = get_block_rsv(trans, root);
4601 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4602 if (block_rsv->space_info != cache->space_info)
4603 goto out;
4604
4605 if (btrfs_header_generation(buf) == trans->transid) {
4606 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4607 ret = check_ref_cleanup(trans, root, buf->start);
4608 if (!ret)
4609 goto pin;
4610 }
4611
4612 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4613 pin_down_extent(root, cache, buf->start, buf->len, 1);
4614 goto pin;
4615 }
4616
4617 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4618
4619 btrfs_add_free_space(cache, buf->start, buf->len);
4620 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4621 if (ret == -EAGAIN) {
4622 /* block group became read-only */
4623 update_reserved_bytes(cache, buf->len, 0, 1);
4624 goto out;
4625 }
4626
4627 ret = 1;
4628 spin_lock(&block_rsv->lock);
4629 if (block_rsv->reserved < block_rsv->size) {
4630 block_rsv->reserved += buf->len;
4631 ret = 0;
4632 }
4633 spin_unlock(&block_rsv->lock);
4634
4635 if (ret) {
4636 spin_lock(&cache->space_info->lock);
4637 cache->space_info->bytes_reserved -= buf->len;
4638 spin_unlock(&cache->space_info->lock);
4639 }
4640 goto out;
4641 }
4642 pin:
4643 if (block_rsv->durable && !cache->ro) {
4644 ret = 0;
4645 spin_lock(&cache->lock);
4646 if (!cache->ro) {
4647 cache->reserved_pinned += buf->len;
4648 ret = 1;
4649 }
4650 spin_unlock(&cache->lock);
4651
4652 if (ret) {
4653 spin_lock(&block_rsv->lock);
4654 block_rsv->freed[trans->transid & 0x1] += buf->len;
4655 spin_unlock(&block_rsv->lock);
4656 }
4657 }
4658 out:
4659 btrfs_put_block_group(cache);
4660 }
4661
4662 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4663 struct btrfs_root *root,
4664 u64 bytenr, u64 num_bytes, u64 parent,
4665 u64 root_objectid, u64 owner, u64 offset)
4666 {
4667 int ret;
4668
4669 /*
4670 * tree log blocks never actually go into the extent allocation
4671 * tree, just update pinning info and exit early.
4672 */
4673 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4674 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4675 /* unlocks the pinned mutex */
4676 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4677 ret = 0;
4678 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4679 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4680 parent, root_objectid, (int)owner,
4681 BTRFS_DROP_DELAYED_REF, NULL);
4682 BUG_ON(ret);
4683 } else {
4684 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4685 parent, root_objectid, owner,
4686 offset, BTRFS_DROP_DELAYED_REF, NULL);
4687 BUG_ON(ret);
4688 }
4689 return ret;
4690 }
4691
4692 static u64 stripe_align(struct btrfs_root *root, u64 val)
4693 {
4694 u64 mask = ((u64)root->stripesize - 1);
4695 u64 ret = (val + mask) & ~mask;
4696 return ret;
4697 }
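/*
 * stripesize is assumed to be a power of two; e.g. with a 64KiB
 * stripesize, stripe_align() rounds 100000 up to 131072.
 */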
4698
4699 /*
4700 * when we wait for progress in the block group caching, it's because
4701 * our allocation attempt failed at least once. So, we must sleep
4702 * and let some progress happen before we try again.
4703 *
4704 * This function will sleep at least once waiting for new free space to
4705 * show up, and then it will check the block group free space numbers
4706 * for our min num_bytes. Another option is to have it go ahead
4707 * and look in the rbtree for a free extent of a given size, but this
4708 * is a good start.
4709 */
4710 static noinline int
4711 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4712 u64 num_bytes)
4713 {
4714 struct btrfs_caching_control *caching_ctl;
4715 DEFINE_WAIT(wait);
4716
4717 caching_ctl = get_caching_control(cache);
4718 if (!caching_ctl)
4719 return 0;
4720
4721 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4722 (cache->free_space >= num_bytes));
4723
4724 put_caching_control(caching_ctl);
4725 return 0;
4726 }
4727
4728 static noinline int
4729 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4730 {
4731 struct btrfs_caching_control *caching_ctl;
4732 DEFINE_WAIT(wait);
4733
4734 caching_ctl = get_caching_control(cache);
4735 if (!caching_ctl)
4736 return 0;
4737
4738 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4739
4740 put_caching_control(caching_ctl);
4741 return 0;
4742 }
4743
4744 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4745 {
4746 int index;
4747 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4748 index = 0;
4749 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4750 index = 1;
4751 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4752 index = 2;
4753 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4754 index = 3;
4755 else
4756 index = 4;
4757 return index;
4758 }
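/*
 * the index above selects the per-RAID-type list in
 * space_info->block_groups[]; the order (RAID10, RAID1, DUP, RAID0,
 * single) is the order find_free_extent() walks the lists up to
 * BTRFS_NR_RAID_TYPES.
 */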
4759
4760 enum btrfs_loop_type {
4761 LOOP_FIND_IDEAL = 0,
4762 LOOP_CACHING_NOWAIT = 1,
4763 LOOP_CACHING_WAIT = 2,
4764 LOOP_ALLOC_CHUNK = 3,
4765 LOOP_NO_EMPTY_SIZE = 4,
4766 };
4767
4768 /*
4769 * walks the btree of allocated extents and finds a hole of a given size.
4770 * The key ins is changed to record the hole:
4771 * ins->objectid == block start
4772 * ins->flags == BTRFS_EXTENT_ITEM_KEY
4773 * ins->offset == number of blocks
4774 * Any available blocks before search_start are skipped.
4775 */
4776 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4777 struct btrfs_root *orig_root,
4778 u64 num_bytes, u64 empty_size,
4779 u64 search_start, u64 search_end,
4780 u64 hint_byte, struct btrfs_key *ins,
4781 int data)
4782 {
4783 int ret = 0;
4784 struct btrfs_root *root = orig_root->fs_info->extent_root;
4785 struct btrfs_free_cluster *last_ptr = NULL;
4786 struct btrfs_block_group_cache *block_group = NULL;
4787 int empty_cluster = 2 * 1024 * 1024;
4788 int allowed_chunk_alloc = 0;
4789 int done_chunk_alloc = 0;
4790 struct btrfs_space_info *space_info;
4791 int last_ptr_loop = 0;
4792 int loop = 0;
4793 int index = 0;
4794 bool found_uncached_bg = false;
4795 bool failed_cluster_refill = false;
4796 bool failed_alloc = false;
4797 bool use_cluster = true;
4798 u64 ideal_cache_percent = 0;
4799 u64 ideal_cache_offset = 0;
4800
4801 WARN_ON(num_bytes < root->sectorsize);
4802 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4803 ins->objectid = 0;
4804 ins->offset = 0;
4805
4806 space_info = __find_space_info(root->fs_info, data);
4807 if (!space_info) {
4808 printk(KERN_ERR "No space info for %d\n", data);
4809 return -ENOSPC;
4810 }
4811
4812 /*
4813 * If the space info is for both data and metadata it means we have a
4814 * small filesystem and we can't use the clustering stuff.
4815 */
4816 if (btrfs_mixed_space_info(space_info))
4817 use_cluster = false;
4818
4819 if (orig_root->ref_cows || empty_size)
4820 allowed_chunk_alloc = 1;
4821
4822 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4823 last_ptr = &root->fs_info->meta_alloc_cluster;
4824 if (!btrfs_test_opt(root, SSD))
4825 empty_cluster = 64 * 1024;
4826 }
4827
4828 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4829 btrfs_test_opt(root, SSD)) {
4830 last_ptr = &root->fs_info->data_alloc_cluster;
4831 }
4832
4833 if (last_ptr) {
4834 spin_lock(&last_ptr->lock);
4835 if (last_ptr->block_group)
4836 hint_byte = last_ptr->window_start;
4837 spin_unlock(&last_ptr->lock);
4838 }
4839
4840 search_start = max(search_start, first_logical_byte(root, 0));
4841 search_start = max(search_start, hint_byte);
4842
4843 if (!last_ptr)
4844 empty_cluster = 0;
4845
4846 if (search_start == hint_byte) {
4847 ideal_cache:
4848 block_group = btrfs_lookup_block_group(root->fs_info,
4849 search_start);
4850 /*
4851 * we don't want to use the block group if it doesn't match our
4852 * allocation bits, or if its not cached.
4853 *
4854 * However if we are re-searching with an ideal block group
4855 * picked out then we don't care that the block group is cached.
4856 */
4857 if (block_group && block_group_bits(block_group, data) &&
4858 (block_group->cached != BTRFS_CACHE_NO ||
4859 search_start == ideal_cache_offset)) {
4860 down_read(&space_info->groups_sem);
4861 if (list_empty(&block_group->list) ||
4862 block_group->ro) {
4863 /*
4864 * someone is removing this block group,
4865 * we can't jump into the have_block_group
4866 * target because our list pointers are not
4867 * valid
4868 */
4869 btrfs_put_block_group(block_group);
4870 up_read(&space_info->groups_sem);
4871 } else {
4872 index = get_block_group_index(block_group);
4873 goto have_block_group;
4874 }
4875 } else if (block_group) {
4876 btrfs_put_block_group(block_group);
4877 }
4878 }
4879 search:
4880 down_read(&space_info->groups_sem);
4881 list_for_each_entry(block_group, &space_info->block_groups[index],
4882 list) {
4883 u64 offset;
4884 int cached;
4885
4886 btrfs_get_block_group(block_group);
4887 search_start = block_group->key.objectid;
4888
4889 have_block_group:
4890 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4891 u64 free_percent;
4892
4893 ret = cache_block_group(block_group, trans, 1);
4894 if (block_group->cached == BTRFS_CACHE_FINISHED)
4895 goto have_block_group;
4896
4897 free_percent = btrfs_block_group_used(&block_group->item);
4898 free_percent *= 100;
4899 free_percent = div64_u64(free_percent,
4900 block_group->key.offset);
4901 free_percent = 100 - free_percent;
4902 if (free_percent > ideal_cache_percent &&
4903 likely(!block_group->ro)) {
4904 ideal_cache_offset = block_group->key.objectid;
4905 ideal_cache_percent = free_percent;
4906 }
4907
4908 /*
4909 * We only want to start kthread caching if we are at
4910 * the point where we will wait for caching to make
4911 * progress, or if our ideal search is over and we've
4912 * found somebody to start caching.
4913 */
4914 if (loop > LOOP_CACHING_NOWAIT ||
4915 (loop > LOOP_FIND_IDEAL &&
4916 atomic_read(&space_info->caching_threads) < 2)) {
4917 ret = cache_block_group(block_group, trans, 0);
4918 BUG_ON(ret);
4919 }
4920 found_uncached_bg = true;
4921
4922 /*
4923 * If loop is set for cached only, try the next block
4924 * group.
4925 */
4926 if (loop == LOOP_FIND_IDEAL)
4927 goto loop;
4928 }
4929
4930 cached = block_group_cache_done(block_group);
4931 if (unlikely(!cached))
4932 found_uncached_bg = true;
4933
4934 if (unlikely(block_group->ro))
4935 goto loop;
4936
4937 /*
4938 * OK, we want to try and use the cluster allocator, so let's look
4939 * there, unless we are on LOOP_NO_EMPTY_SIZE, since by then we
4940 * will have tried the cluster allocator plenty of times and not
4941 * have found anything. In that case we are likely way too
4942 * fragmented for the clustering stuff to find anything, so let's
4943 * just skip it and let the allocator find whatever block it can
4944 * find.
4945 */
4946 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4947 /*
4948 * the refill lock keeps out other
4949 * people trying to start a new cluster
4950 */
4951 spin_lock(&last_ptr->refill_lock);
4952 if (last_ptr->block_group &&
4953 (last_ptr->block_group->ro ||
4954 !block_group_bits(last_ptr->block_group, data))) {
4955 offset = 0;
4956 goto refill_cluster;
4957 }
4958
4959 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4960 num_bytes, search_start);
4961 if (offset) {
4962 /* we have a block, we're done */
4963 spin_unlock(&last_ptr->refill_lock);
4964 goto checks;
4965 }
4966
4967 spin_lock(&last_ptr->lock);
4968 /*
4969 * whoops, this cluster doesn't actually point to
4970 * this block group. Get a ref on the block
4971 * group it does point to and try again
4972 */
4973 if (!last_ptr_loop && last_ptr->block_group &&
4974 last_ptr->block_group != block_group) {
4975
4976 btrfs_put_block_group(block_group);
4977 block_group = last_ptr->block_group;
4978 btrfs_get_block_group(block_group);
4979 spin_unlock(&last_ptr->lock);
4980 spin_unlock(&last_ptr->refill_lock);
4981
4982 last_ptr_loop = 1;
4983 search_start = block_group->key.objectid;
4984 /*
4985 * we know this block group is properly
4986 * in the list because
4987 * btrfs_remove_block_group drops the
4988 * cluster before it removes the block
4989 * group from the list
4990 */
4991 goto have_block_group;
4992 }
4993 spin_unlock(&last_ptr->lock);
4994 refill_cluster:
4995 /*
4996 * this cluster didn't work out, free it and
4997 * start over
4998 */
4999 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5000
5001 last_ptr_loop = 0;
5002
5003 /* allocate a cluster in this block group */
5004 ret = btrfs_find_space_cluster(trans, root,
5005 block_group, last_ptr,
5006 offset, num_bytes,
5007 empty_cluster + empty_size);
5008 if (ret == 0) {
5009 /*
5010 * now pull our allocation out of this
5011 * cluster
5012 */
5013 offset = btrfs_alloc_from_cluster(block_group,
5014 last_ptr, num_bytes,
5015 search_start);
5016 if (offset) {
5017 /* we found one, proceed */
5018 spin_unlock(&last_ptr->refill_lock);
5019 goto checks;
5020 }
5021 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5022 && !failed_cluster_refill) {
5023 spin_unlock(&last_ptr->refill_lock);
5024
5025 failed_cluster_refill = true;
5026 wait_block_group_cache_progress(block_group,
5027 num_bytes + empty_cluster + empty_size);
5028 goto have_block_group;
5029 }
5030
5031 /*
5032 * at this point we either didn't find a cluster
5033 * or we weren't able to allocate a block from our
5034 * cluster. Free the cluster we've been trying
5035 * to use, and go to the next block group
5036 */
5037 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5038 spin_unlock(&last_ptr->refill_lock);
5039 goto loop;
5040 }
5041
5042 offset = btrfs_find_space_for_alloc(block_group, search_start,
5043 num_bytes, empty_size);
5044 /*
5045 * If we didn't find a chunk, and we haven't failed on this
5046 * block group before, and this block group is in the middle of
5047 * caching and we are ok with waiting, then go ahead and wait
5048 * for progress to be made, and set failed_alloc to true.
5049 *
5050 * If failed_alloc is true then we've already waited on this
5051 * block group once and should move on to the next block group.
5052 */
5053 if (!offset && !failed_alloc && !cached &&
5054 loop > LOOP_CACHING_NOWAIT) {
5055 wait_block_group_cache_progress(block_group,
5056 num_bytes + empty_size);
5057 failed_alloc = true;
5058 goto have_block_group;
5059 } else if (!offset) {
5060 goto loop;
5061 }
5062 checks:
5063 search_start = stripe_align(root, offset);
5064 /* move on to the next group */
5065 if (search_start + num_bytes >= search_end) {
5066 btrfs_add_free_space(block_group, offset, num_bytes);
5067 goto loop;
5068 }
5069
5070 /* move on to the next group */
5071 if (search_start + num_bytes >
5072 block_group->key.objectid + block_group->key.offset) {
5073 btrfs_add_free_space(block_group, offset, num_bytes);
5074 goto loop;
5075 }
5076
5077 ins->objectid = search_start;
5078 ins->offset = num_bytes;
5079
5080 if (offset < search_start)
5081 btrfs_add_free_space(block_group, offset,
5082 search_start - offset);
5083 BUG_ON(offset > search_start);
5084
5085 ret = update_reserved_bytes(block_group, num_bytes, 1,
5086 (data & BTRFS_BLOCK_GROUP_DATA));
5087 if (ret == -EAGAIN) {
5088 btrfs_add_free_space(block_group, offset, num_bytes);
5089 goto loop;
5090 }
5091
5092 /* we are all good, let's return */
5093 ins->objectid = search_start;
5094 ins->offset = num_bytes;
5095
5096 if (offset < search_start)
5097 btrfs_add_free_space(block_group, offset,
5098 search_start - offset);
5099 BUG_ON(offset > search_start);
5100 break;
5101 loop:
5102 failed_cluster_refill = false;
5103 failed_alloc = false;
5104 BUG_ON(index != get_block_group_index(block_group));
5105 btrfs_put_block_group(block_group);
5106 }
5107 up_read(&space_info->groups_sem);
5108
5109 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5110 goto search;
5111
5112 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5113 * for them to make caching progress. Also
5114 * determine the best possible bg to cache
5115 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5116 * caching kthreads as we move along
5117 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5118 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5119 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5120 * again
5121 */
5122 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5123 (found_uncached_bg || empty_size || empty_cluster ||
5124 allowed_chunk_alloc)) {
5125 index = 0;
5126 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5127 found_uncached_bg = false;
5128 loop++;
5129 if (!ideal_cache_percent &&
5130 atomic_read(&space_info->caching_threads))
5131 goto search;
5132
5133 /*
5134 * one of the following two things has happened so far
5135 *
5136 * 1) We found an ideal block group for caching that
5137 * is mostly full and will cache quickly, so we might
5138 * as well wait for it.
5139 *
5140 * 2) We searched for cached only and we didn't find
5141 * anything, and we didn't start any caching kthreads
5142 * either, so chances are we will loop through and
5143 * start a couple caching kthreads, and then come back
5144 * around and just wait for them. This will be slower
5145 * because we will have 2 caching kthreads reading at
5146 * the same time when we could have just started one
5147 * and waited for it to get far enough to give us an
5148 * allocation, so go ahead and go to the wait caching
5149 * loop.
5150 */
5151 loop = LOOP_CACHING_WAIT;
5152 search_start = ideal_cache_offset;
5153 ideal_cache_percent = 0;
5154 goto ideal_cache;
5155 } else if (loop == LOOP_FIND_IDEAL) {
5156 /*
5157 * Didn't find an uncached bg, wait on anything we find
5158 * next.
5159 */
5160 loop = LOOP_CACHING_WAIT;
5161 goto search;
5162 }
5163
5164 if (loop < LOOP_CACHING_WAIT) {
5165 loop++;
5166 goto search;
5167 }
5168
5169 if (loop == LOOP_ALLOC_CHUNK) {
5170 empty_size = 0;
5171 empty_cluster = 0;
5172 }
5173
5174 if (allowed_chunk_alloc) {
5175 ret = do_chunk_alloc(trans, root, num_bytes +
5176 2 * 1024 * 1024, data, 1);
5177 allowed_chunk_alloc = 0;
5178 done_chunk_alloc = 1;
5179 } else if (!done_chunk_alloc) {
5180 space_info->force_alloc = 1;
5181 }
5182
5183 if (loop < LOOP_NO_EMPTY_SIZE) {
5184 loop++;
5185 goto search;
5186 }
5187 ret = -ENOSPC;
5188 } else if (!ins->objectid) {
5189 ret = -ENOSPC;
5190 }
5191
5192 /* we found what we needed */
5193 if (ins->objectid) {
5194 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5195 trans->block_group = block_group->key.objectid;
5196
5197 btrfs_put_block_group(block_group);
5198 ret = 0;
5199 }
5200
5201 return ret;
5202 }
5203
5204 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5205 int dump_block_groups)
5206 {
5207 struct btrfs_block_group_cache *cache;
5208 int index = 0;
5209
5210 spin_lock(&info->lock);
5211 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5212 (unsigned long long)(info->total_bytes - info->bytes_used -
5213 info->bytes_pinned - info->bytes_reserved -
5214 info->bytes_readonly),
5215 (info->full) ? "" : "not ");
5216 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5217 "reserved=%llu, may_use=%llu, readonly=%llu\n",
5218 (unsigned long long)info->total_bytes,
5219 (unsigned long long)info->bytes_used,
5220 (unsigned long long)info->bytes_pinned,
5221 (unsigned long long)info->bytes_reserved,
5222 (unsigned long long)info->bytes_may_use,
5223 (unsigned long long)info->bytes_readonly);
5224 spin_unlock(&info->lock);
5225
5226 if (!dump_block_groups)
5227 return;
5228
5229 down_read(&info->groups_sem);
5230 again:
5231 list_for_each_entry(cache, &info->block_groups[index], list) {
5232 spin_lock(&cache->lock);
5233 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5234 "%llu pinned %llu reserved\n",
5235 (unsigned long long)cache->key.objectid,
5236 (unsigned long long)cache->key.offset,
5237 (unsigned long long)btrfs_block_group_used(&cache->item),
5238 (unsigned long long)cache->pinned,
5239 (unsigned long long)cache->reserved);
5240 btrfs_dump_free_space(cache, bytes);
5241 spin_unlock(&cache->lock);
5242 }
5243 if (++index < BTRFS_NR_RAID_TYPES)
5244 goto again;
5245 up_read(&info->groups_sem);
5246 }
5247
5248 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5249 struct btrfs_root *root,
5250 u64 num_bytes, u64 min_alloc_size,
5251 u64 empty_size, u64 hint_byte,
5252 u64 search_end, struct btrfs_key *ins,
5253 u64 data)
5254 {
5255 int ret;
5256 u64 search_start = 0;
5257
5258 data = btrfs_get_alloc_profile(root, data);
5259 again:
5260 /*
5261 * the only place that sets empty_size is btrfs_realloc_node, which
5262 * is not called recursively on allocations
5263 */
5264 if (empty_size || root->ref_cows)
5265 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5266 num_bytes + 2 * 1024 * 1024, data, 0);
5267
5268 WARN_ON(num_bytes < root->sectorsize);
5269 ret = find_free_extent(trans, root, num_bytes, empty_size,
5270 search_start, search_end, hint_byte,
5271 ins, data);
5272
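/*
 * on ENOSPC, halve the request (keeping it sector aligned and at
 * least min_alloc_size), force a chunk allocation and retry
 */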
5273 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5274 num_bytes = num_bytes >> 1;
5275 num_bytes = num_bytes & ~(root->sectorsize - 1);
5276 num_bytes = max(num_bytes, min_alloc_size);
5277 do_chunk_alloc(trans, root->fs_info->extent_root,
5278 num_bytes, data, 1);
5279 goto again;
5280 }
5281 if (ret == -ENOSPC) {
5282 struct btrfs_space_info *sinfo;
5283
5284 sinfo = __find_space_info(root->fs_info, data);
5285 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5286 "wanted %llu\n", (unsigned long long)data,
5287 (unsigned long long)num_bytes);
5288 dump_space_info(sinfo, num_bytes, 1);
5289 }
5290
5291 return ret;
5292 }
5293
5294 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5295 {
5296 struct btrfs_block_group_cache *cache;
5297 int ret = 0;
5298
5299 cache = btrfs_lookup_block_group(root->fs_info, start);
5300 if (!cache) {
5301 printk(KERN_ERR "Unable to find block group for %llu\n",
5302 (unsigned long long)start);
5303 return -ENOSPC;
5304 }
5305
5306 ret = btrfs_discard_extent(root, start, len);
5307
5308 btrfs_add_free_space(cache, start, len);
5309 update_reserved_bytes(cache, len, 0, 1);
5310 btrfs_put_block_group(cache);
5311
5312 return ret;
5313 }
5314
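/*
 * insert the extent item for a data extent together with one inline
 * backref; the resulting item layout is
 *
 *	btrfs_extent_item | inline ref type | shared or keyed data ref
 *
 * a shared ref is used when 'parent' is set, a keyed
 * (root/objectid/offset) ref otherwise.
 */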
5315 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5316 struct btrfs_root *root,
5317 u64 parent, u64 root_objectid,
5318 u64 flags, u64 owner, u64 offset,
5319 struct btrfs_key *ins, int ref_mod)
5320 {
5321 int ret;
5322 struct btrfs_fs_info *fs_info = root->fs_info;
5323 struct btrfs_extent_item *extent_item;
5324 struct btrfs_extent_inline_ref *iref;
5325 struct btrfs_path *path;
5326 struct extent_buffer *leaf;
5327 int type;
5328 u32 size;
5329
5330 if (parent > 0)
5331 type = BTRFS_SHARED_DATA_REF_KEY;
5332 else
5333 type = BTRFS_EXTENT_DATA_REF_KEY;
5334
5335 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5336
5337 path = btrfs_alloc_path();
5338 BUG_ON(!path);
5339
5340 path->leave_spinning = 1;
5341 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5342 ins, size);
5343 BUG_ON(ret);
5344
5345 leaf = path->nodes[0];
5346 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5347 struct btrfs_extent_item);
5348 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5349 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5350 btrfs_set_extent_flags(leaf, extent_item,
5351 flags | BTRFS_EXTENT_FLAG_DATA);
5352
5353 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5354 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5355 if (parent > 0) {
5356 struct btrfs_shared_data_ref *ref;
5357 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5358 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5359 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5360 } else {
5361 struct btrfs_extent_data_ref *ref;
5362 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5363 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5364 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5365 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5366 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5367 }
5368
5369 btrfs_mark_buffer_dirty(path->nodes[0]);
5370 btrfs_free_path(path);
5371
5372 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5373 if (ret) {
5374 printk(KERN_ERR "btrfs update block group failed for %llu "
5375 "%llu\n", (unsigned long long)ins->objectid,
5376 (unsigned long long)ins->offset);
5377 BUG();
5378 }
5379 return ret;
5380 }
5381
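/*
 * same as above but for a tree block: a btrfs_tree_block_info (key +
 * level) sits between the extent item and the inline ref, and the ref
 * is either a shared block or a tree block backref.
 */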
5382 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5383 struct btrfs_root *root,
5384 u64 parent, u64 root_objectid,
5385 u64 flags, struct btrfs_disk_key *key,
5386 int level, struct btrfs_key *ins)
5387 {
5388 int ret;
5389 struct btrfs_fs_info *fs_info = root->fs_info;
5390 struct btrfs_extent_item *extent_item;
5391 struct btrfs_tree_block_info *block_info;
5392 struct btrfs_extent_inline_ref *iref;
5393 struct btrfs_path *path;
5394 struct extent_buffer *leaf;
5395 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5396
5397 path = btrfs_alloc_path();
5398 BUG_ON(!path);
5399
5400 path->leave_spinning = 1;
5401 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5402 ins, size);
5403 BUG_ON(ret);
5404
5405 leaf = path->nodes[0];
5406 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5407 struct btrfs_extent_item);
5408 btrfs_set_extent_refs(leaf, extent_item, 1);
5409 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5410 btrfs_set_extent_flags(leaf, extent_item,
5411 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5412 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5413
5414 btrfs_set_tree_block_key(leaf, block_info, key);
5415 btrfs_set_tree_block_level(leaf, block_info, level);
5416
5417 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5418 if (parent > 0) {
5419 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5420 btrfs_set_extent_inline_ref_type(leaf, iref,
5421 BTRFS_SHARED_BLOCK_REF_KEY);
5422 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5423 } else {
5424 btrfs_set_extent_inline_ref_type(leaf, iref,
5425 BTRFS_TREE_BLOCK_REF_KEY);
5426 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5427 }
5428
5429 btrfs_mark_buffer_dirty(leaf);
5430 btrfs_free_path(path);
5431
5432 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5433 if (ret) {
5434 printk(KERN_ERR "btrfs update block group failed for %llu "
5435 "%llu\n", (unsigned long long)ins->objectid,
5436 (unsigned long long)ins->offset);
5437 BUG();
5438 }
5439 return ret;
5440 }
5441
5442 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5443 struct btrfs_root *root,
5444 u64 root_objectid, u64 owner,
5445 u64 offset, struct btrfs_key *ins)
5446 {
5447 int ret;
5448
5449 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5450
5451 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5452 0, root_objectid, owner, offset,
5453 BTRFS_ADD_DELAYED_EXTENT, NULL);
5454 return ret;
5455 }
5456
5457 /*
5458 * this is used by the tree logging recovery code. It records that
5459 * an extent has been allocated and makes sure to clear the free
5460 * space cache bits as well
5461 */
5462 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5463 struct btrfs_root *root,
5464 u64 root_objectid, u64 owner, u64 offset,
5465 struct btrfs_key *ins)
5466 {
5467 int ret;
5468 struct btrfs_block_group_cache *block_group;
5469 struct btrfs_caching_control *caching_ctl;
5470 u64 start = ins->objectid;
5471 u64 num_bytes = ins->offset;
5472
5473 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5474 cache_block_group(block_group, trans, 0);
5475 caching_ctl = get_caching_control(block_group);
5476
5477 if (!caching_ctl) {
5478 BUG_ON(!block_group_cache_done(block_group));
5479 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5480 BUG_ON(ret);
5481 } else {
5482 mutex_lock(&caching_ctl->mutex);
5483
5484 if (start >= caching_ctl->progress) {
5485 ret = add_excluded_extent(root, start, num_bytes);
5486 BUG_ON(ret);
5487 } else if (start + num_bytes <= caching_ctl->progress) {
5488 ret = btrfs_remove_free_space(block_group,
5489 start, num_bytes);
5490 BUG_ON(ret);
5491 } else {
5492 num_bytes = caching_ctl->progress - start;
5493 ret = btrfs_remove_free_space(block_group,
5494 start, num_bytes);
5495 BUG_ON(ret);
5496
5497 start = caching_ctl->progress;
5498 num_bytes = ins->objectid + ins->offset -
5499 caching_ctl->progress;
5500 ret = add_excluded_extent(root, start, num_bytes);
5501 BUG_ON(ret);
5502 }
5503
5504 mutex_unlock(&caching_ctl->mutex);
5505 put_caching_control(caching_ctl);
5506 }
5507
5508 ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5509 BUG_ON(ret);
5510 btrfs_put_block_group(block_group);
5511 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5512 0, owner, offset, ins, 1);
5513 return ret;
5514 }
5515
5516 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5517 struct btrfs_root *root,
5518 u64 bytenr, u32 blocksize,
5519 int level)
5520 {
5521 struct extent_buffer *buf;
5522
5523 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5524 if (!buf)
5525 return ERR_PTR(-ENOMEM);
5526 btrfs_set_header_generation(buf, trans->transid);
5527 btrfs_set_buffer_lockdep_class(buf, level);
5528 btrfs_tree_lock(buf);
5529 clean_tree_block(trans, root, buf);
5530
5531 btrfs_set_lock_blocking(buf);
5532 btrfs_set_buffer_uptodate(buf);
5533
5534 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5535 /*
5536 * we allow two log transactions at a time, use different
5537 * EXTENT bits to differentiate dirty pages.
5538 */
5539 if (root->log_transid % 2 == 0)
5540 set_extent_dirty(&root->dirty_log_pages, buf->start,
5541 buf->start + buf->len - 1, GFP_NOFS);
5542 else
5543 set_extent_new(&root->dirty_log_pages, buf->start,
5544 buf->start + buf->len - 1, GFP_NOFS);
5545 } else {
5546 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5547 buf->start + buf->len - 1, GFP_NOFS);
5548 }
5549 trans->blocks_used++;
5550 /* this returns a buffer locked for blocking */
5551 return buf;
5552 }
5553
5554 static struct btrfs_block_rsv *
5555 use_block_rsv(struct btrfs_trans_handle *trans,
5556 struct btrfs_root *root, u32 blocksize)
5557 {
5558 struct btrfs_block_rsv *block_rsv;
5559 int ret;
5560
5561 block_rsv = get_block_rsv(trans, root);
5562
5563 if (block_rsv->size == 0) {
5564 ret = reserve_metadata_bytes(block_rsv, blocksize);
5565 if (ret)
5566 return ERR_PTR(ret);
5567 return block_rsv;
5568 }
5569
5570 ret = block_rsv_use_bytes(block_rsv, blocksize);
5571 if (!ret)
5572 return block_rsv;
5573
5574 WARN_ON(1);
5575 printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
5576 block_rsv->size, block_rsv->reserved,
5577 block_rsv->freed[0], block_rsv->freed[1]);
5578
5579 return ERR_PTR(-ENOSPC);
5580 }
5581
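/*
 * undo use_block_rsv(): return the blocksize to the rsv's reserved
 * count, then let the release path trim any excess over its target
 * size back to the space_info.
 */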
5582 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5583 {
5584 block_rsv_add_bytes(block_rsv, blocksize, 0);
5585 block_rsv_release_bytes(block_rsv, NULL, 0);
5586 }
5587
5588 /*
5589 * finds a free extent and does all the dirty work required for allocation.
5590 * The key for the extent is returned through ins, and a tree buffer for
5591 * the first block of the extent is returned to the caller.
5592 *
5593 * returns the tree buffer or an ERR_PTR on failure.
5594 */
5595 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5596 struct btrfs_root *root, u32 blocksize,
5597 u64 parent, u64 root_objectid,
5598 struct btrfs_disk_key *key, int level,
5599 u64 hint, u64 empty_size)
5600 {
5601 struct btrfs_key ins;
5602 struct btrfs_block_rsv *block_rsv;
5603 struct extent_buffer *buf;
5604 u64 flags = 0;
5605 int ret;
5606
5607
5608 block_rsv = use_block_rsv(trans, root, blocksize);
5609 if (IS_ERR(block_rsv))
5610 return ERR_CAST(block_rsv);
5611
5612 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5613 empty_size, hint, (u64)-1, &ins, 0);
5614 if (ret) {
5615 unuse_block_rsv(block_rsv, blocksize);
5616 return ERR_PTR(ret);
5617 }
5618
5619 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5620 blocksize, level);
5621 BUG_ON(IS_ERR(buf));
5622
5623 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5624 if (parent == 0)
5625 parent = ins.objectid;
5626 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5627 } else
5628 BUG_ON(parent > 0);
5629
5630 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5631 struct btrfs_delayed_extent_op *extent_op;
5632 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5633 BUG_ON(!extent_op);
5634 if (key)
5635 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5636 else
5637 memset(&extent_op->key, 0, sizeof(extent_op->key));
5638 extent_op->flags_to_set = flags;
5639 extent_op->update_key = 1;
5640 extent_op->update_flags = 1;
5641 extent_op->is_data = 0;
5642
5643 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5644 ins.offset, parent, root_objectid,
5645 level, BTRFS_ADD_DELAYED_EXTENT,
5646 extent_op);
5647 BUG_ON(ret);
5648 }
5649 return buf;
5650 }
5651
5652 struct walk_control {
5653 u64 refs[BTRFS_MAX_LEVEL];
5654 u64 flags[BTRFS_MAX_LEVEL];
5655 struct btrfs_key update_progress;
5656 int stage;
5657 int level;
5658 int shared_level;
5659 int update_ref;
5660 int keep_locks;
5661 int reada_slot;
5662 int reada_count;
5663 };
5664
5665 #define DROP_REFERENCE 1
5666 #define UPDATE_BACKREF 2
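/*
 * stages for the tree walk below: DROP_REFERENCE drops refs on the way
 * down, while UPDATE_BACKREF rewrites the backrefs of a shared subtree
 * (see walk_down_proc) before dropping can continue.
 */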
5667
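/*
 * readahead for the walk: prefetch child blocks we are about to visit,
 * growing the window while the walk keeps up with the previous
 * readahead position and shrinking it when we re-enter behind it.
 */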
5668 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5669 struct btrfs_root *root,
5670 struct walk_control *wc,
5671 struct btrfs_path *path)
5672 {
5673 u64 bytenr;
5674 u64 generation;
5675 u64 refs;
5676 u64 flags;
5677 u64 last = 0;
5678 u32 nritems;
5679 u32 blocksize;
5680 struct btrfs_key key;
5681 struct extent_buffer *eb;
5682 int ret;
5683 int slot;
5684 int nread = 0;
5685
5686 if (path->slots[wc->level] < wc->reada_slot) {
5687 wc->reada_count = wc->reada_count * 2 / 3;
5688 wc->reada_count = max(wc->reada_count, 2);
5689 } else {
5690 wc->reada_count = wc->reada_count * 3 / 2;
5691 wc->reada_count = min_t(int, wc->reada_count,
5692 BTRFS_NODEPTRS_PER_BLOCK(root));
5693 }
5694
5695 eb = path->nodes[wc->level];
5696 nritems = btrfs_header_nritems(eb);
5697 blocksize = btrfs_level_size(root, wc->level - 1);
5698
5699 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5700 if (nread >= wc->reada_count)
5701 break;
5702
5703 cond_resched();
5704 bytenr = btrfs_node_blockptr(eb, slot);
5705 generation = btrfs_node_ptr_generation(eb, slot);
5706
5707 if (slot == path->slots[wc->level])
5708 goto reada;
5709
5710 if (wc->stage == UPDATE_BACKREF &&
5711 generation <= root->root_key.offset)
5712 continue;
5713
5714 /* We don't lock the tree block, it's OK to be racy here */
5715 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5716 &refs, &flags);
5717 BUG_ON(ret);
5718 BUG_ON(refs == 0);
5719
5720 if (wc->stage == DROP_REFERENCE) {
5721 if (refs == 1)
5722 goto reada;
5723
5724 if (wc->level == 1 &&
5725 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5726 continue;
5727 if (!wc->update_ref ||
5728 generation <= root->root_key.offset)
5729 continue;
5730 btrfs_node_key_to_cpu(eb, &key, slot);
5731 ret = btrfs_comp_cpu_keys(&key,
5732 &wc->update_progress);
5733 if (ret < 0)
5734 continue;
5735 } else {
5736 if (wc->level == 1 &&
5737 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5738 continue;
5739 }
5740 reada:
5741 ret = readahead_tree_block(root, bytenr, blocksize,
5742 generation);
5743 if (ret)
5744 break;
5745 last = bytenr + blocksize;
5746 nread++;
5747 }
5748 wc->reada_slot = slot;
5749 }
5750
5751 /*
5752 * helper to process a tree block while walking down the tree.
5753 *
5754 * when wc->stage == UPDATE_BACKREF, this function updates
5755 * back refs for pointers in the block.
5756 *
5757 * NOTE: return value 1 means we should stop walking down.
5758 */
5759 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5760 struct btrfs_root *root,
5761 struct btrfs_path *path,
5762 struct walk_control *wc, int lookup_info)
5763 {
5764 int level = wc->level;
5765 struct extent_buffer *eb = path->nodes[level];
5766 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5767 int ret;
5768
5769 if (wc->stage == UPDATE_BACKREF &&
5770 btrfs_header_owner(eb) != root->root_key.objectid)
5771 return 1;
5772
5773 /*
5774 * when the reference count of a tree block is 1, it won't increase
5775 * again. once the full backref flag is set, we never clear it.
5776 */
5777 if (lookup_info &&
5778 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5779 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5780 BUG_ON(!path->locks[level]);
5781 ret = btrfs_lookup_extent_info(trans, root,
5782 eb->start, eb->len,
5783 &wc->refs[level],
5784 &wc->flags[level]);
5785 BUG_ON(ret);
5786 BUG_ON(wc->refs[level] == 0);
5787 }
5788
5789 if (wc->stage == DROP_REFERENCE) {
5790 if (wc->refs[level] > 1)
5791 return 1;
5792
5793 if (path->locks[level] && !wc->keep_locks) {
5794 btrfs_tree_unlock(eb);
5795 path->locks[level] = 0;
5796 }
5797 return 0;
5798 }
5799
5800 /* wc->stage == UPDATE_BACKREF */
5801 if (!(wc->flags[level] & flag)) {
5802 BUG_ON(!path->locks[level]);
5803 ret = btrfs_inc_ref(trans, root, eb, 1);
5804 BUG_ON(ret);
5805 ret = btrfs_dec_ref(trans, root, eb, 0);
5806 BUG_ON(ret);
5807 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5808 eb->len, flag, 0);
5809 BUG_ON(ret);
5810 wc->flags[level] |= flag;
5811 }
5812
5813 /*
5814 * the block is shared by multiple trees, so it's not good to
5815 * keep the tree lock
5816 */
5817 if (path->locks[level] && level > 0) {
5818 btrfs_tree_unlock(eb);
5819 path->locks[level] = 0;
5820 }
5821 return 0;
5822 }
5823
5824 /*
5825 * helper to process a tree block pointer.
5826 *
5827 * when wc->stage == DROP_REFERENCE, this function checks the
5828 * reference count of the block pointed to. if the block
5829 * is shared and we need to update back refs for the subtree
5830 * rooted at the block, this function changes wc->stage to
5831 * UPDATE_BACKREF. if the block is shared and there is no
5832 * need to update backrefs, this function drops the reference
5833 * to the block.
5834 *
5835 * NOTE: return value 1 means we should stop walking down.
5836 */
5837 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5838 struct btrfs_root *root,
5839 struct btrfs_path *path,
5840 struct walk_control *wc, int *lookup_info)
5841 {
5842 u64 bytenr;
5843 u64 generation;
5844 u64 parent;
5845 u32 blocksize;
5846 struct btrfs_key key;
5847 struct extent_buffer *next;
5848 int level = wc->level;
5849 int reada = 0;
5850 int ret = 0;
5851
5852 generation = btrfs_node_ptr_generation(path->nodes[level],
5853 path->slots[level]);
5854 /*
5855 * if the lower level block was created before the snapshot
5856 * was created, we know there is no need to update back refs
5857 * for the subtree
5858 */
5859 if (wc->stage == UPDATE_BACKREF &&
5860 generation <= root->root_key.offset) {
5861 *lookup_info = 1;
5862 return 1;
5863 }
5864
5865 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5866 blocksize = btrfs_level_size(root, level - 1);
5867
5868 next = btrfs_find_tree_block(root, bytenr, blocksize);
5869 if (!next) {
5870 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5871 if (!next)
5872 return -ENOMEM;
5873 reada = 1;
5874 }
5875 btrfs_tree_lock(next);
5876 btrfs_set_lock_blocking(next);
5877
5878 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5879 &wc->refs[level - 1],
5880 &wc->flags[level - 1]);
5881 BUG_ON(ret);
5882 BUG_ON(wc->refs[level - 1] == 0);
5883 *lookup_info = 0;
5884
5885 if (wc->stage == DROP_REFERENCE) {
5886 if (wc->refs[level - 1] > 1) {
5887 if (level == 1 &&
5888 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5889 goto skip;
5890
5891 if (!wc->update_ref ||
5892 generation <= root->root_key.offset)
5893 goto skip;
5894
5895 btrfs_node_key_to_cpu(path->nodes[level], &key,
5896 path->slots[level]);
5897 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5898 if (ret < 0)
5899 goto skip;
5900
5901 wc->stage = UPDATE_BACKREF;
5902 wc->shared_level = level - 1;
5903 }
5904 } else {
5905 if (level == 1 &&
5906 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5907 goto skip;
5908 }
5909
5910 if (!btrfs_buffer_uptodate(next, generation)) {
5911 btrfs_tree_unlock(next);
5912 free_extent_buffer(next);
5913 next = NULL;
5914 *lookup_info = 1;
5915 }
5916
5917 if (!next) {
5918 if (reada && level == 1)
5919 reada_walk_down(trans, root, wc, path);
5920 next = read_tree_block(root, bytenr, blocksize, generation);
5921 btrfs_tree_lock(next);
5922 btrfs_set_lock_blocking(next);
5923 }
5924
5925 level--;
5926 BUG_ON(level != btrfs_header_level(next));
5927 path->nodes[level] = next;
5928 path->slots[level] = 0;
5929 path->locks[level] = 1;
5930 wc->level = level;
5931 if (wc->level == 1)
5932 wc->reada_slot = 0;
5933 return 0;
5934 skip:
5935 wc->refs[level - 1] = 0;
5936 wc->flags[level - 1] = 0;
5937 if (wc->stage == DROP_REFERENCE) {
5938 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5939 parent = path->nodes[level]->start;
5940 } else {
5941 BUG_ON(root->root_key.objectid !=
5942 btrfs_header_owner(path->nodes[level]));
5943 parent = 0;
5944 }
5945
5946 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5947 root->root_key.objectid, level - 1, 0);
5948 BUG_ON(ret);
5949 }
5950 btrfs_tree_unlock(next);
5951 free_extent_buffer(next);
5952 *lookup_info = 1;
5953 return 1;
5954 }
5955
5956 /*
5957 * helper to process a tree block while walking up the tree.
5958 *
5959 * when wc->stage == DROP_REFERENCE, this function drops
5960 * reference count on the block.
5961 *
5962 * when wc->stage == UPDATE_BACKREF, this function changes
5963 * wc->stage back to DROP_REFERENCE if we changed wc->stage
5964 * to UPDATE_BACKREF previously while processing the block.
5965 *
5966 * NOTE: return value 1 means we should stop walking up.
5967 */
5968 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5969 struct btrfs_root *root,
5970 struct btrfs_path *path,
5971 struct walk_control *wc)
5972 {
5973 int ret;
5974 int level = wc->level;
5975 struct extent_buffer *eb = path->nodes[level];
5976 u64 parent = 0;
5977
5978 if (wc->stage == UPDATE_BACKREF) {
5979 BUG_ON(wc->shared_level < level);
5980 if (level < wc->shared_level)
5981 goto out;
5982
5983 ret = find_next_key(path, level + 1, &wc->update_progress);
5984 if (ret > 0)
5985 wc->update_ref = 0;
5986
5987 wc->stage = DROP_REFERENCE;
5988 wc->shared_level = -1;
5989 path->slots[level] = 0;
5990
5991 /*
5992 * check the reference count again if the block isn't locked.
5993 * we should start walking down the tree again if the
5994 * reference count is one.
5995 */
5996 if (!path->locks[level]) {
5997 BUG_ON(level == 0);
5998 btrfs_tree_lock(eb);
5999 btrfs_set_lock_blocking(eb);
6000 path->locks[level] = 1;
6001
6002 ret = btrfs_lookup_extent_info(trans, root,
6003 eb->start, eb->len,
6004 &wc->refs[level],
6005 &wc->flags[level]);
6006 BUG_ON(ret);
6007 BUG_ON(wc->refs[level] == 0);
6008 if (wc->refs[level] == 1) {
6009 btrfs_tree_unlock(eb);
6010 path->locks[level] = 0;
6011 return 1;
6012 }
6013 }
6014 }
6015
6016 /* wc->stage == DROP_REFERENCE */
6017 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6018
6019 if (wc->refs[level] == 1) {
6020 if (level == 0) {
6021 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6022 ret = btrfs_dec_ref(trans, root, eb, 1);
6023 else
6024 ret = btrfs_dec_ref(trans, root, eb, 0);
6025 BUG_ON(ret);
6026 }
6027 /* make block locked assertion in clean_tree_block happy */
6028 if (!path->locks[level] &&
6029 btrfs_header_generation(eb) == trans->transid) {
6030 btrfs_tree_lock(eb);
6031 btrfs_set_lock_blocking(eb);
6032 path->locks[level] = 1;
6033 }
6034 clean_tree_block(trans, root, eb);
6035 }
6036
6037 if (eb == root->node) {
6038 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6039 parent = eb->start;
6040 else
6041 BUG_ON(root->root_key.objectid !=
6042 btrfs_header_owner(eb));
6043 } else {
6044 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6045 parent = path->nodes[level + 1]->start;
6046 else
6047 BUG_ON(root->root_key.objectid !=
6048 btrfs_header_owner(path->nodes[level + 1]));
6049 }
6050
6051 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6052 out:
6053 wc->refs[level] = 0;
6054 wc->flags[level] = 0;
6055 return 0;
6056 }
6057
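/*
 * walk_down_tree and walk_up_tree work as a pair: walk_down_tree
 * descends until it reaches a leaf or a block that must be kept
 * (walk_down_proc/do_walk_down return 1), then walk_up_tree frees the
 * finished blocks and climbs until it finds the next sibling slot to
 * descend into.
 */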
6058 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6059 struct btrfs_root *root,
6060 struct btrfs_path *path,
6061 struct walk_control *wc)
6062 {
6063 int level = wc->level;
6064 int lookup_info = 1;
6065 int ret;
6066
6067 while (level >= 0) {
6068 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6069 if (ret > 0)
6070 break;
6071
6072 if (level == 0)
6073 break;
6074
6075 if (path->slots[level] >=
6076 btrfs_header_nritems(path->nodes[level]))
6077 break;
6078
6079 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6080 if (ret > 0) {
6081 path->slots[level]++;
6082 continue;
6083 } else if (ret < 0)
6084 return ret;
6085 level = wc->level;
6086 }
6087 return 0;
6088 }
6089
6090 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6091 struct btrfs_root *root,
6092 struct btrfs_path *path,
6093 struct walk_control *wc, int max_level)
6094 {
6095 int level = wc->level;
6096 int ret;
6097
6098 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6099 while (level < max_level && path->nodes[level]) {
6100 wc->level = level;
6101 if (path->slots[level] + 1 <
6102 btrfs_header_nritems(path->nodes[level])) {
6103 path->slots[level]++;
6104 return 0;
6105 } else {
6106 ret = walk_up_proc(trans, root, path, wc);
6107 if (ret > 0)
6108 return 0;
6109
6110 if (path->locks[level]) {
6111 btrfs_tree_unlock(path->nodes[level]);
6112 path->locks[level] = 0;
6113 }
6114 free_extent_buffer(path->nodes[level]);
6115 path->nodes[level] = NULL;
6116 level++;
6117 }
6118 }
6119 return 1;
6120 }
6121
6122 /*
6123 * drop a subvolume tree.
6124 *
6125 * this function traverses the tree, freeing any blocks that are only
6126 * referenced by the tree.
6127 *
6128 * when a shared tree block is found, this function decreases its
6129 * reference count by one. if update_ref is true, this function
6130 * also makes sure backrefs for the shared block and all lower level
6131 * blocks are properly updated.
6132 */
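/*
 * the walk is resumable: root_item->drop_progress and
 * root_item->drop_level record how far the drop has gone, so the main
 * loop can end the transaction periodically (see
 * btrfs_should_end_transaction below) and a later invocation can pick
 * up from the recorded key.
 */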
6133 int btrfs_drop_snapshot(struct btrfs_root *root,
6134 struct btrfs_block_rsv *block_rsv, int update_ref)
6135 {
6136 struct btrfs_path *path;
6137 struct btrfs_trans_handle *trans;
6138 struct btrfs_root *tree_root = root->fs_info->tree_root;
6139 struct btrfs_root_item *root_item = &root->root_item;
6140 struct walk_control *wc;
6141 struct btrfs_key key;
6142 int err = 0;
6143 int ret;
6144 int level;
6145
6146 path = btrfs_alloc_path();
6147 BUG_ON(!path);
6148
6149 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6150 BUG_ON(!wc);
6151
6152 trans = btrfs_start_transaction(tree_root, 0);
6153 if (block_rsv)
6154 trans->block_rsv = block_rsv;
6155
6156 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6157 level = btrfs_header_level(root->node);
6158 path->nodes[level] = btrfs_lock_root_node(root);
6159 btrfs_set_lock_blocking(path->nodes[level]);
6160 path->slots[level] = 0;
6161 path->locks[level] = 1;
6162 memset(&wc->update_progress, 0,
6163 sizeof(wc->update_progress));
6164 } else {
6165 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6166 memcpy(&wc->update_progress, &key,
6167 sizeof(wc->update_progress));
6168
6169 level = root_item->drop_level;
6170 BUG_ON(level == 0);
6171 path->lowest_level = level;
6172 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6173 path->lowest_level = 0;
6174 if (ret < 0) {
6175 err = ret;
6176 goto out;
6177 }
6178 WARN_ON(ret > 0);
6179
6180 /*
6181 * unlock our path, this is safe because only this
6182 * function is allowed to delete this snapshot
6183 */
6184 btrfs_unlock_up_safe(path, 0);
6185
6186 level = btrfs_header_level(root->node);
6187 while (1) {
6188 btrfs_tree_lock(path->nodes[level]);
6189 btrfs_set_lock_blocking(path->nodes[level]);
6190
6191 ret = btrfs_lookup_extent_info(trans, root,
6192 path->nodes[level]->start,
6193 path->nodes[level]->len,
6194 &wc->refs[level],
6195 &wc->flags[level]);
6196 BUG_ON(ret);
6197 BUG_ON(wc->refs[level] == 0);
6198
6199 if (level == root_item->drop_level)
6200 break;
6201
6202 btrfs_tree_unlock(path->nodes[level]);
6203 WARN_ON(wc->refs[level] != 1);
6204 level--;
6205 }
6206 }
6207
6208 wc->level = level;
6209 wc->shared_level = -1;
6210 wc->stage = DROP_REFERENCE;
6211 wc->update_ref = update_ref;
6212 wc->keep_locks = 0;
6213 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6214
6215 while (1) {
6216 ret = walk_down_tree(trans, root, path, wc);
6217 if (ret < 0) {
6218 err = ret;
6219 break;
6220 }
6221
6222 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6223 if (ret < 0) {
6224 err = ret;
6225 break;
6226 }
6227
6228 if (ret > 0) {
6229 BUG_ON(wc->stage != DROP_REFERENCE);
6230 break;
6231 }
6232
6233 if (wc->stage == DROP_REFERENCE) {
6234 level = wc->level;
6235 btrfs_node_key(path->nodes[level],
6236 &root_item->drop_progress,
6237 path->slots[level]);
6238 root_item->drop_level = level;
6239 }
6240
6241 BUG_ON(wc->level == 0);
6242 if (btrfs_should_end_transaction(trans, tree_root)) {
6243 ret = btrfs_update_root(trans, tree_root,
6244 &root->root_key,
6245 root_item);
6246 BUG_ON(ret);
6247
6248 btrfs_end_transaction_throttle(trans, tree_root);
6249 trans = btrfs_start_transaction(tree_root, 0);
6250 if (block_rsv)
6251 trans->block_rsv = block_rsv;
6252 }
6253 }
6254 btrfs_release_path(root, path);
6255 BUG_ON(err);
6256
6257 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6258 BUG_ON(ret);
6259
6260 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6261 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6262 NULL, NULL);
6263 BUG_ON(ret < 0);
6264 if (ret > 0) {
6265 ret = btrfs_del_orphan_item(trans, tree_root,
6266 root->root_key.objectid);
6267 BUG_ON(ret);
6268 }
6269 }
6270
6271 if (root->in_radix) {
6272 btrfs_free_fs_root(tree_root->fs_info, root);
6273 } else {
6274 free_extent_buffer(root->node);
6275 free_extent_buffer(root->commit_root);
6276 kfree(root);
6277 }
6278 out:
6279 btrfs_end_transaction_throttle(trans, tree_root);
6280 kfree(wc);
6281 btrfs_free_path(path);
6282 return err;
6283 }
6284
6285 /*
6286 * drop subtree rooted at tree block 'node'.
6287 *
6288 * NOTE: this function will unlock and release tree block 'node'
6289 */
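/*
 * note: this is only used for reloc trees (enforced by the BUG_ON
 * below). it reuses the walk_down/walk_up machinery with keep_locks
 * set, seeding the parent level as a full-backref block with a single
 * reference so the walk never climbs past 'parent'.
 */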
6290 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6291 struct btrfs_root *root,
6292 struct extent_buffer *node,
6293 struct extent_buffer *parent)
6294 {
6295 struct btrfs_path *path;
6296 struct walk_control *wc;
6297 int level;
6298 int parent_level;
6299 int ret = 0;
6300 int wret;
6301
6302 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6303
6304 path = btrfs_alloc_path();
6305 BUG_ON(!path);
6306
6307 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6308 BUG_ON(!wc);
6309
6310 btrfs_assert_tree_locked(parent);
6311 parent_level = btrfs_header_level(parent);
6312 extent_buffer_get(parent);
6313 path->nodes[parent_level] = parent;
6314 path->slots[parent_level] = btrfs_header_nritems(parent);
6315
6316 btrfs_assert_tree_locked(node);
6317 level = btrfs_header_level(node);
6318 path->nodes[level] = node;
6319 path->slots[level] = 0;
6320 path->locks[level] = 1;
6321
6322 wc->refs[parent_level] = 1;
6323 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6324 wc->level = level;
6325 wc->shared_level = -1;
6326 wc->stage = DROP_REFERENCE;
6327 wc->update_ref = 0;
6328 wc->keep_locks = 1;
6329 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6330
6331 while (1) {
6332 wret = walk_down_tree(trans, root, path, wc);
6333 if (wret < 0) {
6334 ret = wret;
6335 break;
6336 }
6337
6338 wret = walk_up_tree(trans, root, path, wc, parent_level);
6339 if (wret < 0)
6340 ret = wret;
6341 if (wret != 0)
6342 break;
6343 }
6344
6345 kfree(wc);
6346 btrfs_free_path(path);
6347 return ret;
6348 }
6349
6350 #if 0
6351 static unsigned long calc_ra(unsigned long start, unsigned long last,
6352 unsigned long nr)
6353 {
6354 return min(last, start + nr - 1);
6355 }
6356
6357 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6358 u64 len)
6359 {
6360 u64 page_start;
6361 u64 page_end;
6362 unsigned long first_index;
6363 unsigned long last_index;
6364 unsigned long i;
6365 struct page *page;
6366 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6367 struct file_ra_state *ra;
6368 struct btrfs_ordered_extent *ordered;
6369 unsigned int total_read = 0;
6370 unsigned int total_dirty = 0;
6371 int ret = 0;
6372
6373 ra = kzalloc(sizeof(*ra), GFP_NOFS);
6374
6375 mutex_lock(&inode->i_mutex);
6376 first_index = start >> PAGE_CACHE_SHIFT;
6377 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6378
6379 /* make sure the dirty trick played by the caller works */
6380 ret = invalidate_inode_pages2_range(inode->i_mapping,
6381 first_index, last_index);
6382 if (ret)
6383 goto out_unlock;
6384
6385 file_ra_state_init(ra, inode->i_mapping);
6386
6387 for (i = first_index; i <= last_index; i++) {
6388 if (total_read % ra->ra_pages == 0) {
6389 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6390 calc_ra(i, last_index, ra->ra_pages));
6391 }
6392 total_read++;
6393 again:
6394 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
6395 BUG_ON(1);
6396 page = grab_cache_page(inode->i_mapping, i);
6397 if (!page) {
6398 ret = -ENOMEM;
6399 goto out_unlock;
6400 }
6401 if (!PageUptodate(page)) {
6402 btrfs_readpage(NULL, page);
6403 lock_page(page);
6404 if (!PageUptodate(page)) {
6405 unlock_page(page);
6406 page_cache_release(page);
6407 ret = -EIO;
6408 goto out_unlock;
6409 }
6410 }
6411 wait_on_page_writeback(page);
6412
6413 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6414 page_end = page_start + PAGE_CACHE_SIZE - 1;
6415 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6416
6417 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6418 if (ordered) {
6419 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6420 unlock_page(page);
6421 page_cache_release(page);
6422 btrfs_start_ordered_extent(inode, ordered, 1);
6423 btrfs_put_ordered_extent(ordered);
6424 goto again;
6425 }
6426 set_page_extent_mapped(page);
6427
6428 if (i == first_index)
6429 set_extent_bits(io_tree, page_start, page_end,
6430 EXTENT_BOUNDARY, GFP_NOFS);
6431 btrfs_set_extent_delalloc(inode, page_start, page_end);
6432
6433 set_page_dirty(page);
6434 total_dirty++;
6435
6436 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6437 unlock_page(page);
6438 page_cache_release(page);
6439 }
6440
6441 out_unlock:
6442 kfree(ra);
6443 mutex_unlock(&inode->i_mutex);
6444 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6445 return ret;
6446 }
6447
6448 static noinline int relocate_data_extent(struct inode *reloc_inode,
6449 struct btrfs_key *extent_key,
6450 u64 offset)
6451 {
6452 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6453 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6454 struct extent_map *em;
6455 u64 start = extent_key->objectid - offset;
6456 u64 end = start + extent_key->offset - 1;
6457
6458 em = alloc_extent_map(GFP_NOFS);
6459 BUG_ON(!em || IS_ERR(em));
6460
6461 em->start = start;
6462 em->len = extent_key->offset;
6463 em->block_len = extent_key->offset;
6464 em->block_start = extent_key->objectid;
6465 em->bdev = root->fs_info->fs_devices->latest_bdev;
6466 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6467
6468 /* set up an extent map to cheat btrfs_readpage */
6469 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6470 while (1) {
6471 int ret;
6472 write_lock(&em_tree->lock);
6473 ret = add_extent_mapping(em_tree, em);
6474 write_unlock(&em_tree->lock);
6475 if (ret != -EEXIST) {
6476 free_extent_map(em);
6477 break;
6478 }
6479 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6480 }
6481 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6482
6483 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6484 }
6485
6486 struct btrfs_ref_path {
6487 u64 extent_start;
6488 u64 nodes[BTRFS_MAX_LEVEL];
6489 u64 root_objectid;
6490 u64 root_generation;
6491 u64 owner_objectid;
6492 u32 num_refs;
6493 int lowest_level;
6494 int current_level;
6495 int shared_level;
6496
6497 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6498 u64 new_nodes[BTRFS_MAX_LEVEL];
6499 };
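
/*
 * a ref path records one chain of back references from an extent up to
 * a tree root: nodes[0..current_level] hold the bytenrs of the tree
 * blocks along the way, and a level of -1 stands for the extent itself
 * (extent_start), which is how __next_ref_path below handles data
 * extents.
 */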
6500
6501 struct disk_extent {
6502 u64 ram_bytes;
6503 u64 disk_bytenr;
6504 u64 disk_num_bytes;
6505 u64 offset;
6506 u64 num_bytes;
6507 u8 compression;
6508 u8 encryption;
6509 u16 other_encoding;
6510 };
6511
6512 static int is_cowonly_root(u64 root_objectid)
6513 {
6514 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6515 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6516 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6517 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6518 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6519 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6520 return 1;
6521 return 0;
6522 }
6523
6524 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6525 struct btrfs_root *extent_root,
6526 struct btrfs_ref_path *ref_path,
6527 int first_time)
6528 {
6529 struct extent_buffer *leaf;
6530 struct btrfs_path *path;
6531 struct btrfs_extent_ref *ref;
6532 struct btrfs_key key;
6533 struct btrfs_key found_key;
6534 u64 bytenr;
6535 u32 nritems;
6536 int level;
6537 int ret = 1;
6538
6539 path = btrfs_alloc_path();
6540 if (!path)
6541 return -ENOMEM;
6542
6543 if (first_time) {
6544 ref_path->lowest_level = -1;
6545 ref_path->current_level = -1;
6546 ref_path->shared_level = -1;
6547 goto walk_up;
6548 }
6549 walk_down:
6550 level = ref_path->current_level - 1;
6551 while (level >= -1) {
6552 u64 parent;
6553 if (level < ref_path->lowest_level)
6554 break;
6555
6556 if (level >= 0)
6557 bytenr = ref_path->nodes[level];
6558 else
6559 bytenr = ref_path->extent_start;
6560 BUG_ON(bytenr == 0);
6561
6562 parent = ref_path->nodes[level + 1];
6563 ref_path->nodes[level + 1] = 0;
6564 ref_path->current_level = level;
6565 BUG_ON(parent == 0);
6566
6567 key.objectid = bytenr;
6568 key.offset = parent + 1;
6569 key.type = BTRFS_EXTENT_REF_KEY;
6570
6571 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6572 if (ret < 0)
6573 goto out;
6574 BUG_ON(ret == 0);
6575
6576 leaf = path->nodes[0];
6577 nritems = btrfs_header_nritems(leaf);
6578 if (path->slots[0] >= nritems) {
6579 ret = btrfs_next_leaf(extent_root, path);
6580 if (ret < 0)
6581 goto out;
6582 if (ret > 0)
6583 goto next;
6584 leaf = path->nodes[0];
6585 }
6586
6587 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6588 if (found_key.objectid == bytenr &&
6589 found_key.type == BTRFS_EXTENT_REF_KEY) {
6590 if (level < ref_path->shared_level)
6591 ref_path->shared_level = level;
6592 goto found;
6593 }
6594 next:
6595 level--;
6596 btrfs_release_path(extent_root, path);
6597 cond_resched();
6598 }
6599 /* reached lowest level */
6600 ret = 1;
6601 goto out;
6602 walk_up:
6603 level = ref_path->current_level;
6604 while (level < BTRFS_MAX_LEVEL - 1) {
6605 u64 ref_objectid;
6606
6607 if (level >= 0)
6608 bytenr = ref_path->nodes[level];
6609 else
6610 bytenr = ref_path->extent_start;
6611
6612 BUG_ON(bytenr == 0);
6613
6614 key.objectid = bytenr;
6615 key.offset = 0;
6616 key.type = BTRFS_EXTENT_REF_KEY;
6617
6618 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6619 if (ret < 0)
6620 goto out;
6621
6622 leaf = path->nodes[0];
6623 nritems = btrfs_header_nritems(leaf);
6624 if (path->slots[0] >= nritems) {
6625 ret = btrfs_next_leaf(extent_root, path);
6626 if (ret < 0)
6627 goto out;
6628 if (ret > 0) {
6629 /* the extent was freed by someone */
6630 if (ref_path->lowest_level == level)
6631 goto out;
6632 btrfs_release_path(extent_root, path);
6633 goto walk_down;
6634 }
6635 leaf = path->nodes[0];
6636 }
6637
6638 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6639 if (found_key.objectid != bytenr ||
6640 found_key.type != BTRFS_EXTENT_REF_KEY) {
6641 /* the extent was freed by someone */
6642 if (ref_path->lowest_level == level) {
6643 ret = 1;
6644 goto out;
6645 }
6646 btrfs_release_path(extent_root, path);
6647 goto walk_down;
6648 }
6649 found:
6650 ref = btrfs_item_ptr(leaf, path->slots[0],
6651 struct btrfs_extent_ref);
6652 ref_objectid = btrfs_ref_objectid(leaf, ref);
6653 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6654 if (first_time) {
6655 level = (int)ref_objectid;
6656 BUG_ON(level >= BTRFS_MAX_LEVEL);
6657 ref_path->lowest_level = level;
6658 ref_path->current_level = level;
6659 ref_path->nodes[level] = bytenr;
6660 } else {
6661 WARN_ON(ref_objectid != level);
6662 }
6663 } else {
6664 WARN_ON(level != -1);
6665 }
6666 first_time = 0;
6667
6668 if (ref_path->lowest_level == level) {
6669 ref_path->owner_objectid = ref_objectid;
6670 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6671 }
6672
6673 /*
6674 * the block is a tree root or the block isn't in a reference
6675 * counted tree.
6676 */
6677 if (found_key.objectid == found_key.offset ||
6678 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6679 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6680 ref_path->root_generation =
6681 btrfs_ref_generation(leaf, ref);
6682 if (level < 0) {
6683 /* special reference from the tree log */
6684 ref_path->nodes[0] = found_key.offset;
6685 ref_path->current_level = 0;
6686 }
6687 ret = 0;
6688 goto out;
6689 }
6690
6691 level++;
6692 BUG_ON(ref_path->nodes[level] != 0);
6693 ref_path->nodes[level] = found_key.offset;
6694 ref_path->current_level = level;
6695
6696 /*
6697 * the reference was created in the running transaction,
6698 * no need to continue walking up.
6699 */
6700 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6701 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6702 ref_path->root_generation =
6703 btrfs_ref_generation(leaf, ref);
6704 ret = 0;
6705 goto out;
6706 }
6707
6708 btrfs_release_path(extent_root, path);
6709 cond_resched();
6710 }
6711 /* reached max tree level, but no tree root found. */
6712 BUG();
6713 out:
6714 btrfs_free_path(path);
6715 return ret;
6716 }
6717
6718 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6719 struct btrfs_root *extent_root,
6720 struct btrfs_ref_path *ref_path,
6721 u64 extent_start)
6722 {
6723 memset(ref_path, 0, sizeof(*ref_path));
6724 ref_path->extent_start = extent_start;
6725
6726 return __next_ref_path(trans, extent_root, ref_path, 1);
6727 }
6728
6729 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6730 struct btrfs_root *extent_root,
6731 struct btrfs_ref_path *ref_path)
6732 {
6733 return __next_ref_path(trans, extent_root, ref_path, 0);
6734 }
6735
6736 static noinline int get_new_locations(struct inode *reloc_inode,
6737 struct btrfs_key *extent_key,
6738 u64 offset, int no_fragment,
6739 struct disk_extent **extents,
6740 int *nr_extents)
6741 {
6742 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6743 struct btrfs_path *path;
6744 struct btrfs_file_extent_item *fi;
6745 struct extent_buffer *leaf;
6746 struct disk_extent *exts = *extents;
6747 struct btrfs_key found_key;
6748 u64 cur_pos;
6749 u64 last_byte;
6750 u32 nritems;
6751 int nr = 0;
6752 int max = *nr_extents;
6753 int ret;
6754
6755 WARN_ON(!no_fragment && *extents);
6756 if (!exts) {
6757 max = 1;
6758 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6759 if (!exts)
6760 return -ENOMEM;
6761 }
6762
6763 path = btrfs_alloc_path();
6764 BUG_ON(!path);
6765
6766 cur_pos = extent_key->objectid - offset;
6767 last_byte = extent_key->objectid + extent_key->offset;
6768 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6769 cur_pos, 0);
6770 if (ret < 0)
6771 goto out;
6772 if (ret > 0) {
6773 ret = -ENOENT;
6774 goto out;
6775 }
6776
6777 while (1) {
6778 leaf = path->nodes[0];
6779 nritems = btrfs_header_nritems(leaf);
6780 if (path->slots[0] >= nritems) {
6781 ret = btrfs_next_leaf(root, path);
6782 if (ret < 0)
6783 goto out;
6784 if (ret > 0)
6785 break;
6786 leaf = path->nodes[0];
6787 }
6788
6789 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6790 if (found_key.offset != cur_pos ||
6791 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6792 found_key.objectid != reloc_inode->i_ino)
6793 break;
6794
6795 fi = btrfs_item_ptr(leaf, path->slots[0],
6796 struct btrfs_file_extent_item);
6797 if (btrfs_file_extent_type(leaf, fi) !=
6798 BTRFS_FILE_EXTENT_REG ||
6799 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6800 break;
6801
6802 if (nr == max) {
6803 struct disk_extent *old = exts;
6804 max *= 2;
6805 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6806 memcpy(exts, old, sizeof(*exts) * nr);
6807 if (old != *extents)
6808 kfree(old);
6809 }
6810
6811 exts[nr].disk_bytenr =
6812 btrfs_file_extent_disk_bytenr(leaf, fi);
6813 exts[nr].disk_num_bytes =
6814 btrfs_file_extent_disk_num_bytes(leaf, fi);
6815 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6816 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6817 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6818 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6819 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6820 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6821 fi);
6822 BUG_ON(exts[nr].offset > 0);
6823 BUG_ON(exts[nr].compression || exts[nr].encryption);
6824 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6825
6826 cur_pos += exts[nr].num_bytes;
6827 nr++;
6828
6829 if (cur_pos + offset >= last_byte)
6830 break;
6831
6832 if (no_fragment) {
6833 ret = 1;
6834 goto out;
6835 }
6836 path->slots[0]++;
6837 }
6838
6839 BUG_ON(cur_pos + offset > last_byte);
6840 if (cur_pos + offset < last_byte) {
6841 ret = -ENOENT;
6842 goto out;
6843 }
6844 ret = 0;
6845 out:
6846 btrfs_free_path(path);
6847 if (ret) {
6848 if (exts != *extents)
6849 kfree(exts);
6850 } else {
6851 *extents = exts;
6852 *nr_extents = nr;
6853 }
6854 return ret;
6855 }
6856
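/*
 * replace_one_extent finds the file extent items that point at the
 * extent being relocated, locks each file range in the io tree (waiting
 * out any ordered extent covering it) and rewrites disk_bytenr in
 * place, taking a reference on the new extent and dropping one on the
 * old.
 */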
6857 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6858 struct btrfs_root *root,
6859 struct btrfs_path *path,
6860 struct btrfs_key *extent_key,
6861 struct btrfs_key *leaf_key,
6862 struct btrfs_ref_path *ref_path,
6863 struct disk_extent *new_extents,
6864 int nr_extents)
6865 {
6866 struct extent_buffer *leaf;
6867 struct btrfs_file_extent_item *fi;
6868 struct inode *inode = NULL;
6869 struct btrfs_key key;
6870 u64 lock_start = 0;
6871 u64 lock_end = 0;
6872 u64 num_bytes;
6873 u64 ext_offset;
6874 u64 search_end = (u64)-1;
6875 u32 nritems;
6876 int nr_scanned = 0;
6877 int extent_locked = 0;
6878 int extent_type;
6879 int ret;
6880
6881 memcpy(&key, leaf_key, sizeof(key));
6882 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6883 if (key.objectid < ref_path->owner_objectid ||
6884 (key.objectid == ref_path->owner_objectid &&
6885 key.type < BTRFS_EXTENT_DATA_KEY)) {
6886 key.objectid = ref_path->owner_objectid;
6887 key.type = BTRFS_EXTENT_DATA_KEY;
6888 key.offset = 0;
6889 }
6890 }
6891
6892 while (1) {
6893 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6894 if (ret < 0)
6895 goto out;
6896
6897 leaf = path->nodes[0];
6898 nritems = btrfs_header_nritems(leaf);
6899 next:
6900 if (extent_locked && ret > 0) {
6901 /*
6902 * the file extent item was modified by someone
6903 * before the extent got locked.
6904 */
6905 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6906 lock_end, GFP_NOFS);
6907 extent_locked = 0;
6908 }
6909
6910 if (path->slots[0] >= nritems) {
6911 if (++nr_scanned > 2)
6912 break;
6913
6914 BUG_ON(extent_locked);
6915 ret = btrfs_next_leaf(root, path);
6916 if (ret < 0)
6917 goto out;
6918 if (ret > 0)
6919 break;
6920 leaf = path->nodes[0];
6921 nritems = btrfs_header_nritems(leaf);
6922 }
6923
6924 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6925
6926 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6927 if ((key.objectid > ref_path->owner_objectid) ||
6928 (key.objectid == ref_path->owner_objectid &&
6929 key.type > BTRFS_EXTENT_DATA_KEY) ||
6930 key.offset >= search_end)
6931 break;
6932 }
6933
6934 if (inode && key.objectid != inode->i_ino) {
6935 BUG_ON(extent_locked);
6936 btrfs_release_path(root, path);
6937 mutex_unlock(&inode->i_mutex);
6938 iput(inode);
6939 inode = NULL;
6940 continue;
6941 }
6942
6943 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6944 path->slots[0]++;
6945 ret = 1;
6946 goto next;
6947 }
6948 fi = btrfs_item_ptr(leaf, path->slots[0],
6949 struct btrfs_file_extent_item);
6950 extent_type = btrfs_file_extent_type(leaf, fi);
6951 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6952 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6953 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6954 extent_key->objectid)) {
6955 path->slots[0]++;
6956 ret = 1;
6957 goto next;
6958 }
6959
6960 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6961 ext_offset = btrfs_file_extent_offset(leaf, fi);
6962
6963 if (search_end == (u64)-1) {
6964 search_end = key.offset - ext_offset +
6965 btrfs_file_extent_ram_bytes(leaf, fi);
6966 }
6967
6968 if (!extent_locked) {
6969 lock_start = key.offset;
6970 lock_end = lock_start + num_bytes - 1;
6971 } else {
6972 if (lock_start > key.offset ||
6973 lock_end + 1 < key.offset + num_bytes) {
6974 unlock_extent(&BTRFS_I(inode)->io_tree,
6975 lock_start, lock_end, GFP_NOFS);
6976 extent_locked = 0;
6977 }
6978 }
6979
6980 if (!inode) {
6981 btrfs_release_path(root, path);
6982
6983 inode = btrfs_iget_locked(root->fs_info->sb,
6984 key.objectid, root);
6985 if (inode->i_state & I_NEW) {
6986 BTRFS_I(inode)->root = root;
6987 BTRFS_I(inode)->location.objectid =
6988 key.objectid;
6989 BTRFS_I(inode)->location.type =
6990 BTRFS_INODE_ITEM_KEY;
6991 BTRFS_I(inode)->location.offset = 0;
6992 btrfs_read_locked_inode(inode);
6993 unlock_new_inode(inode);
6994 }
6995 /*
6996 * some code calls btrfs_commit_transaction while
6997 * holding the i_mutex, so we can't use mutex_lock
6998 * here.
6999 */
7000 if (is_bad_inode(inode) ||
7001 !mutex_trylock(&inode->i_mutex)) {
7002 iput(inode);
7003 inode = NULL;
7004 key.offset = (u64)-1;
7005 goto skip;
7006 }
7007 }
7008
7009 if (!extent_locked) {
7010 struct btrfs_ordered_extent *ordered;
7011
7012 btrfs_release_path(root, path);
7013
7014 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7015 lock_end, GFP_NOFS);
7016 ordered = btrfs_lookup_first_ordered_extent(inode,
7017 lock_end);
7018 if (ordered &&
7019 ordered->file_offset <= lock_end &&
7020 ordered->file_offset + ordered->len > lock_start) {
7021 unlock_extent(&BTRFS_I(inode)->io_tree,
7022 lock_start, lock_end, GFP_NOFS);
7023 btrfs_start_ordered_extent(inode, ordered, 1);
7024 btrfs_put_ordered_extent(ordered);
7025 key.offset += num_bytes;
7026 goto skip;
7027 }
7028 if (ordered)
7029 btrfs_put_ordered_extent(ordered);
7030
7031 extent_locked = 1;
7032 continue;
7033 }
7034
7035 if (nr_extents == 1) {
7036 /* update extent pointer in place */
7037 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7038 new_extents[0].disk_bytenr);
7039 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7040 new_extents[0].disk_num_bytes);
7041 btrfs_mark_buffer_dirty(leaf);
7042
7043 btrfs_drop_extent_cache(inode, key.offset,
7044 key.offset + num_bytes - 1, 0);
7045
7046 ret = btrfs_inc_extent_ref(trans, root,
7047 new_extents[0].disk_bytenr,
7048 new_extents[0].disk_num_bytes,
7049 leaf->start,
7050 root->root_key.objectid,
7051 trans->transid,
7052 key.objectid);
7053 BUG_ON(ret);
7054
7055 ret = btrfs_free_extent(trans, root,
7056 extent_key->objectid,
7057 extent_key->offset,
7058 leaf->start,
7059 btrfs_header_owner(leaf),
7060 btrfs_header_generation(leaf),
7061 key.objectid, 0);
7062 BUG_ON(ret);
7063
7064 btrfs_release_path(root, path);
7065 key.offset += num_bytes;
7066 } else {
7067 BUG_ON(1);
7068 #if 0
7069 u64 alloc_hint;
7070 u64 extent_len;
7071 int i;
7072 /*
7073 * drop the old extent pointer first, then insert the
7074 * new pointers one by one
7075 */
7076 btrfs_release_path(root, path);
7077 ret = btrfs_drop_extents(trans, root, inode, key.offset,
7078 key.offset + num_bytes,
7079 key.offset, &alloc_hint);
7080 BUG_ON(ret);
7081
7082 for (i = 0; i < nr_extents; i++) {
7083 if (ext_offset >= new_extents[i].num_bytes) {
7084 ext_offset -= new_extents[i].num_bytes;
7085 continue;
7086 }
7087 extent_len = min(new_extents[i].num_bytes -
7088 ext_offset, num_bytes);
7089
7090 ret = btrfs_insert_empty_item(trans, root,
7091 path, &key,
7092 sizeof(*fi));
7093 BUG_ON(ret);
7094
7095 leaf = path->nodes[0];
7096 fi = btrfs_item_ptr(leaf, path->slots[0],
7097 struct btrfs_file_extent_item);
7098 btrfs_set_file_extent_generation(leaf, fi,
7099 trans->transid);
7100 btrfs_set_file_extent_type(leaf, fi,
7101 BTRFS_FILE_EXTENT_REG);
7102 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7103 new_extents[i].disk_bytenr);
7104 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7105 new_extents[i].disk_num_bytes);
7106 btrfs_set_file_extent_ram_bytes(leaf, fi,
7107 new_extents[i].ram_bytes);
7108
7109 btrfs_set_file_extent_compression(leaf, fi,
7110 new_extents[i].compression);
7111 btrfs_set_file_extent_encryption(leaf, fi,
7112 new_extents[i].encryption);
7113 btrfs_set_file_extent_other_encoding(leaf, fi,
7114 new_extents[i].other_encoding);
7115
7116 btrfs_set_file_extent_num_bytes(leaf, fi,
7117 extent_len);
7118 ext_offset += new_extents[i].offset;
7119 btrfs_set_file_extent_offset(leaf, fi,
7120 ext_offset);
7121 btrfs_mark_buffer_dirty(leaf);
7122
7123 btrfs_drop_extent_cache(inode, key.offset,
7124 key.offset + extent_len - 1, 0);
7125
7126 ret = btrfs_inc_extent_ref(trans, root,
7127 new_extents[i].disk_bytenr,
7128 new_extents[i].disk_num_bytes,
7129 leaf->start,
7130 root->root_key.objectid,
7131 trans->transid, key.objectid);
7132 BUG_ON(ret);
7133 btrfs_release_path(root, path);
7134
7135 inode_add_bytes(inode, extent_len);
7136
7137 ext_offset = 0;
7138 num_bytes -= extent_len;
7139 key.offset += extent_len;
7140
7141 if (num_bytes == 0)
7142 break;
7143 }
7144 BUG_ON(i >= nr_extents);
7145 #endif
7146 }
7147
7148 if (extent_locked) {
7149 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7150 lock_end, GFP_NOFS);
7151 extent_locked = 0;
7152 }
7153 skip:
7154 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
7155 key.offset >= search_end)
7156 break;
7157
7158 cond_resched();
7159 }
7160 ret = 0;
7161 out:
7162 btrfs_release_path(root, path);
7163 if (inode) {
7164 mutex_unlock(&inode->i_mutex);
7165 if (extent_locked) {
7166 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7167 lock_end, GFP_NOFS);
7168 }
7169 iput(inode);
7170 }
7171 return ret;
7172 }
7173
7174 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7175 struct btrfs_root *root,
7176 struct extent_buffer *buf, u64 orig_start)
7177 {
7178 int level;
7179 int ret;
7180
7181 BUG_ON(btrfs_header_generation(buf) != trans->transid);
7182 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7183
7184 level = btrfs_header_level(buf);
7185 if (level == 0) {
7186 struct btrfs_leaf_ref *ref;
7187 struct btrfs_leaf_ref *orig_ref;
7188
7189 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7190 if (!orig_ref)
7191 return -ENOENT;
7192
7193 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7194 if (!ref) {
7195 btrfs_free_leaf_ref(root, orig_ref);
7196 return -ENOMEM;
7197 }
7198
7199 ref->nritems = orig_ref->nritems;
7200 memcpy(ref->extents, orig_ref->extents,
7201 sizeof(ref->extents[0]) * ref->nritems);
7202
7203 btrfs_free_leaf_ref(root, orig_ref);
7204
7205 ref->root_gen = trans->transid;
7206 ref->bytenr = buf->start;
7207 ref->owner = btrfs_header_owner(buf);
7208 ref->generation = btrfs_header_generation(buf);
7209
7210 ret = btrfs_add_leaf_ref(root, ref, 0);
7211 WARN_ON(ret);
7212 btrfs_free_leaf_ref(root, ref);
7213 }
7214 return 0;
7215 }
7216
7217 static noinline int invalidate_extent_cache(struct btrfs_root *root,
7218 struct extent_buffer *leaf,
7219 struct btrfs_block_group_cache *group,
7220 struct btrfs_root *target_root)
7221 {
7222 struct btrfs_key key;
7223 struct inode *inode = NULL;
7224 struct btrfs_file_extent_item *fi;
7225 struct extent_state *cached_state = NULL;
7226 u64 num_bytes;
7227 u64 skip_objectid = 0;
7228 u32 nritems;
7229 u32 i;
7230
7231 nritems = btrfs_header_nritems(leaf);
7232 for (i = 0; i < nritems; i++) {
7233 btrfs_item_key_to_cpu(leaf, &key, i);
7234 if (key.objectid == skip_objectid ||
7235 key.type != BTRFS_EXTENT_DATA_KEY)
7236 continue;
7237 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7238 if (btrfs_file_extent_type(leaf, fi) ==
7239 BTRFS_FILE_EXTENT_INLINE)
7240 continue;
7241 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7242 continue;
7243 if (!inode || inode->i_ino != key.objectid) {
7244 iput(inode);
7245 inode = btrfs_ilookup(target_root->fs_info->sb,
7246 key.objectid, target_root, 1);
7247 }
7248 if (!inode) {
7249 skip_objectid = key.objectid;
7250 continue;
7251 }
7252 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7253
7254 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7255 key.offset + num_bytes - 1, 0, &cached_state,
7256 GFP_NOFS);
7257 btrfs_drop_extent_cache(inode, key.offset,
7258 key.offset + num_bytes - 1, 1);
7259 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7260 key.offset + num_bytes - 1, &cached_state,
7261 GFP_NOFS);
7262 cond_resched();
7263 }
7264 iput(inode);
7265 return 0;
7266 }
7267
7268 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7269 struct btrfs_root *root,
7270 struct extent_buffer *leaf,
7271 struct btrfs_block_group_cache *group,
7272 struct inode *reloc_inode)
7273 {
7274 struct btrfs_key key;
7275 struct btrfs_key extent_key;
7276 struct btrfs_file_extent_item *fi;
7277 struct btrfs_leaf_ref *ref;
7278 struct disk_extent *new_extent;
7279 u64 bytenr;
7280 u64 num_bytes;
7281 u32 nritems;
7282 u32 i;
7283 int ext_index;
7284 int nr_extent;
7285 int ret;
7286
7287 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7288 BUG_ON(!new_extent);
7289
7290 ref = btrfs_lookup_leaf_ref(root, leaf->start);
7291 BUG_ON(!ref);
7292
7293 ext_index = -1;
7294 nritems = btrfs_header_nritems(leaf);
7295 for (i = 0; i < nritems; i++) {
7296 btrfs_item_key_to_cpu(leaf, &key, i);
7297 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7298 continue;
7299 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7300 if (btrfs_file_extent_type(leaf, fi) ==
7301 BTRFS_FILE_EXTENT_INLINE)
7302 continue;
7303 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7304 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7305 if (bytenr == 0)
7306 continue;
7307
7308 ext_index++;
7309 if (bytenr >= group->key.objectid + group->key.offset ||
7310 bytenr + num_bytes <= group->key.objectid)
7311 continue;
7312
7313 extent_key.objectid = bytenr;
7314 extent_key.offset = num_bytes;
7315 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7316 nr_extent = 1;
7317 ret = get_new_locations(reloc_inode, &extent_key,
7318 group->key.objectid, 1,
7319 &new_extent, &nr_extent);
7320 if (ret > 0)
7321 continue;
7322 BUG_ON(ret < 0);
7323
7324 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7325 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7326 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7327 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7328
7329 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7330 new_extent->disk_bytenr);
7331 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7332 new_extent->disk_num_bytes);
7333 btrfs_mark_buffer_dirty(leaf);
7334
7335 ret = btrfs_inc_extent_ref(trans, root,
7336 new_extent->disk_bytenr,
7337 new_extent->disk_num_bytes,
7338 leaf->start,
7339 root->root_key.objectid,
7340 trans->transid, key.objectid);
7341 BUG_ON(ret);
7342
7343 ret = btrfs_free_extent(trans, root,
7344 bytenr, num_bytes, leaf->start,
7345 btrfs_header_owner(leaf),
7346 btrfs_header_generation(leaf),
7347 key.objectid, 0);
7348 BUG_ON(ret);
7349 cond_resched();
7350 }
7351 kfree(new_extent);
7352 BUG_ON(ext_index + 1 != ref->nritems);
7353 btrfs_free_leaf_ref(root, ref);
7354 return 0;
7355 }
7356
7357 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7358 struct btrfs_root *root)
7359 {
7360 struct btrfs_root *reloc_root;
7361 int ret;
7362
7363 if (root->reloc_root) {
7364 reloc_root = root->reloc_root;
7365 root->reloc_root = NULL;
7366 list_add(&reloc_root->dead_list,
7367 &root->fs_info->dead_reloc_roots);
7368
7369 btrfs_set_root_bytenr(&reloc_root->root_item,
7370 reloc_root->node->start);
7371 btrfs_set_root_level(&reloc_root->root_item,
7372 btrfs_header_level(reloc_root->node));
7373 memset(&reloc_root->root_item.drop_progress, 0,
7374 sizeof(struct btrfs_disk_key));
7375 reloc_root->root_item.drop_level = 0;
7376
7377 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7378 &reloc_root->root_key,
7379 &reloc_root->root_item);
7380 BUG_ON(ret);
7381 }
7382 return 0;
7383 }
7384
7385 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7386 {
7387 struct btrfs_trans_handle *trans;
7388 struct btrfs_root *reloc_root;
7389 struct btrfs_root *prev_root = NULL;
7390 struct list_head dead_roots;
7391 int ret;
7392 unsigned long nr;
7393
7394 INIT_LIST_HEAD(&dead_roots);
7395 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7396
7397 while (!list_empty(&dead_roots)) {
7398 reloc_root = list_entry(dead_roots.prev,
7399 struct btrfs_root, dead_list);
7400 list_del_init(&reloc_root->dead_list);
7401
7402 BUG_ON(reloc_root->commit_root != NULL);
7403 while (1) {
7404 trans = btrfs_join_transaction(root, 1);
7405 BUG_ON(!trans);
7406
7407 mutex_lock(&root->fs_info->drop_mutex);
7408 ret = btrfs_drop_snapshot(trans, reloc_root);
7409 if (ret != -EAGAIN)
7410 break;
7411 mutex_unlock(&root->fs_info->drop_mutex);
7412
7413 nr = trans->blocks_used;
7414 ret = btrfs_end_transaction(trans, root);
7415 BUG_ON(ret);
7416 btrfs_btree_balance_dirty(root, nr);
7417 }
7418
7419 free_extent_buffer(reloc_root->node);
7420
7421 ret = btrfs_del_root(trans, root->fs_info->tree_root,
7422 &reloc_root->root_key);
7423 BUG_ON(ret);
7424 mutex_unlock(&root->fs_info->drop_mutex);
7425
7426 nr = trans->blocks_used;
7427 ret = btrfs_end_transaction(trans, root);
7428 BUG_ON(ret);
7429 btrfs_btree_balance_dirty(root, nr);
7430
7431 kfree(prev_root);
7432 prev_root = reloc_root;
7433 }
7434 if (prev_root) {
7435 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7436 kfree(prev_root);
7437 }
7438 return 0;
7439 }
7440
7441 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7442 {
7443 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7444 return 0;
7445 }
7446
7447 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7448 {
7449 struct btrfs_root *reloc_root;
7450 struct btrfs_trans_handle *trans;
7451 struct btrfs_key location;
7452 int found;
7453 int ret;
7454
7455 mutex_lock(&root->fs_info->tree_reloc_mutex);
7456 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7457 BUG_ON(ret);
7458 found = !list_empty(&root->fs_info->dead_reloc_roots);
7459 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7460
7461 if (found) {
7462 trans = btrfs_start_transaction(root, 1);
7463 BUG_ON(!trans);
7464 ret = btrfs_commit_transaction(trans, root);
7465 BUG_ON(ret);
7466 }
7467
7468 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7469 location.offset = (u64)-1;
7470 location.type = BTRFS_ROOT_ITEM_KEY;
7471
7472 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7473 BUG_ON(!reloc_root);
7474 btrfs_orphan_cleanup(reloc_root);
7475 return 0;
7476 }
7477
7478 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7479 struct btrfs_root *root)
7480 {
7481 struct btrfs_root *reloc_root;
7482 struct extent_buffer *eb;
7483 struct btrfs_root_item *root_item;
7484 struct btrfs_key root_key;
7485 int ret;
7486
7487 BUG_ON(!root->ref_cows);
7488 if (root->reloc_root)
7489 return 0;
7490
7491 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7492 BUG_ON(!root_item);
7493
7494 ret = btrfs_copy_root(trans, root, root->commit_root,
7495 &eb, BTRFS_TREE_RELOC_OBJECTID);
7496 BUG_ON(ret);
7497
7498 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7499 root_key.offset = root->root_key.objectid;
7500 root_key.type = BTRFS_ROOT_ITEM_KEY;
7501
7502 memcpy(root_item, &root->root_item, sizeof(*root_item));
7503 btrfs_set_root_refs(root_item, 0);
7504 btrfs_set_root_bytenr(root_item, eb->start);
7505 btrfs_set_root_level(root_item, btrfs_header_level(eb));
7506 btrfs_set_root_generation(root_item, trans->transid);
7507
7508 btrfs_tree_unlock(eb);
7509 free_extent_buffer(eb);
7510
7511 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7512 &root_key, root_item);
7513 BUG_ON(ret);
7514 kfree(root_item);
7515
7516 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7517 &root_key);
7518 BUG_ON(!reloc_root);
7519 reloc_root->last_trans = trans->transid;
7520 reloc_root->commit_root = NULL;
7521 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7522
7523 root->reloc_root = reloc_root;
7524 return 0;
7525 }
7526
7527 /*
7528 * Core function of space balance.
7529 *
7530 * The idea is to use reloc trees to relocate tree blocks in reference
7531 * counted roots. There is one reloc tree for each subvol, and all
7532 * reloc trees share the same root key objectid. Reloc trees are snapshots
7533 * of the latest committed roots of subvols (root->commit_root).
7534 *
7535 * To relocate a tree block referenced by a subvol, there are two steps.
7536 * COW the block through the subvol's reloc tree, then update the block
7537 * pointer in the subvol to point to the new block. Since all reloc trees
7538 * share the same root key objectid, special handling for tree blocks
7539 * owned by them is easy. Once a tree block has been COWed in one reloc
7540 * tree, we can use the resulting new block directly when the same block
7541 * is required to COW again through other reloc trees. In this way,
7542 * relocated tree blocks are shared between reloc trees, so they are
7543 * also shared between subvols.
7544 */
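/*
 * Concrete example: if subvols A and B both reference tree block X,
 * relocating X through A's reloc tree produces a new copy X'. When the
 * relocation later reaches X through B, B is simply pointed at X'
 * instead of COWing X a second time, so the block stays shared between
 * A and B after the balance.
 */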
7545 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
7546 struct btrfs_root *root,
7547 struct btrfs_path *path,
7548 struct btrfs_key *first_key,
7549 struct btrfs_ref_path *ref_path,
7550 struct btrfs_block_group_cache *group,
7551 struct inode *reloc_inode)
7552 {
7553 struct btrfs_root *reloc_root;
7554 struct extent_buffer *eb = NULL;
7555 struct btrfs_key *keys;
7556 u64 *nodes;
7557 int level;
7558 int shared_level;
7559 int lowest_level = 0;
7560 int ret;
7561
7562 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7563 lowest_level = ref_path->owner_objectid;
7564
7565 if (!root->ref_cows) {
7566 path->lowest_level = lowest_level;
7567 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7568 BUG_ON(ret < 0);
7569 path->lowest_level = 0;
7570 btrfs_release_path(root, path);
7571 return 0;
7572 }
7573
7574 mutex_lock(&root->fs_info->tree_reloc_mutex);
7575 ret = init_reloc_tree(trans, root);
7576 BUG_ON(ret);
7577 reloc_root = root->reloc_root;
7578
7579 shared_level = ref_path->shared_level;
7580 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
7581
7582 keys = ref_path->node_keys;
7583 nodes = ref_path->new_nodes;
7584 memset(&keys[shared_level + 1], 0,
7585 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7586 memset(&nodes[shared_level + 1], 0,
7587 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
7588
7589 if (nodes[lowest_level] == 0) {
7590 path->lowest_level = lowest_level;
7591 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7592 0, 1);
7593 BUG_ON(ret);
7594 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7595 eb = path->nodes[level];
7596 if (!eb || eb == reloc_root->node)
7597 break;
7598 nodes[level] = eb->start;
7599 if (level == 0)
7600 btrfs_item_key_to_cpu(eb, &keys[level], 0);
7601 else
7602 btrfs_node_key_to_cpu(eb, &keys[level], 0);
7603 }
7604 if (nodes[0] &&
7605 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7606 eb = path->nodes[0];
7607 ret = replace_extents_in_leaf(trans, reloc_root, eb,
7608 group, reloc_inode);
7609 BUG_ON(ret);
7610 }
7611 btrfs_release_path(reloc_root, path);
7612 } else {
7613 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
7614 lowest_level);
7615 BUG_ON(ret);
7616 }
7617
7618 /*
7619 * replace tree blocks in the fs tree with tree blocks in
7620 * the reloc tree.
7621 */
7622 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7623 BUG_ON(ret < 0);
7624
7625 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7626 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7627 0, 0);
7628 BUG_ON(ret);
7629 extent_buffer_get(path->nodes[0]);
7630 eb = path->nodes[0];
7631 btrfs_release_path(reloc_root, path);
7632 ret = invalidate_extent_cache(reloc_root, eb, group, root);
7633 BUG_ON(ret);
7634 free_extent_buffer(eb);
7635 }
7636
7637 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7638 path->lowest_level = 0;
7639 return 0;
7640 }
7641
7642 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
7643 struct btrfs_root *root,
7644 struct btrfs_path *path,
7645 struct btrfs_key *first_key,
7646 struct btrfs_ref_path *ref_path)
7647 {
7648 int ret;
7649
7650 ret = relocate_one_path(trans, root, path, first_key,
7651 ref_path, NULL, NULL);
7652 BUG_ON(ret);
7653
7654 return 0;
7655 }
7656
7657 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7658 struct btrfs_root *extent_root,
7659 struct btrfs_path *path,
7660 struct btrfs_key *extent_key)
7661 {
7662 int ret;
7663
7664 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7665 if (ret)
7666 goto out;
7667 ret = btrfs_del_item(trans, extent_root, path);
7668 out:
7669 btrfs_release_path(extent_root, path);
7670 return ret;
7671 }
7672
7673 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7674 struct btrfs_ref_path *ref_path)
7675 {
7676 struct btrfs_key root_key;
7677
7678 root_key.objectid = ref_path->root_objectid;
7679 root_key.type = BTRFS_ROOT_ITEM_KEY;
7680 if (is_cowonly_root(ref_path->root_objectid))
7681 root_key.offset = 0;
7682 else
7683 root_key.offset = (u64)-1;
7684
7685 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7686 }
7687
7688 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7689 struct btrfs_path *path,
7690 struct btrfs_key *extent_key,
7691 struct btrfs_block_group_cache *group,
7692 struct inode *reloc_inode, int pass)
7693 {
7694 struct btrfs_trans_handle *trans;
7695 struct btrfs_root *found_root;
7696 struct btrfs_ref_path *ref_path = NULL;
7697 struct disk_extent *new_extents = NULL;
7698 int nr_extents = 0;
7699 int loops;
7700 int ret;
7701 int level;
7702 struct btrfs_key first_key;
7703 u64 prev_block = 0;
7704
7705
7706 trans = btrfs_start_transaction(extent_root, 1);
7707 BUG_ON(!trans);
7708
7709 if (extent_key->objectid == 0) {
7710 ret = del_extent_zero(trans, extent_root, path, extent_key);
7711 goto out;
7712 }
7713
7714 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7715 if (!ref_path) {
7716 ret = -ENOMEM;
7717 goto out;
7718 }
7719
7720 for (loops = 0; ; loops++) {
7721 if (loops == 0) {
7722 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7723 extent_key->objectid);
7724 } else {
7725 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7726 }
7727 if (ret < 0)
7728 goto out;
7729 if (ret > 0)
7730 break;
7731
7732 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7733 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7734 continue;
7735
7736 found_root = read_ref_root(extent_root->fs_info, ref_path);
7737 BUG_ON(!found_root);
7738 /*
7739 * for a reference counted tree, only process reference paths
7740 * rooted at the latest committed root.
7741 */
7742 if (found_root->ref_cows &&
7743 ref_path->root_generation != found_root->root_key.offset)
7744 continue;
7745
7746 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7747 if (pass == 0) {
7748 /*
7749 * copy data extents to new locations
7750 */
7751 u64 group_start = group->key.objectid;
7752 ret = relocate_data_extent(reloc_inode,
7753 extent_key,
7754 group_start);
7755 if (ret < 0)
7756 goto out;
7757 break;
7758 }
7759 level = 0;
7760 } else {
7761 level = ref_path->owner_objectid;
7762 }
7763
7764 if (prev_block != ref_path->nodes[level]) {
7765 struct extent_buffer *eb;
7766 u64 block_start = ref_path->nodes[level];
7767 u64 block_size = btrfs_level_size(found_root, level);
7768
7769 eb = read_tree_block(found_root, block_start,
7770 block_size, 0);
7771 btrfs_tree_lock(eb);
7772 BUG_ON(level != btrfs_header_level(eb));
7773
7774 if (level == 0)
7775 btrfs_item_key_to_cpu(eb, &first_key, 0);
7776 else
7777 btrfs_node_key_to_cpu(eb, &first_key, 0);
7778
7779 btrfs_tree_unlock(eb);
7780 free_extent_buffer(eb);
7781 prev_block = block_start;
7782 }
7783
7784 mutex_lock(&extent_root->fs_info->trans_mutex);
7785 btrfs_record_root_in_trans(found_root);
7786 mutex_unlock(&extent_root->fs_info->trans_mutex);
7787 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7788 /*
7789 * try to update data extent references while
7790 * keeping metadata shared between snapshots.
7791 */
7792 if (pass == 1) {
7793 ret = relocate_one_path(trans, found_root,
7794 path, &first_key, ref_path,
7795 group, reloc_inode);
7796 if (ret < 0)
7797 goto out;
7798 continue;
7799 }
7800 /*
7801 * use fallback method to process the remaining
7802 * references.
7803 */
7804 if (!new_extents) {
7805 u64 group_start = group->key.objectid;
7806 new_extents = kmalloc(sizeof(*new_extents),
7807 GFP_NOFS);
if (!new_extents) {
ret = -ENOMEM;
goto out;
}
7808 nr_extents = 1;
7809 ret = get_new_locations(reloc_inode,
7810 extent_key,
7811 group_start, 1,
7812 &new_extents,
7813 &nr_extents);
7814 if (ret)
7815 goto out;
7816 }
7817 ret = replace_one_extent(trans, found_root,
7818 path, extent_key,
7819 &first_key, ref_path,
7820 new_extents, nr_extents);
7821 } else {
7822 ret = relocate_tree_block(trans, found_root, path,
7823 &first_key, ref_path);
7824 }
7825 if (ret < 0)
7826 goto out;
7827 }
7828 ret = 0;
7829 out:
7830 btrfs_end_transaction(trans, extent_root);
7831 kfree(new_extents);
7832 kfree(ref_path);
7833 return ret;
7834 }
7835 #endif
7836
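/*
 * pick the raid profile to restripe a block group to, based on the
 * number of writeable devices: on a single device, mirroring becomes
 * duplication and raid0 becomes single; with multiple devices,
 * duplication becomes raid1 and single chunks become raid0.
 */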
7837 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7838 {
7839 u64 num_devices;
7840 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7841 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7842
7843 num_devices = root->fs_info->fs_devices->rw_devices;
7844 if (num_devices == 1) {
7845 stripped |= BTRFS_BLOCK_GROUP_DUP;
7846 stripped = flags & ~stripped;
7847
7848 /* turn raid0 into single device chunks */
7849 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7850 return stripped;
7851
7852 /* turn mirroring into duplication */
7853 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7854 BTRFS_BLOCK_GROUP_RAID10))
7855 return stripped | BTRFS_BLOCK_GROUP_DUP;
7856 return flags;
7857 } else {
7858 /* they already had raid on here, just return */
7859 if (flags & stripped)
7860 return flags;
7861
7862 stripped |= BTRFS_BLOCK_GROUP_DUP;
7863 stripped = flags & ~stripped;
7864
7865 /* switch duplicated blocks with raid1 */
7866 if (flags & BTRFS_BLOCK_GROUP_DUP)
7867 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7868
7869 /* turn single device chunks into raid0 */
7870 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7871 }
7873 }
7874
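/*
 * try to make a block group read-only. this only succeeds if the
 * space info can account the group's unused bytes as read-only
 * without exceeding total_bytes; otherwise -ENOSPC is returned.
 */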
7875 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
7876 {
7877 struct btrfs_space_info *sinfo = cache->space_info;
7878 u64 num_bytes;
7879 int ret = -ENOSPC;
7880
7881 if (cache->ro)
7882 return 0;
7883
7884 spin_lock(&sinfo->lock);
7885 spin_lock(&cache->lock);
7886 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7887 cache->bytes_super - btrfs_block_group_used(&cache->item);
7888
7889 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7890 sinfo->bytes_may_use + sinfo->bytes_readonly +
7891 cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
7892 sinfo->bytes_readonly += num_bytes;
7893 sinfo->bytes_reserved += cache->reserved_pinned;
7894 cache->reserved_pinned = 0;
7895 cache->ro = 1;
7896 ret = 0;
7897 }
7898 spin_unlock(&cache->lock);
7899 spin_unlock(&sinfo->lock);
7900 return ret;
7901 }
7902
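/*
 * mark a block group read-only, pre-allocating a chunk with the
 * updated profile (and retrying after a forced chunk allocation on
 * failure) so the allocator still has somewhere to go.
 */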
7903 int btrfs_set_block_group_ro(struct btrfs_root *root,
7904 struct btrfs_block_group_cache *cache)
7906 {
7907 struct btrfs_trans_handle *trans;
7908 u64 alloc_flags;
7909 int ret;
7910
7911 BUG_ON(cache->ro);
7912
7913 trans = btrfs_join_transaction(root, 1);
7914 BUG_ON(IS_ERR(trans));
7915
7916 alloc_flags = update_block_group_flags(root, cache->flags);
7917 if (alloc_flags != cache->flags)
7918 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7919
7920 ret = set_block_group_ro(cache);
7921 if (!ret)
7922 goto out;
7923 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7924 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7925 if (ret < 0)
7926 goto out;
7927 ret = set_block_group_ro(cache);
7928 out:
7929 btrfs_end_transaction(trans, root);
7930 return ret;
7931 }
7932
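/*
 * undo set_block_group_ro(): return the group's unused bytes to the
 * space info and clear the ro flag.
 */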
7933 int btrfs_set_block_group_rw(struct btrfs_root *root,
7934 struct btrfs_block_group_cache *cache)
7935 {
7936 struct btrfs_space_info *sinfo = cache->space_info;
7937 u64 num_bytes;
7938
7939 BUG_ON(!cache->ro);
7940
7941 spin_lock(&sinfo->lock);
7942 spin_lock(&cache->lock);
7943 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7944 cache->bytes_super - btrfs_block_group_used(&cache->item);
7945 sinfo->bytes_readonly -= num_bytes;
7946 cache->ro = 0;
7947 spin_unlock(&cache->lock);
7948 spin_unlock(&sinfo->lock);
7949 return 0;
7950 }
7951
7952 /*
7953 * checks to see if it's even possible to relocate this block group.
7954 *
7955 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7956 * ok to go ahead and try.
7957 */
7958 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7959 {
7960 struct btrfs_block_group_cache *block_group;
7961 struct btrfs_space_info *space_info;
7962 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7963 struct btrfs_device *device;
7964 int full = 0;
7965 int ret = 0;
7966
7967 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7968
7969 /* odd, couldn't find the block group, leave it alone */
7970 if (!block_group)
7971 return -1;
7972
7973 /* no bytes used, we're good */
7974 if (!btrfs_block_group_used(&block_group->item))
7975 goto out;
7976
7977 space_info = block_group->space_info;
7978 spin_lock(&space_info->lock);
7979
7980 full = space_info->full;
7981
7982 /*
7983 * if this is the last block group we have in this space, we can't
7984 * relocate it unless we're able to allocate a new chunk below.
7985 *
7986 * Otherwise, we need to make sure we have room in the space to handle
7987 * all of the extents from this block group. If we can, we're good.
7988 */
7989 if ((space_info->total_bytes != block_group->key.offset) &&
7990 (space_info->bytes_used + space_info->bytes_reserved +
7991 space_info->bytes_pinned + space_info->bytes_readonly +
7992 btrfs_block_group_used(&block_group->item) <
7993 space_info->total_bytes)) {
7994 spin_unlock(&space_info->lock);
7995 goto out;
7996 }
7997 spin_unlock(&space_info->lock);
7998
7999 /*
8000 * ok we don't have enough space, but maybe we have free space on our
8001 * devices to allocate new chunks for relocation, so loop through our
8002 * alloc devices and guess if we have enough space. However, if we
8003 * were marked as full, then we know there aren't enough chunks, and we
8004 * can just return.
8005 */
8006 ret = -1;
8007 if (full)
8008 goto out;
8009
8010 mutex_lock(&root->fs_info->chunk_mutex);
8011 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8012 u64 min_free = btrfs_block_group_used(&block_group->item);
8013 u64 dev_offset, max_avail;
8014
8015 /*
8016 * check to make sure we can actually find a chunk with enough
8017 * space to fit our block group in.
8018 */
8019 if (device->total_bytes > device->bytes_used + min_free) {
8020 ret = find_free_dev_extent(NULL, device, min_free,
8021 &dev_offset, &max_avail);
8022 if (!ret)
8023 break;
8024 ret = -1;
8025 }
8026 }
8027 mutex_unlock(&root->fs_info->chunk_mutex);
8028 out:
8029 btrfs_put_block_group(block_group);
8030 return ret;
8031 }
8032
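/*
 * position the path at the first block group item at or past *key
 * in the extent tree. returns 0 on success, > 0 if there is none.
 */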
8033 static int find_first_block_group(struct btrfs_root *root,
8034 struct btrfs_path *path, struct btrfs_key *key)
8035 {
8036 int ret = 0;
8037 struct btrfs_key found_key;
8038 struct extent_buffer *leaf;
8039 int slot;
8040
8041 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8042 if (ret < 0)
8043 goto out;
8044
8045 while (1) {
8046 slot = path->slots[0];
8047 leaf = path->nodes[0];
8048 if (slot >= btrfs_header_nritems(leaf)) {
8049 ret = btrfs_next_leaf(root, path);
8050 if (ret == 0)
8051 continue;
8052 if (ret < 0)
8053 goto out;
8054 break;
8055 }
8056 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8057
8058 if (found_key.objectid >= key->objectid &&
8059 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8060 ret = 0;
8061 goto out;
8062 }
8063 path->slots[0]++;
8064 }
8065 out:
8066 return ret;
8067 }
8068
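/*
 * drop the inode references that block groups hold on their free
 * space cache inodes, so the final iputs happen while the fs is
 * still fully set up during unmount.
 */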
8069 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8070 {
8071 struct btrfs_block_group_cache *block_group;
8072 u64 last = 0;
8073
8074 while (1) {
8075 struct inode *inode;
8076
8077 block_group = btrfs_lookup_first_block_group(info, last);
8078 while (block_group) {
8079 spin_lock(&block_group->lock);
8080 if (block_group->iref)
8081 break;
8082 spin_unlock(&block_group->lock);
8083 block_group = next_block_group(info->tree_root,
8084 block_group);
8085 }
8086 if (!block_group) {
8087 if (last == 0)
8088 break;
8089 last = 0;
8090 continue;
8091 }
8092
8093 inode = block_group->inode;
8094 block_group->iref = 0;
8095 block_group->inode = NULL;
8096 spin_unlock(&block_group->lock);
8097 iput(inode);
8098 last = block_group->key.objectid + block_group->key.offset;
8099 btrfs_put_block_group(block_group);
8100 }
8101 }
8102
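/*
 * tear down all block group caches at unmount time: stop outstanding
 * caching work, unlink every group from the rb tree and its space
 * info list, then free the now-unused space info structs.
 */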
8103 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8104 {
8105 struct btrfs_block_group_cache *block_group;
8106 struct btrfs_space_info *space_info;
8107 struct btrfs_caching_control *caching_ctl;
8108 struct rb_node *n;
8109
8110 down_write(&info->extent_commit_sem);
8111 while (!list_empty(&info->caching_block_groups)) {
8112 caching_ctl = list_entry(info->caching_block_groups.next,
8113 struct btrfs_caching_control, list);
8114 list_del(&caching_ctl->list);
8115 put_caching_control(caching_ctl);
8116 }
8117 up_write(&info->extent_commit_sem);
8118
8119 spin_lock(&info->block_group_cache_lock);
8120 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8121 block_group = rb_entry(n, struct btrfs_block_group_cache,
8122 cache_node);
8123 rb_erase(&block_group->cache_node,
8124 &info->block_group_cache_tree);
8125 spin_unlock(&info->block_group_cache_lock);
8126
8127 down_write(&block_group->space_info->groups_sem);
8128 list_del(&block_group->list);
8129 up_write(&block_group->space_info->groups_sem);
8130
8131 if (block_group->cached == BTRFS_CACHE_STARTED)
8132 wait_block_group_cache_done(block_group);
8133
8134 btrfs_remove_free_space_cache(block_group);
8135 btrfs_put_block_group(block_group);
8136
8137 spin_lock(&info->block_group_cache_lock);
8138 }
8139 spin_unlock(&info->block_group_cache_lock);
8140
8141 /* now that all the block groups are freed, go through and
8142 * free all the space_info structs. This is only called during
8143 * the final stages of unmount, and so we know nobody is
8144 * using them. We call synchronize_rcu() once before we start,
8145 * just to be on the safe side.
8146 */
8147 synchronize_rcu();
8148
8149 release_global_block_rsv(info);
8150
8151 while (!list_empty(&info->space_info)) {
8152 space_info = list_entry(info->space_info.next,
8153 struct btrfs_space_info,
8154 list);
8155 if (space_info->bytes_pinned > 0 ||
8156 space_info->bytes_reserved > 0) {
8157 WARN_ON(1);
8158 dump_space_info(space_info, 0, 0);
8159 }
8160 list_del(&space_info->list);
8161 kfree(space_info);
8162 }
8163 return 0;
8164 }
8165
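/*
 * add a block group to the list its space info keeps per raid profile.
 */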
8166 static void __link_block_group(struct btrfs_space_info *space_info,
8167 struct btrfs_block_group_cache *cache)
8168 {
8169 int index = get_block_group_index(cache);
8170
8171 down_write(&space_info->groups_sem);
8172 list_add_tail(&cache->list, &space_info->block_groups[index]);
8173 up_write(&space_info->groups_sem);
8174 }
8175
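/*
 * read all block group items from the extent tree at mount time,
 * build the in-memory caches and space infos, and mark groups that
 * are trivially full or empty as already cached.
 */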
8176 int btrfs_read_block_groups(struct btrfs_root *root)
8177 {
8178 struct btrfs_path *path;
8179 int ret;
8180 struct btrfs_block_group_cache *cache;
8181 struct btrfs_fs_info *info = root->fs_info;
8182 struct btrfs_space_info *space_info;
8183 struct btrfs_key key;
8184 struct btrfs_key found_key;
8185 struct extent_buffer *leaf;
8186 int need_clear = 0;
8187 u64 cache_gen;
8188
8189 root = info->extent_root;
8190 key.objectid = 0;
8191 key.offset = 0;
8192 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8193 path = btrfs_alloc_path();
8194 if (!path)
8195 return -ENOMEM;
8196
8197 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
8198 if (cache_gen != 0 &&
8199 btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
8200 need_clear = 1;
8201 if (btrfs_test_opt(root, CLEAR_CACHE))
8202 need_clear = 1;
8203 if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
8204 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
8205
8206 while (1) {
8207 ret = find_first_block_group(root, path, &key);
8208 if (ret > 0)
8209 break;
8210 if (ret != 0)
8211 goto error;
8212
8213 leaf = path->nodes[0];
8214 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8215 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8216 if (!cache) {
8217 ret = -ENOMEM;
8218 goto error;
8219 }
8220
8221 atomic_set(&cache->count, 1);
8222 spin_lock_init(&cache->lock);
8223 spin_lock_init(&cache->tree_lock);
8224 cache->fs_info = info;
8225 INIT_LIST_HEAD(&cache->list);
8226 INIT_LIST_HEAD(&cache->cluster_list);
8227
8228 if (need_clear)
8229 cache->disk_cache_state = BTRFS_DC_CLEAR;
8230
8231 /*
8232 * we only want to have 32k of RAM per block group for keeping
8233 * track of free space, and if we pass 1/2 of that we want to
8234 * start converting things over to using bitmaps
8235 */
8236 cache->extents_thresh = ((1024 * 32) / 2) /
8237 sizeof(struct btrfs_free_space);
8238
8239 read_extent_buffer(leaf, &cache->item,
8240 btrfs_item_ptr_offset(leaf, path->slots[0]),
8241 sizeof(cache->item));
8242 memcpy(&cache->key, &found_key, sizeof(found_key));
8243
8244 key.objectid = found_key.objectid + found_key.offset;
8245 btrfs_release_path(root, path);
8246 cache->flags = btrfs_block_group_flags(&cache->item);
8247 cache->sectorsize = root->sectorsize;
8248
8249 /*
8250 * check for two cases: either we are full, and therefore
8251 * don't need to bother with the caching work since we won't
8252 * find any space, or we are empty, and we can just add all
8253 * the space in and be done with it. This saves us a lot of
8254 * time, particularly in the full case.
8255 */
8256 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8257 exclude_super_stripes(root, cache);
8258 cache->last_byte_to_unpin = (u64)-1;
8259 cache->cached = BTRFS_CACHE_FINISHED;
8260 free_excluded_extents(root, cache);
8261 } else if (btrfs_block_group_used(&cache->item) == 0) {
8262 exclude_super_stripes(root, cache);
8263 cache->last_byte_to_unpin = (u64)-1;
8264 cache->cached = BTRFS_CACHE_FINISHED;
8265 add_new_free_space(cache, root->fs_info,
8266 found_key.objectid,
8267 found_key.objectid +
8268 found_key.offset);
8269 free_excluded_extents(root, cache);
8270 }
8271
8272 ret = update_space_info(info, cache->flags, found_key.offset,
8273 btrfs_block_group_used(&cache->item),
8274 &space_info);
8275 BUG_ON(ret);
8276 cache->space_info = space_info;
8277 spin_lock(&cache->space_info->lock);
8278 cache->space_info->bytes_readonly += cache->bytes_super;
8279 spin_unlock(&cache->space_info->lock);
8280
8281 __link_block_group(space_info, cache);
8282
8283 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8284 BUG_ON(ret);
8285
8286 set_avail_alloc_bits(root->fs_info, cache->flags);
8287 if (btrfs_chunk_readonly(root, cache->key.objectid))
8288 set_block_group_ro(cache);
8289 }
8290
8291 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8292 if (!(get_alloc_profile(root, space_info->flags) &
8293 (BTRFS_BLOCK_GROUP_RAID10 |
8294 BTRFS_BLOCK_GROUP_RAID1 |
8295 BTRFS_BLOCK_GROUP_DUP)))
8296 continue;
8297 /*
8298 * avoid allocating from un-mirrored block groups if there are
8299 * mirrored block groups; lists 3 and 4 are expected to hold the
* raid0 and single (unmirrored) groups, per get_block_group_index().
8300 */
8301 list_for_each_entry(cache, &space_info->block_groups[3], list)
8302 set_block_group_ro(cache);
8303 list_for_each_entry(cache, &space_info->block_groups[4], list)
8304 set_block_group_ro(cache);
8305 }
8306
8307 init_global_block_rsv(info);
8308 ret = 0;
8309 error:
8310 btrfs_free_path(path);
8311 return ret;
8312 }
8313
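/*
 * create the block group for a newly allocated chunk: set up the
 * in-memory cache, account its space, link it into its space info
 * and insert the block group item into the extent tree.
 */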
8314 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8315 struct btrfs_root *root, u64 bytes_used,
8316 u64 type, u64 chunk_objectid, u64 chunk_offset,
8317 u64 size)
8318 {
8319 int ret;
8320 struct btrfs_root *extent_root;
8321 struct btrfs_block_group_cache *cache;
8322
8323 extent_root = root->fs_info->extent_root;
8324
8325 root->fs_info->last_trans_log_full_commit = trans->transid;
8326
8327 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8328 if (!cache)
8329 return -ENOMEM;
8330
8331 cache->key.objectid = chunk_offset;
8332 cache->key.offset = size;
8333 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8334 cache->sectorsize = root->sectorsize;
8335 cache->fs_info = root->fs_info;
8336
8337 /*
8338 * we only want to have 32k of RAM per block group for keeping track
8339 * of free space, and if we pass 1/2 of that we want to start
8340 * converting things over to using bitmaps
8341 */
8342 cache->extents_thresh = ((1024 * 32) / 2) /
8343 sizeof(struct btrfs_free_space);
8344 atomic_set(&cache->count, 1);
8345 spin_lock_init(&cache->lock);
8346 spin_lock_init(&cache->tree_lock);
8347 INIT_LIST_HEAD(&cache->list);
8348 INIT_LIST_HEAD(&cache->cluster_list);
8349
8350 btrfs_set_block_group_used(&cache->item, bytes_used);
8351 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8352 cache->flags = type;
8353 btrfs_set_block_group_flags(&cache->item, type);
8354
8355 cache->last_byte_to_unpin = (u64)-1;
8356 cache->cached = BTRFS_CACHE_FINISHED;
8357 exclude_super_stripes(root, cache);
8358
8359 add_new_free_space(cache, root->fs_info, chunk_offset,
8360 chunk_offset + size);
8361
8362 free_excluded_extents(root, cache);
8363
8364 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8365 &cache->space_info);
8366 BUG_ON(ret);
8367
8368 spin_lock(&cache->space_info->lock);
8369 cache->space_info->bytes_readonly += cache->bytes_super;
8370 spin_unlock(&cache->space_info->lock);
8371
8372 __link_block_group(cache->space_info, cache);
8373
8374 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8375 BUG_ON(ret);
8376
8377 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
8378 sizeof(cache->item));
8379 BUG_ON(ret);
8380
8381 set_avail_alloc_bits(extent_root->fs_info, type);
8382
8383 return 0;
8384 }
8385
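/*
 * delete a (read-only) block group: detach it from the allocation
 * clusters, drop its free space cache inode and item, unlink it from
 * the rb tree and space info, and remove its item from the extent
 * tree.
 */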
8386 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8387 struct btrfs_root *root, u64 group_start)
8388 {
8389 struct btrfs_path *path;
8390 struct btrfs_block_group_cache *block_group;
8391 struct btrfs_free_cluster *cluster;
8392 struct btrfs_root *tree_root = root->fs_info->tree_root;
8393 struct btrfs_key key;
8394 struct inode *inode;
8395 int ret;
8396
8397 root = root->fs_info->extent_root;
8398
8399 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8400 BUG_ON(!block_group);
8401 BUG_ON(!block_group->ro);
8402
8403 /* make sure this block group isn't part of an allocation cluster */
8404 cluster = &root->fs_info->data_alloc_cluster;
8405 spin_lock(&cluster->refill_lock);
8406 btrfs_return_cluster_to_free_space(block_group, cluster);
8407 spin_unlock(&cluster->refill_lock);
8408
8409 /*
8410 * make sure this block group isn't part of a metadata
8411 * allocation cluster
8412 */
8413 cluster = &root->fs_info->meta_alloc_cluster;
8414 spin_lock(&cluster->refill_lock);
8415 btrfs_return_cluster_to_free_space(block_group, cluster);
8416 spin_unlock(&cluster->refill_lock);
8417
8418 path = btrfs_alloc_path();
8419 BUG_ON(!path);
8420
8421 inode = lookup_free_space_inode(root, block_group, path);
8422 if (!IS_ERR(inode)) {
8423 btrfs_orphan_add(trans, inode);
8424 clear_nlink(inode);
8425 /* One for the block group's ref */
8426 spin_lock(&block_group->lock);
8427 if (block_group->iref) {
8428 block_group->iref = 0;
8429 block_group->inode = NULL;
8430 spin_unlock(&block_group->lock);
8431 iput(inode);
8432 } else {
8433 spin_unlock(&block_group->lock);
8434 }
8435 /* One for our lookup ref */
8436 iput(inode);
8437 }
8438
8439 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8440 key.offset = block_group->key.objectid;
8441 key.type = 0;
8442
8443 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8444 if (ret < 0)
8445 goto out;
8446 if (ret > 0)
8447 btrfs_release_path(tree_root, path);
8448 if (ret == 0) {
8449 ret = btrfs_del_item(trans, tree_root, path);
8450 if (ret)
8451 goto out;
8452 btrfs_release_path(tree_root, path);
8453 }
8454
8455 spin_lock(&root->fs_info->block_group_cache_lock);
8456 rb_erase(&block_group->cache_node,
8457 &root->fs_info->block_group_cache_tree);
8458 spin_unlock(&root->fs_info->block_group_cache_lock);
8459
8460 down_write(&block_group->space_info->groups_sem);
8461 /*
8462 * we must use list_del_init so people can check to see if they
8463 * are still on the list after taking the semaphore
8464 */
8465 list_del_init(&block_group->list);
8466 up_write(&block_group->space_info->groups_sem);
8467
8468 if (block_group->cached == BTRFS_CACHE_STARTED)
8469 wait_block_group_cache_done(block_group);
8470
8471 btrfs_remove_free_space_cache(block_group);
8472
8473 spin_lock(&block_group->space_info->lock);
8474 block_group->space_info->total_bytes -= block_group->key.offset;
8475 block_group->space_info->bytes_readonly -= block_group->key.offset;
8476 spin_unlock(&block_group->space_info->lock);
8477
8478 memcpy(&key, &block_group->key, sizeof(key));
8479
8480 btrfs_clear_space_info_full(root->fs_info);
8481
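/*
 * drop two references: one for our lookup above and, presumably, the
 * initial reference the group was created with, now that it is gone
 * from the block group cache tree.
 */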
8482 btrfs_put_block_group(block_group);
8483 btrfs_put_block_group(block_group);
8484
8485 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8486 if (ret > 0)
8487 ret = -EIO;
8488 if (ret < 0)
8489 goto out;
8490
8491 ret = btrfs_del_item(trans, root, path);
8492 out:
8493 btrfs_free_path(path);
8494 return ret;
8495 }