fs/btrfs/extent-tree.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39
40 #undef SCRAMBLE_DELAYED_REFS
41
42 /*
43 * control flags for do_chunk_alloc's force field
44 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45 * if we really need one.
46 *
47 * CHUNK_ALLOC_LIMITED means to only try and allocate one
48 * if we have very few chunks already allocated. This is
49 * used as part of the clustering code to help make sure
50 * we have a good pool of storage to cluster in, without
51 * filling the FS with empty chunks
52 *
53 * CHUNK_ALLOC_FORCE means it must try to allocate one
54 *
55 */
56 enum {
57 CHUNK_ALLOC_NO_FORCE = 0,
58 CHUNK_ALLOC_LIMITED = 1,
59 CHUNK_ALLOC_FORCE = 2,
60 };
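/*
 * Illustrative example (not from the original file): the force level is
 * passed as the last argument of do_chunk_alloc(), e.g.
 *
 *	ret = do_chunk_alloc(trans, extent_root, alloc_flags,
 *			     CHUNK_ALLOC_NO_FORCE);
 *
 * where "alloc_flags" stands for a hypothetical block group type mask.
 * NO_FORCE allocates only when really needed, LIMITED allocates while we
 * still have very few chunks, and FORCE always attempts the allocation.
 */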
61
62 /*
63 * Control how reservations are dealt with.
64 *
65 * RESERVE_FREE - freeing a reservation.
66 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67 * ENOSPC accounting
68 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69 * bytes_may_use as the ENOSPC accounting is done elsewhere
70 */
71 enum {
72 RESERVE_FREE = 0,
73 RESERVE_ALLOC = 1,
74 RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
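/*
 * Illustrative example (not from the original file): the reserve type is
 * the third argument of btrfs_update_reserved_bytes() declared below,
 * roughly
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, delalloc);
 *
 * when space is handed out (cache, num_bytes and delalloc being
 * hypothetical variables here), and RESERVE_FREE when the reservation is
 * dropped again; only RESERVE_ALLOC also updates bytes_may_use for the
 * ENOSPC accounting.
 */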
76
77 static int update_block_group(struct btrfs_trans_handle *trans,
78 struct btrfs_root *root, u64 bytenr,
79 u64 num_bytes, int alloc);
80 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
81 struct btrfs_root *root,
82 u64 bytenr, u64 num_bytes, u64 parent,
83 u64 root_objectid, u64 owner_objectid,
84 u64 owner_offset, int refs_to_drop,
85 struct btrfs_delayed_extent_op *extra_op,
86 int no_quota);
87 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
88 struct extent_buffer *leaf,
89 struct btrfs_extent_item *ei);
90 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
91 struct btrfs_root *root,
92 u64 parent, u64 root_objectid,
93 u64 flags, u64 owner, u64 offset,
94 struct btrfs_key *ins, int ref_mod);
95 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
96 struct btrfs_root *root,
97 u64 parent, u64 root_objectid,
98 u64 flags, struct btrfs_disk_key *key,
99 int level, struct btrfs_key *ins,
100 int no_quota);
101 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
102 struct btrfs_root *extent_root, u64 flags,
103 int force);
104 static int find_next_key(struct btrfs_path *path, int level,
105 struct btrfs_key *key);
106 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
107 int dump_block_groups);
108 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
109 u64 num_bytes, int reserve,
110 int delalloc);
111 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
112 u64 num_bytes);
113 int btrfs_pin_extent(struct btrfs_root *root,
114 u64 bytenr, u64 num_bytes, int reserved);
115
116 static noinline int
117 block_group_cache_done(struct btrfs_block_group_cache *cache)
118 {
119 smp_mb();
120 return cache->cached == BTRFS_CACHE_FINISHED ||
121 cache->cached == BTRFS_CACHE_ERROR;
122 }
123
124 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
125 {
126 return (cache->flags & bits) == bits;
127 }
128
129 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
130 {
131 atomic_inc(&cache->count);
132 }
133
134 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
135 {
136 if (atomic_dec_and_test(&cache->count)) {
137 WARN_ON(cache->pinned > 0);
138 WARN_ON(cache->reserved > 0);
139 kfree(cache->free_space_ctl);
140 kfree(cache);
141 }
142 }
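/*
 * btrfs_get_block_group() and btrfs_put_block_group() form a simple
 * reference count on the cache structure; the final put frees it, and the
 * WARN_ONs above catch space still accounted as pinned or reserved at
 * that point.
 */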
143
144 /*
145 * this adds the block group to the fs_info rb tree for the block group
146 * cache
147 */
148 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
149 struct btrfs_block_group_cache *block_group)
150 {
151 struct rb_node **p;
152 struct rb_node *parent = NULL;
153 struct btrfs_block_group_cache *cache;
154
155 spin_lock(&info->block_group_cache_lock);
156 p = &info->block_group_cache_tree.rb_node;
157
158 while (*p) {
159 parent = *p;
160 cache = rb_entry(parent, struct btrfs_block_group_cache,
161 cache_node);
162 if (block_group->key.objectid < cache->key.objectid) {
163 p = &(*p)->rb_left;
164 } else if (block_group->key.objectid > cache->key.objectid) {
165 p = &(*p)->rb_right;
166 } else {
167 spin_unlock(&info->block_group_cache_lock);
168 return -EEXIST;
169 }
170 }
171
172 rb_link_node(&block_group->cache_node, parent, p);
173 rb_insert_color(&block_group->cache_node,
174 &info->block_group_cache_tree);
175
176 if (info->first_logical_byte > block_group->key.objectid)
177 info->first_logical_byte = block_group->key.objectid;
178
179 spin_unlock(&info->block_group_cache_lock);
180
181 return 0;
182 }
183
184 /*
185 * This will return the block group at or after bytenr if contains is 0, else
186 * it will return the block group that contains the bytenr
187 */
188 static struct btrfs_block_group_cache *
189 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
190 int contains)
191 {
192 struct btrfs_block_group_cache *cache, *ret = NULL;
193 struct rb_node *n;
194 u64 end, start;
195
196 spin_lock(&info->block_group_cache_lock);
197 n = info->block_group_cache_tree.rb_node;
198
199 while (n) {
200 cache = rb_entry(n, struct btrfs_block_group_cache,
201 cache_node);
202 end = cache->key.objectid + cache->key.offset - 1;
203 start = cache->key.objectid;
204
205 if (bytenr < start) {
206 if (!contains && (!ret || start < ret->key.objectid))
207 ret = cache;
208 n = n->rb_left;
209 } else if (bytenr > start) {
210 if (contains && bytenr <= end) {
211 ret = cache;
212 break;
213 }
214 n = n->rb_right;
215 } else {
216 ret = cache;
217 break;
218 }
219 }
220 if (ret) {
221 btrfs_get_block_group(ret);
222 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
223 info->first_logical_byte = ret->key.objectid;
224 }
225 spin_unlock(&info->block_group_cache_lock);
226
227 return ret;
228 }
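/*
 * Worked example (illustrative numbers): with block groups covering
 * [1G, 2G) and [2G, 3G), a search for bytenr 2.5G with contains == 1
 * returns the [2G, 3G) group, while contains == 0 with bytenr 1.5G
 * returns the next group starting at or after 1.5G, which is again
 * [2G, 3G).
 */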
229
230 static int add_excluded_extent(struct btrfs_root *root,
231 u64 start, u64 num_bytes)
232 {
233 u64 end = start + num_bytes - 1;
234 set_extent_bits(&root->fs_info->freed_extents[0],
235 start, end, EXTENT_UPTODATE, GFP_NOFS);
236 set_extent_bits(&root->fs_info->freed_extents[1],
237 start, end, EXTENT_UPTODATE, GFP_NOFS);
238 return 0;
239 }
240
241 static void free_excluded_extents(struct btrfs_root *root,
242 struct btrfs_block_group_cache *cache)
243 {
244 u64 start, end;
245
246 start = cache->key.objectid;
247 end = start + cache->key.offset - 1;
248
249 clear_extent_bits(&root->fs_info->freed_extents[0],
250 start, end, EXTENT_UPTODATE, GFP_NOFS);
251 clear_extent_bits(&root->fs_info->freed_extents[1],
252 start, end, EXTENT_UPTODATE, GFP_NOFS);
253 }
254
255 static int exclude_super_stripes(struct btrfs_root *root,
256 struct btrfs_block_group_cache *cache)
257 {
258 u64 bytenr;
259 u64 *logical;
260 int stripe_len;
261 int i, nr, ret;
262
263 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
264 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
265 cache->bytes_super += stripe_len;
266 ret = add_excluded_extent(root, cache->key.objectid,
267 stripe_len);
268 if (ret)
269 return ret;
270 }
271
272 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
273 bytenr = btrfs_sb_offset(i);
274 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
275 cache->key.objectid, bytenr,
276 0, &logical, &nr, &stripe_len);
277 if (ret)
278 return ret;
279
280 while (nr--) {
281 u64 start, len;
282
283 if (logical[nr] > cache->key.objectid +
284 cache->key.offset)
285 continue;
286
287 if (logical[nr] + stripe_len <= cache->key.objectid)
288 continue;
289
290 start = logical[nr];
291 if (start < cache->key.objectid) {
292 start = cache->key.objectid;
293 len = (logical[nr] + stripe_len) - start;
294 } else {
295 len = min_t(u64, stripe_len,
296 cache->key.objectid +
297 cache->key.offset - start);
298 }
299
300 cache->bytes_super += len;
301 ret = add_excluded_extent(root, start, len);
302 if (ret) {
303 kfree(logical);
304 return ret;
305 }
306 }
307
308 kfree(logical);
309 }
310 return 0;
311 }
312
313 static struct btrfs_caching_control *
314 get_caching_control(struct btrfs_block_group_cache *cache)
315 {
316 struct btrfs_caching_control *ctl;
317
318 spin_lock(&cache->lock);
319 if (!cache->caching_ctl) {
320 spin_unlock(&cache->lock);
321 return NULL;
322 }
323
324 ctl = cache->caching_ctl;
325 atomic_inc(&ctl->count);
326 spin_unlock(&cache->lock);
327 return ctl;
328 }
329
330 static void put_caching_control(struct btrfs_caching_control *ctl)
331 {
332 if (atomic_dec_and_test(&ctl->count))
333 kfree(ctl);
334 }
335
336 /*
337  * this is only called by cache_block_group; since we could have freed extents,
338  * we need to check the pinned_extents for any extents that can't be used yet,
339  * since their free space will only be released when the transaction commits.
340 */
341 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
342 struct btrfs_fs_info *info, u64 start, u64 end)
343 {
344 u64 extent_start, extent_end, size, total_added = 0;
345 int ret;
346
347 while (start < end) {
348 ret = find_first_extent_bit(info->pinned_extents, start,
349 &extent_start, &extent_end,
350 EXTENT_DIRTY | EXTENT_UPTODATE,
351 NULL);
352 if (ret)
353 break;
354
355 if (extent_start <= start) {
356 start = extent_end + 1;
357 } else if (extent_start > start && extent_start < end) {
358 size = extent_start - start;
359 total_added += size;
360 ret = btrfs_add_free_space(block_group, start,
361 size);
362 BUG_ON(ret); /* -ENOMEM or logic error */
363 start = extent_end + 1;
364 } else {
365 break;
366 }
367 }
368
369 if (start < end) {
370 size = end - start;
371 total_added += size;
372 ret = btrfs_add_free_space(block_group, start, size);
373 BUG_ON(ret); /* -ENOMEM or logic error */
374 }
375
376 return total_added;
377 }
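/*
 * Worked example (illustrative numbers): caching a block group that spans
 * [X, X + 256M) while [X + 64M, X + 96M - 1] is still pinned adds two
 * free space ranges: [X, X + 64M) before the pinned extent, and
 * everything from extent_end + 1 (X + 96M) up to the end of the group.
 */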
378
379 static noinline void caching_thread(struct btrfs_work *work)
380 {
381 struct btrfs_block_group_cache *block_group;
382 struct btrfs_fs_info *fs_info;
383 struct btrfs_caching_control *caching_ctl;
384 struct btrfs_root *extent_root;
385 struct btrfs_path *path;
386 struct extent_buffer *leaf;
387 struct btrfs_key key;
388 u64 total_found = 0;
389 u64 last = 0;
390 u32 nritems;
391 int ret = -ENOMEM;
392
393 caching_ctl = container_of(work, struct btrfs_caching_control, work);
394 block_group = caching_ctl->block_group;
395 fs_info = block_group->fs_info;
396 extent_root = fs_info->extent_root;
397
398 path = btrfs_alloc_path();
399 if (!path)
400 goto out;
401
402 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
403
404 /*
405 * We don't want to deadlock with somebody trying to allocate a new
406 * extent for the extent root while also trying to search the extent
407 * root to add free space. So we skip locking and search the commit
408  * root, since it's read-only
409 */
410 path->skip_locking = 1;
411 path->search_commit_root = 1;
412 path->reada = 1;
413
414 key.objectid = last;
415 key.offset = 0;
416 key.type = BTRFS_EXTENT_ITEM_KEY;
417 again:
418 mutex_lock(&caching_ctl->mutex);
419 /* need to make sure the commit_root doesn't disappear */
420 down_read(&fs_info->commit_root_sem);
421
422 next:
423 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
424 if (ret < 0)
425 goto err;
426
427 leaf = path->nodes[0];
428 nritems = btrfs_header_nritems(leaf);
429
430 while (1) {
431 if (btrfs_fs_closing(fs_info) > 1) {
432 last = (u64)-1;
433 break;
434 }
435
436 if (path->slots[0] < nritems) {
437 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
438 } else {
439 ret = find_next_key(path, 0, &key);
440 if (ret)
441 break;
442
443 if (need_resched() ||
444 rwsem_is_contended(&fs_info->commit_root_sem)) {
445 caching_ctl->progress = last;
446 btrfs_release_path(path);
447 up_read(&fs_info->commit_root_sem);
448 mutex_unlock(&caching_ctl->mutex);
449 cond_resched();
450 goto again;
451 }
452
453 ret = btrfs_next_leaf(extent_root, path);
454 if (ret < 0)
455 goto err;
456 if (ret)
457 break;
458 leaf = path->nodes[0];
459 nritems = btrfs_header_nritems(leaf);
460 continue;
461 }
462
463 if (key.objectid < last) {
464 key.objectid = last;
465 key.offset = 0;
466 key.type = BTRFS_EXTENT_ITEM_KEY;
467
468 caching_ctl->progress = last;
469 btrfs_release_path(path);
470 goto next;
471 }
472
473 if (key.objectid < block_group->key.objectid) {
474 path->slots[0]++;
475 continue;
476 }
477
478 if (key.objectid >= block_group->key.objectid +
479 block_group->key.offset)
480 break;
481
482 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
483 key.type == BTRFS_METADATA_ITEM_KEY) {
484 total_found += add_new_free_space(block_group,
485 fs_info, last,
486 key.objectid);
487 if (key.type == BTRFS_METADATA_ITEM_KEY)
488 last = key.objectid +
489 fs_info->tree_root->nodesize;
490 else
491 last = key.objectid + key.offset;
492
493 if (total_found > (1024 * 1024 * 2)) {
494 total_found = 0;
495 wake_up(&caching_ctl->wait);
496 }
497 }
498 path->slots[0]++;
499 }
500 ret = 0;
501
502 total_found += add_new_free_space(block_group, fs_info, last,
503 block_group->key.objectid +
504 block_group->key.offset);
505 caching_ctl->progress = (u64)-1;
506
507 spin_lock(&block_group->lock);
508 block_group->caching_ctl = NULL;
509 block_group->cached = BTRFS_CACHE_FINISHED;
510 spin_unlock(&block_group->lock);
511
512 err:
513 btrfs_free_path(path);
514 up_read(&fs_info->commit_root_sem);
515
516 free_excluded_extents(extent_root, block_group);
517
518 mutex_unlock(&caching_ctl->mutex);
519 out:
520 if (ret) {
521 spin_lock(&block_group->lock);
522 block_group->caching_ctl = NULL;
523 block_group->cached = BTRFS_CACHE_ERROR;
524 spin_unlock(&block_group->lock);
525 }
526 wake_up(&caching_ctl->wait);
527
528 put_caching_control(caching_ctl);
529 btrfs_put_block_group(block_group);
530 }
531
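/*
 * In short: cache_block_group() first tries the fast path and loads the
 * on-disk free space cache when the space_cache mount option is set; if
 * that fully populates the group (load_free_space_cache() returned 1),
 * the group goes straight to BTRFS_CACHE_FINISHED.  Otherwise, unless
 * load_cache_only was set, the group is marked BTRFS_CACHE_STARTED and
 * caching_thread() is queued to scan the extent tree in the background.
 */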
532 static int cache_block_group(struct btrfs_block_group_cache *cache,
533 int load_cache_only)
534 {
535 DEFINE_WAIT(wait);
536 struct btrfs_fs_info *fs_info = cache->fs_info;
537 struct btrfs_caching_control *caching_ctl;
538 int ret = 0;
539
540 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
541 if (!caching_ctl)
542 return -ENOMEM;
543
544 INIT_LIST_HEAD(&caching_ctl->list);
545 mutex_init(&caching_ctl->mutex);
546 init_waitqueue_head(&caching_ctl->wait);
547 caching_ctl->block_group = cache;
548 caching_ctl->progress = cache->key.objectid;
549 atomic_set(&caching_ctl->count, 1);
550 btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
551 caching_thread, NULL, NULL);
552
553 spin_lock(&cache->lock);
554 /*
555 * This should be a rare occasion, but this could happen I think in the
556 * case where one thread starts to load the space cache info, and then
557 * some other thread starts a transaction commit which tries to do an
558 * allocation while the other thread is still loading the space cache
559 * info. The previous loop should have kept us from choosing this block
560 * group, but if we've moved to the state where we will wait on caching
561 * block groups we need to first check if we're doing a fast load here,
562 * so we can wait for it to finish, otherwise we could end up allocating
563  * from a block group whose cache gets evicted for one reason or
564 * another.
565 */
566 while (cache->cached == BTRFS_CACHE_FAST) {
567 struct btrfs_caching_control *ctl;
568
569 ctl = cache->caching_ctl;
570 atomic_inc(&ctl->count);
571 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
572 spin_unlock(&cache->lock);
573
574 schedule();
575
576 finish_wait(&ctl->wait, &wait);
577 put_caching_control(ctl);
578 spin_lock(&cache->lock);
579 }
580
581 if (cache->cached != BTRFS_CACHE_NO) {
582 spin_unlock(&cache->lock);
583 kfree(caching_ctl);
584 return 0;
585 }
586 WARN_ON(cache->caching_ctl);
587 cache->caching_ctl = caching_ctl;
588 cache->cached = BTRFS_CACHE_FAST;
589 spin_unlock(&cache->lock);
590
591 if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
592 mutex_lock(&caching_ctl->mutex);
593 ret = load_free_space_cache(fs_info, cache);
594
595 spin_lock(&cache->lock);
596 if (ret == 1) {
597 cache->caching_ctl = NULL;
598 cache->cached = BTRFS_CACHE_FINISHED;
599 cache->last_byte_to_unpin = (u64)-1;
600 caching_ctl->progress = (u64)-1;
601 } else {
602 if (load_cache_only) {
603 cache->caching_ctl = NULL;
604 cache->cached = BTRFS_CACHE_NO;
605 } else {
606 cache->cached = BTRFS_CACHE_STARTED;
607 cache->has_caching_ctl = 1;
608 }
609 }
610 spin_unlock(&cache->lock);
611 mutex_unlock(&caching_ctl->mutex);
612
613 wake_up(&caching_ctl->wait);
614 if (ret == 1) {
615 put_caching_control(caching_ctl);
616 free_excluded_extents(fs_info->extent_root, cache);
617 return 0;
618 }
619 } else {
620 /*
621 * We are not going to do the fast caching, set cached to the
622 * appropriate value and wakeup any waiters.
623 */
624 spin_lock(&cache->lock);
625 if (load_cache_only) {
626 cache->caching_ctl = NULL;
627 cache->cached = BTRFS_CACHE_NO;
628 } else {
629 cache->cached = BTRFS_CACHE_STARTED;
630 cache->has_caching_ctl = 1;
631 }
632 spin_unlock(&cache->lock);
633 wake_up(&caching_ctl->wait);
634 }
635
636 if (load_cache_only) {
637 put_caching_control(caching_ctl);
638 return 0;
639 }
640
641 down_write(&fs_info->commit_root_sem);
642 atomic_inc(&caching_ctl->count);
643 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
644 up_write(&fs_info->commit_root_sem);
645
646 btrfs_get_block_group(cache);
647
648 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
649
650 return ret;
651 }
652
653 /*
654 * return the block group that starts at or after bytenr
655 */
656 static struct btrfs_block_group_cache *
657 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
658 {
659 struct btrfs_block_group_cache *cache;
660
661 cache = block_group_cache_tree_search(info, bytenr, 0);
662
663 return cache;
664 }
665
666 /*
667 * return the block group that contains the given bytenr
668 */
669 struct btrfs_block_group_cache *btrfs_lookup_block_group(
670 struct btrfs_fs_info *info,
671 u64 bytenr)
672 {
673 struct btrfs_block_group_cache *cache;
674
675 cache = block_group_cache_tree_search(info, bytenr, 1);
676
677 return cache;
678 }
679
680 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
681 u64 flags)
682 {
683 struct list_head *head = &info->space_info;
684 struct btrfs_space_info *found;
685
686 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
687
688 rcu_read_lock();
689 list_for_each_entry_rcu(found, head, list) {
690 if (found->flags & flags) {
691 rcu_read_unlock();
692 return found;
693 }
694 }
695 rcu_read_unlock();
696 return NULL;
697 }
698
699 /*
700 * after adding space to the filesystem, we need to clear the full flags
701 * on all the space infos.
702 */
703 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
704 {
705 struct list_head *head = &info->space_info;
706 struct btrfs_space_info *found;
707
708 rcu_read_lock();
709 list_for_each_entry_rcu(found, head, list)
710 found->full = 0;
711 rcu_read_unlock();
712 }
713
714 /* simple helper to search for an existing data extent at a given offset */
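/*
 * Returns 0 if such an extent item exists, > 0 if it does not, and < 0 on
 * error (the btrfs_search_slot() convention).
 */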
715 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
716 {
717 int ret;
718 struct btrfs_key key;
719 struct btrfs_path *path;
720
721 path = btrfs_alloc_path();
722 if (!path)
723 return -ENOMEM;
724
725 key.objectid = start;
726 key.offset = len;
727 key.type = BTRFS_EXTENT_ITEM_KEY;
728 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
729 0, 0);
730 btrfs_free_path(path);
731 return ret;
732 }
733
734 /*
735 * helper function to lookup reference count and flags of a tree block.
736 *
737  * the head node for a delayed ref is used to store the sum of all the
738  * reference count modifications queued up in the rbtree. The head
739  * node may also store the extent flags to set. This way you can check
740  * to see what the reference count and extent flags would be once all of
741  * the delayed refs are processed.
742 */
743 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
744 struct btrfs_root *root, u64 bytenr,
745 u64 offset, int metadata, u64 *refs, u64 *flags)
746 {
747 struct btrfs_delayed_ref_head *head;
748 struct btrfs_delayed_ref_root *delayed_refs;
749 struct btrfs_path *path;
750 struct btrfs_extent_item *ei;
751 struct extent_buffer *leaf;
752 struct btrfs_key key;
753 u32 item_size;
754 u64 num_refs;
755 u64 extent_flags;
756 int ret;
757
758 /*
759 * If we don't have skinny metadata, don't bother doing anything
760 * different
761 */
762 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
763 offset = root->nodesize;
764 metadata = 0;
765 }
766
767 path = btrfs_alloc_path();
768 if (!path)
769 return -ENOMEM;
770
771 if (!trans) {
772 path->skip_locking = 1;
773 path->search_commit_root = 1;
774 }
775
776 search_again:
777 key.objectid = bytenr;
778 key.offset = offset;
779 if (metadata)
780 key.type = BTRFS_METADATA_ITEM_KEY;
781 else
782 key.type = BTRFS_EXTENT_ITEM_KEY;
783
784 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
785 &key, path, 0, 0);
786 if (ret < 0)
787 goto out_free;
788
789 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
790 if (path->slots[0]) {
791 path->slots[0]--;
792 btrfs_item_key_to_cpu(path->nodes[0], &key,
793 path->slots[0]);
794 if (key.objectid == bytenr &&
795 key.type == BTRFS_EXTENT_ITEM_KEY &&
796 key.offset == root->nodesize)
797 ret = 0;
798 }
799 }
800
801 if (ret == 0) {
802 leaf = path->nodes[0];
803 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
804 if (item_size >= sizeof(*ei)) {
805 ei = btrfs_item_ptr(leaf, path->slots[0],
806 struct btrfs_extent_item);
807 num_refs = btrfs_extent_refs(leaf, ei);
808 extent_flags = btrfs_extent_flags(leaf, ei);
809 } else {
810 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
811 struct btrfs_extent_item_v0 *ei0;
812 BUG_ON(item_size != sizeof(*ei0));
813 ei0 = btrfs_item_ptr(leaf, path->slots[0],
814 struct btrfs_extent_item_v0);
815 num_refs = btrfs_extent_refs_v0(leaf, ei0);
816 /* FIXME: this isn't correct for data */
817 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
818 #else
819 BUG();
820 #endif
821 }
822 BUG_ON(num_refs == 0);
823 } else {
824 num_refs = 0;
825 extent_flags = 0;
826 ret = 0;
827 }
828
829 if (!trans)
830 goto out;
831
832 delayed_refs = &trans->transaction->delayed_refs;
833 spin_lock(&delayed_refs->lock);
834 head = btrfs_find_delayed_ref_head(trans, bytenr);
835 if (head) {
836 if (!mutex_trylock(&head->mutex)) {
837 atomic_inc(&head->node.refs);
838 spin_unlock(&delayed_refs->lock);
839
840 btrfs_release_path(path);
841
842 /*
843 * Mutex was contended, block until it's released and try
844 * again
845 */
846 mutex_lock(&head->mutex);
847 mutex_unlock(&head->mutex);
848 btrfs_put_delayed_ref(&head->node);
849 goto search_again;
850 }
851 spin_lock(&head->lock);
852 if (head->extent_op && head->extent_op->update_flags)
853 extent_flags |= head->extent_op->flags_to_set;
854 else
855 BUG_ON(num_refs == 0);
856
857 num_refs += head->node.ref_mod;
858 spin_unlock(&head->lock);
859 mutex_unlock(&head->mutex);
860 }
861 spin_unlock(&delayed_refs->lock);
862 out:
863 WARN_ON(num_refs == 0);
864 if (refs)
865 *refs = num_refs;
866 if (flags)
867 *flags = extent_flags;
868 out_free:
869 btrfs_free_path(path);
870 return ret;
871 }
872
873 /*
874 * Back reference rules. Back refs have three main goals:
875 *
876 * 1) differentiate between all holders of references to an extent so that
877 * when a reference is dropped we can make sure it was a valid reference
878 * before freeing the extent.
879 *
880 * 2) Provide enough information to quickly find the holders of an extent
881 * if we notice a given block is corrupted or bad.
882 *
883 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
884 * maintenance. This is actually the same as #2, but with a slightly
885 * different use case.
886 *
887  * There are two kinds of back refs. The implicit back refs are optimized
888  * for pointers in non-shared tree blocks. For a given pointer in a block,
889  * back refs of this kind provide information about the block's owner tree
890  * and the pointer's key. This information allows us to find the block by
891  * b-tree searching. The full back refs are for pointers in tree blocks not
892  * referenced by their owner trees. The location of the tree block is recorded
893  * in the back refs. Actually the full back refs are generic, and can be
894  * used in all cases where the implicit back refs are used. The major shortcoming
895  * of the full back refs is their overhead. Every time a tree block gets
896  * COWed, we have to update the back refs entry for all pointers in it.
897 *
898 * For a newly allocated tree block, we use implicit back refs for
899 * pointers in it. This means most tree related operations only involve
900  * implicit back refs. For a tree block created in an old transaction, the
901  * only way to drop a reference to it is to COW it. So we can detect the
902  * event that a tree block loses its owner tree's reference and do the
903 * back refs conversion.
904 *
905 * When a tree block is COW'd through a tree, there are four cases:
906 *
907 * The reference count of the block is one and the tree is the block's
908 * owner tree. Nothing to do in this case.
909 *
910 * The reference count of the block is one and the tree is not the
911  * block's owner tree. In this case, full back refs are used for pointers
912  * in the block. Remove these full back refs, add implicit back refs for
913  * every pointer in the new block.
914  *
915  * The reference count of the block is greater than one and the tree is
916  * the block's owner tree. In this case, implicit back refs are used for
917  * pointers in the block. Add full back refs for every pointer in the
918  * block, increase lower level extents' reference counts. The original
919  * implicit back refs are carried over to the new block.
920 *
921 * The reference count of the block is greater than one and the tree is
922 * not the block's owner tree. Add implicit back refs for every pointer in
923 * the new block, increase lower level extents' reference count.
924 *
925 * Back Reference Key composing:
926 *
927 * The key objectid corresponds to the first byte in the extent,
928 * The key type is used to differentiate between types of back refs.
929 * There are different meanings of the key offset for different types
930 * of back refs.
931 *
932 * File extents can be referenced by:
933 *
934 * - multiple snapshots, subvolumes, or different generations in one subvol
935 * - different files inside a single subvolume
936 * - different offsets inside a file (bookend extents in file.c)
937 *
938 * The extent ref structure for the implicit back refs has fields for:
939 *
940 * - Objectid of the subvolume root
941 * - objectid of the file holding the reference
942 * - original offset in the file
943 * - how many bookend extents
944 *
945  * The key offset for the implicit back refs is a hash of the first
946 * three fields.
947 *
948  * The extent ref structure for the full back refs has a field for:
949 *
950 * - number of pointers in the tree leaf
951 *
952  * The key offset for the full back refs is the first byte of
953  * the tree leaf.
954  *
955  * When a file extent is allocated, the implicit back refs are used
956  * and the fields are filled in:
957 *
958 * (root_key.objectid, inode objectid, offset in file, 1)
959 *
960  * When a file extent is removed during file truncation, we find the
961 * corresponding implicit back refs and check the following fields:
962 *
963 * (btrfs_header_owner(leaf), inode objectid, offset in file)
964 *
965 * Btree extents can be referenced by:
966 *
967 * - Different subvolumes
968 *
969  * Both the implicit back refs and the full back refs for tree blocks
970  * only consist of a key. The key offset for the implicit back refs is the
971  * objectid of the block's owner tree. The key offset for the full back refs
972  * is the first byte of the parent block.
973  *
974  * When implicit back refs are used, information about the lowest key and
975  * level of the tree block is required. This information is stored in the
976  * tree block info structure.
977 */
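/*
 * Worked example (illustrative numbers): a data extent at bytenr B
 * referenced from inode 257 at file offset 0 in subvolume 5 gets an
 * implicit back ref item keyed as
 *
 *	(B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * using the hash helper defined below, while the full (shared) variant is
 * keyed (B, BTRFS_SHARED_DATA_REF_KEY, <bytenr of the referencing leaf>).
 */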
978
979 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
980 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
981 struct btrfs_root *root,
982 struct btrfs_path *path,
983 u64 owner, u32 extra_size)
984 {
985 struct btrfs_extent_item *item;
986 struct btrfs_extent_item_v0 *ei0;
987 struct btrfs_extent_ref_v0 *ref0;
988 struct btrfs_tree_block_info *bi;
989 struct extent_buffer *leaf;
990 struct btrfs_key key;
991 struct btrfs_key found_key;
992 u32 new_size = sizeof(*item);
993 u64 refs;
994 int ret;
995
996 leaf = path->nodes[0];
997 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
998
999 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1000 ei0 = btrfs_item_ptr(leaf, path->slots[0],
1001 struct btrfs_extent_item_v0);
1002 refs = btrfs_extent_refs_v0(leaf, ei0);
1003
1004 if (owner == (u64)-1) {
1005 while (1) {
1006 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1007 ret = btrfs_next_leaf(root, path);
1008 if (ret < 0)
1009 return ret;
1010 BUG_ON(ret > 0); /* Corruption */
1011 leaf = path->nodes[0];
1012 }
1013 btrfs_item_key_to_cpu(leaf, &found_key,
1014 path->slots[0]);
1015 BUG_ON(key.objectid != found_key.objectid);
1016 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1017 path->slots[0]++;
1018 continue;
1019 }
1020 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1021 struct btrfs_extent_ref_v0);
1022 owner = btrfs_ref_objectid_v0(leaf, ref0);
1023 break;
1024 }
1025 }
1026 btrfs_release_path(path);
1027
1028 if (owner < BTRFS_FIRST_FREE_OBJECTID)
1029 new_size += sizeof(*bi);
1030
1031 new_size -= sizeof(*ei0);
1032 ret = btrfs_search_slot(trans, root, &key, path,
1033 new_size + extra_size, 1);
1034 if (ret < 0)
1035 return ret;
1036 BUG_ON(ret); /* Corruption */
1037
1038 btrfs_extend_item(root, path, new_size);
1039
1040 leaf = path->nodes[0];
1041 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1042 btrfs_set_extent_refs(leaf, item, refs);
1043 /* FIXME: get real generation */
1044 btrfs_set_extent_generation(leaf, item, 0);
1045 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1046 btrfs_set_extent_flags(leaf, item,
1047 BTRFS_EXTENT_FLAG_TREE_BLOCK |
1048 BTRFS_BLOCK_FLAG_FULL_BACKREF);
1049 bi = (struct btrfs_tree_block_info *)(item + 1);
1050 /* FIXME: get first key of the block */
1051 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1052 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1053 } else {
1054 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1055 }
1056 btrfs_mark_buffer_dirty(leaf);
1057 return 0;
1058 }
1059 #endif
1060
1061 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1062 {
1063 u32 high_crc = ~(u32)0;
1064 u32 low_crc = ~(u32)0;
1065 __le64 lenum;
1066
1067 lenum = cpu_to_le64(root_objectid);
1068 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1069 lenum = cpu_to_le64(owner);
1070 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1071 lenum = cpu_to_le64(offset);
1072 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1073
1074 return ((u64)high_crc << 31) ^ (u64)low_crc;
1075 }
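/*
 * This hash becomes the key offset of implicit data back ref items stored
 * in the extent tree (see lookup_extent_data_ref() and
 * insert_extent_data_ref() below), so it is effectively part of the
 * on-disk format and must not change; note the high CRC is shifted by 31,
 * not 32, bits.
 */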
1076
1077 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1078 struct btrfs_extent_data_ref *ref)
1079 {
1080 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1081 btrfs_extent_data_ref_objectid(leaf, ref),
1082 btrfs_extent_data_ref_offset(leaf, ref));
1083 }
1084
1085 static int match_extent_data_ref(struct extent_buffer *leaf,
1086 struct btrfs_extent_data_ref *ref,
1087 u64 root_objectid, u64 owner, u64 offset)
1088 {
1089 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1090 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1091 btrfs_extent_data_ref_offset(leaf, ref) != offset)
1092 return 0;
1093 return 1;
1094 }
1095
1096 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1097 struct btrfs_root *root,
1098 struct btrfs_path *path,
1099 u64 bytenr, u64 parent,
1100 u64 root_objectid,
1101 u64 owner, u64 offset)
1102 {
1103 struct btrfs_key key;
1104 struct btrfs_extent_data_ref *ref;
1105 struct extent_buffer *leaf;
1106 u32 nritems;
1107 int ret;
1108 int recow;
1109 int err = -ENOENT;
1110
1111 key.objectid = bytenr;
1112 if (parent) {
1113 key.type = BTRFS_SHARED_DATA_REF_KEY;
1114 key.offset = parent;
1115 } else {
1116 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1117 key.offset = hash_extent_data_ref(root_objectid,
1118 owner, offset);
1119 }
1120 again:
1121 recow = 0;
1122 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1123 if (ret < 0) {
1124 err = ret;
1125 goto fail;
1126 }
1127
1128 if (parent) {
1129 if (!ret)
1130 return 0;
1131 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1132 key.type = BTRFS_EXTENT_REF_V0_KEY;
1133 btrfs_release_path(path);
1134 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1135 if (ret < 0) {
1136 err = ret;
1137 goto fail;
1138 }
1139 if (!ret)
1140 return 0;
1141 #endif
1142 goto fail;
1143 }
1144
1145 leaf = path->nodes[0];
1146 nritems = btrfs_header_nritems(leaf);
1147 while (1) {
1148 if (path->slots[0] >= nritems) {
1149 ret = btrfs_next_leaf(root, path);
1150 if (ret < 0)
1151 err = ret;
1152 if (ret)
1153 goto fail;
1154
1155 leaf = path->nodes[0];
1156 nritems = btrfs_header_nritems(leaf);
1157 recow = 1;
1158 }
1159
1160 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1161 if (key.objectid != bytenr ||
1162 key.type != BTRFS_EXTENT_DATA_REF_KEY)
1163 goto fail;
1164
1165 ref = btrfs_item_ptr(leaf, path->slots[0],
1166 struct btrfs_extent_data_ref);
1167
1168 if (match_extent_data_ref(leaf, ref, root_objectid,
1169 owner, offset)) {
1170 if (recow) {
1171 btrfs_release_path(path);
1172 goto again;
1173 }
1174 err = 0;
1175 break;
1176 }
1177 path->slots[0]++;
1178 }
1179 fail:
1180 return err;
1181 }
1182
1183 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1184 struct btrfs_root *root,
1185 struct btrfs_path *path,
1186 u64 bytenr, u64 parent,
1187 u64 root_objectid, u64 owner,
1188 u64 offset, int refs_to_add)
1189 {
1190 struct btrfs_key key;
1191 struct extent_buffer *leaf;
1192 u32 size;
1193 u32 num_refs;
1194 int ret;
1195
1196 key.objectid = bytenr;
1197 if (parent) {
1198 key.type = BTRFS_SHARED_DATA_REF_KEY;
1199 key.offset = parent;
1200 size = sizeof(struct btrfs_shared_data_ref);
1201 } else {
1202 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1203 key.offset = hash_extent_data_ref(root_objectid,
1204 owner, offset);
1205 size = sizeof(struct btrfs_extent_data_ref);
1206 }
1207
1208 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1209 if (ret && ret != -EEXIST)
1210 goto fail;
1211
1212 leaf = path->nodes[0];
1213 if (parent) {
1214 struct btrfs_shared_data_ref *ref;
1215 ref = btrfs_item_ptr(leaf, path->slots[0],
1216 struct btrfs_shared_data_ref);
1217 if (ret == 0) {
1218 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1219 } else {
1220 num_refs = btrfs_shared_data_ref_count(leaf, ref);
1221 num_refs += refs_to_add;
1222 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1223 }
1224 } else {
1225 struct btrfs_extent_data_ref *ref;
1226 while (ret == -EEXIST) {
1227 ref = btrfs_item_ptr(leaf, path->slots[0],
1228 struct btrfs_extent_data_ref);
1229 if (match_extent_data_ref(leaf, ref, root_objectid,
1230 owner, offset))
1231 break;
1232 btrfs_release_path(path);
1233 key.offset++;
1234 ret = btrfs_insert_empty_item(trans, root, path, &key,
1235 size);
1236 if (ret && ret != -EEXIST)
1237 goto fail;
1238
1239 leaf = path->nodes[0];
1240 }
1241 ref = btrfs_item_ptr(leaf, path->slots[0],
1242 struct btrfs_extent_data_ref);
1243 if (ret == 0) {
1244 btrfs_set_extent_data_ref_root(leaf, ref,
1245 root_objectid);
1246 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1247 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1248 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1249 } else {
1250 num_refs = btrfs_extent_data_ref_count(leaf, ref);
1251 num_refs += refs_to_add;
1252 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1253 }
1254 }
1255 btrfs_mark_buffer_dirty(leaf);
1256 ret = 0;
1257 fail:
1258 btrfs_release_path(path);
1259 return ret;
1260 }
1261
1262 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1263 struct btrfs_root *root,
1264 struct btrfs_path *path,
1265 int refs_to_drop, int *last_ref)
1266 {
1267 struct btrfs_key key;
1268 struct btrfs_extent_data_ref *ref1 = NULL;
1269 struct btrfs_shared_data_ref *ref2 = NULL;
1270 struct extent_buffer *leaf;
1271 u32 num_refs = 0;
1272 int ret = 0;
1273
1274 leaf = path->nodes[0];
1275 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1276
1277 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1278 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1279 struct btrfs_extent_data_ref);
1280 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1281 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1282 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1283 struct btrfs_shared_data_ref);
1284 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1285 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1286 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1287 struct btrfs_extent_ref_v0 *ref0;
1288 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1289 struct btrfs_extent_ref_v0);
1290 num_refs = btrfs_ref_count_v0(leaf, ref0);
1291 #endif
1292 } else {
1293 BUG();
1294 }
1295
1296 BUG_ON(num_refs < refs_to_drop);
1297 num_refs -= refs_to_drop;
1298
1299 if (num_refs == 0) {
1300 ret = btrfs_del_item(trans, root, path);
1301 *last_ref = 1;
1302 } else {
1303 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1304 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1305 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1306 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1307 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1308 else {
1309 struct btrfs_extent_ref_v0 *ref0;
1310 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1311 struct btrfs_extent_ref_v0);
1312 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1313 }
1314 #endif
1315 btrfs_mark_buffer_dirty(leaf);
1316 }
1317 return ret;
1318 }
1319
1320 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1321 struct btrfs_path *path,
1322 struct btrfs_extent_inline_ref *iref)
1323 {
1324 struct btrfs_key key;
1325 struct extent_buffer *leaf;
1326 struct btrfs_extent_data_ref *ref1;
1327 struct btrfs_shared_data_ref *ref2;
1328 u32 num_refs = 0;
1329
1330 leaf = path->nodes[0];
1331 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1332 if (iref) {
1333 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1334 BTRFS_EXTENT_DATA_REF_KEY) {
1335 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1336 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1337 } else {
1338 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1339 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1340 }
1341 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1342 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1343 struct btrfs_extent_data_ref);
1344 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1346 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1347 struct btrfs_shared_data_ref);
1348 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1349 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1350 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1351 struct btrfs_extent_ref_v0 *ref0;
1352 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1353 struct btrfs_extent_ref_v0);
1354 num_refs = btrfs_ref_count_v0(leaf, ref0);
1355 #endif
1356 } else {
1357 WARN_ON(1);
1358 }
1359 return num_refs;
1360 }
1361
1362 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1363 struct btrfs_root *root,
1364 struct btrfs_path *path,
1365 u64 bytenr, u64 parent,
1366 u64 root_objectid)
1367 {
1368 struct btrfs_key key;
1369 int ret;
1370
1371 key.objectid = bytenr;
1372 if (parent) {
1373 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1374 key.offset = parent;
1375 } else {
1376 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1377 key.offset = root_objectid;
1378 }
1379
1380 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1381 if (ret > 0)
1382 ret = -ENOENT;
1383 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1384 if (ret == -ENOENT && parent) {
1385 btrfs_release_path(path);
1386 key.type = BTRFS_EXTENT_REF_V0_KEY;
1387 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388 if (ret > 0)
1389 ret = -ENOENT;
1390 }
1391 #endif
1392 return ret;
1393 }
1394
1395 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1396 struct btrfs_root *root,
1397 struct btrfs_path *path,
1398 u64 bytenr, u64 parent,
1399 u64 root_objectid)
1400 {
1401 struct btrfs_key key;
1402 int ret;
1403
1404 key.objectid = bytenr;
1405 if (parent) {
1406 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1407 key.offset = parent;
1408 } else {
1409 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1410 key.offset = root_objectid;
1411 }
1412
1413 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1414 btrfs_release_path(path);
1415 return ret;
1416 }
1417
1418 static inline int extent_ref_type(u64 parent, u64 owner)
1419 {
1420 int type;
1421 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1422 if (parent > 0)
1423 type = BTRFS_SHARED_BLOCK_REF_KEY;
1424 else
1425 type = BTRFS_TREE_BLOCK_REF_KEY;
1426 } else {
1427 if (parent > 0)
1428 type = BTRFS_SHARED_DATA_REF_KEY;
1429 else
1430 type = BTRFS_EXTENT_DATA_REF_KEY;
1431 }
1432 return type;
1433 }
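/*
 * Summary of extent_ref_type(): tree blocks (owner below
 * BTRFS_FIRST_FREE_OBJECTID) get BTRFS_SHARED_BLOCK_REF_KEY when the
 * parent is known (full back ref) or BTRFS_TREE_BLOCK_REF_KEY (implicit);
 * data extents correspondingly get BTRFS_SHARED_DATA_REF_KEY or
 * BTRFS_EXTENT_DATA_REF_KEY.
 */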
1434
1435 static int find_next_key(struct btrfs_path *path, int level,
1436 struct btrfs_key *key)
1437
1438 {
1439 for (; level < BTRFS_MAX_LEVEL; level++) {
1440 if (!path->nodes[level])
1441 break;
1442 if (path->slots[level] + 1 >=
1443 btrfs_header_nritems(path->nodes[level]))
1444 continue;
1445 if (level == 0)
1446 btrfs_item_key_to_cpu(path->nodes[level], key,
1447 path->slots[level] + 1);
1448 else
1449 btrfs_node_key_to_cpu(path->nodes[level], key,
1450 path->slots[level] + 1);
1451 return 0;
1452 }
1453 return 1;
1454 }
1455
1456 /*
1457 * look for inline back ref. if back ref is found, *ref_ret is set
1458 * to the address of inline back ref, and 0 is returned.
1459 *
1460 * if back ref isn't found, *ref_ret is set to the address where it
1461 * should be inserted, and -ENOENT is returned.
1462 *
1463 * if insert is true and there are too many inline back refs, the path
1464 * points to the extent item, and -EAGAIN is returned.
1465 *
1466 * NOTE: inline back refs are ordered in the same way that back ref
1467 * items in the tree are ordered.
1468 */
1469 static noinline_for_stack
1470 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1471 struct btrfs_root *root,
1472 struct btrfs_path *path,
1473 struct btrfs_extent_inline_ref **ref_ret,
1474 u64 bytenr, u64 num_bytes,
1475 u64 parent, u64 root_objectid,
1476 u64 owner, u64 offset, int insert)
1477 {
1478 struct btrfs_key key;
1479 struct extent_buffer *leaf;
1480 struct btrfs_extent_item *ei;
1481 struct btrfs_extent_inline_ref *iref;
1482 u64 flags;
1483 u64 item_size;
1484 unsigned long ptr;
1485 unsigned long end;
1486 int extra_size;
1487 int type;
1488 int want;
1489 int ret;
1490 int err = 0;
1491 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1492 SKINNY_METADATA);
1493
1494 key.objectid = bytenr;
1495 key.type = BTRFS_EXTENT_ITEM_KEY;
1496 key.offset = num_bytes;
1497
1498 want = extent_ref_type(parent, owner);
1499 if (insert) {
1500 extra_size = btrfs_extent_inline_ref_size(want);
1501 path->keep_locks = 1;
1502 } else
1503 extra_size = -1;
1504
1505 /*
1506 * Owner is our parent level, so we can just add one to get the level
1507 * for the block we are interested in.
1508 */
1509 if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1510 key.type = BTRFS_METADATA_ITEM_KEY;
1511 key.offset = owner;
1512 }
1513
1514 again:
1515 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1516 if (ret < 0) {
1517 err = ret;
1518 goto out;
1519 }
1520
1521 /*
1522 * We may be a newly converted file system which still has the old fat
1523 * extent entries for metadata, so try and see if we have one of those.
1524 */
1525 if (ret > 0 && skinny_metadata) {
1526 skinny_metadata = false;
1527 if (path->slots[0]) {
1528 path->slots[0]--;
1529 btrfs_item_key_to_cpu(path->nodes[0], &key,
1530 path->slots[0]);
1531 if (key.objectid == bytenr &&
1532 key.type == BTRFS_EXTENT_ITEM_KEY &&
1533 key.offset == num_bytes)
1534 ret = 0;
1535 }
1536 if (ret) {
1537 key.objectid = bytenr;
1538 key.type = BTRFS_EXTENT_ITEM_KEY;
1539 key.offset = num_bytes;
1540 btrfs_release_path(path);
1541 goto again;
1542 }
1543 }
1544
1545 if (ret && !insert) {
1546 err = -ENOENT;
1547 goto out;
1548 } else if (WARN_ON(ret)) {
1549 err = -EIO;
1550 goto out;
1551 }
1552
1553 leaf = path->nodes[0];
1554 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1555 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1556 if (item_size < sizeof(*ei)) {
1557 if (!insert) {
1558 err = -ENOENT;
1559 goto out;
1560 }
1561 ret = convert_extent_item_v0(trans, root, path, owner,
1562 extra_size);
1563 if (ret < 0) {
1564 err = ret;
1565 goto out;
1566 }
1567 leaf = path->nodes[0];
1568 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1569 }
1570 #endif
1571 BUG_ON(item_size < sizeof(*ei));
1572
1573 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1574 flags = btrfs_extent_flags(leaf, ei);
1575
1576 ptr = (unsigned long)(ei + 1);
1577 end = (unsigned long)ei + item_size;
1578
1579 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1580 ptr += sizeof(struct btrfs_tree_block_info);
1581 BUG_ON(ptr > end);
1582 }
1583
1584 err = -ENOENT;
1585 while (1) {
1586 if (ptr >= end) {
1587 WARN_ON(ptr > end);
1588 break;
1589 }
1590 iref = (struct btrfs_extent_inline_ref *)ptr;
1591 type = btrfs_extent_inline_ref_type(leaf, iref);
1592 if (want < type)
1593 break;
1594 if (want > type) {
1595 ptr += btrfs_extent_inline_ref_size(type);
1596 continue;
1597 }
1598
1599 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1600 struct btrfs_extent_data_ref *dref;
1601 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1602 if (match_extent_data_ref(leaf, dref, root_objectid,
1603 owner, offset)) {
1604 err = 0;
1605 break;
1606 }
1607 if (hash_extent_data_ref_item(leaf, dref) <
1608 hash_extent_data_ref(root_objectid, owner, offset))
1609 break;
1610 } else {
1611 u64 ref_offset;
1612 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1613 if (parent > 0) {
1614 if (parent == ref_offset) {
1615 err = 0;
1616 break;
1617 }
1618 if (ref_offset < parent)
1619 break;
1620 } else {
1621 if (root_objectid == ref_offset) {
1622 err = 0;
1623 break;
1624 }
1625 if (ref_offset < root_objectid)
1626 break;
1627 }
1628 }
1629 ptr += btrfs_extent_inline_ref_size(type);
1630 }
1631 if (err == -ENOENT && insert) {
1632 if (item_size + extra_size >=
1633 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1634 err = -EAGAIN;
1635 goto out;
1636 }
1637 /*
1638  * To add a new inline back ref, we have to make sure
1639  * there is no corresponding back ref item.
1640  * For simplicity, we just do not add a new inline back
1641  * ref if there is any kind of item for this block.
1642 */
1643 if (find_next_key(path, 0, &key) == 0 &&
1644 key.objectid == bytenr &&
1645 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1646 err = -EAGAIN;
1647 goto out;
1648 }
1649 }
1650 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1651 out:
1652 if (insert) {
1653 path->keep_locks = 0;
1654 btrfs_unlock_up_safe(path, 1);
1655 }
1656 return err;
1657 }
1658
1659 /*
1660  * helper to add a new inline back ref
1661 */
1662 static noinline_for_stack
1663 void setup_inline_extent_backref(struct btrfs_root *root,
1664 struct btrfs_path *path,
1665 struct btrfs_extent_inline_ref *iref,
1666 u64 parent, u64 root_objectid,
1667 u64 owner, u64 offset, int refs_to_add,
1668 struct btrfs_delayed_extent_op *extent_op)
1669 {
1670 struct extent_buffer *leaf;
1671 struct btrfs_extent_item *ei;
1672 unsigned long ptr;
1673 unsigned long end;
1674 unsigned long item_offset;
1675 u64 refs;
1676 int size;
1677 int type;
1678
1679 leaf = path->nodes[0];
1680 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681 item_offset = (unsigned long)iref - (unsigned long)ei;
1682
1683 type = extent_ref_type(parent, owner);
1684 size = btrfs_extent_inline_ref_size(type);
1685
1686 btrfs_extend_item(root, path, size);
1687
1688 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1689 refs = btrfs_extent_refs(leaf, ei);
1690 refs += refs_to_add;
1691 btrfs_set_extent_refs(leaf, ei, refs);
1692 if (extent_op)
1693 __run_delayed_extent_op(extent_op, leaf, ei);
1694
1695 ptr = (unsigned long)ei + item_offset;
1696 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1697 if (ptr < end - size)
1698 memmove_extent_buffer(leaf, ptr + size, ptr,
1699 end - size - ptr);
1700
1701 iref = (struct btrfs_extent_inline_ref *)ptr;
1702 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1703 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1704 struct btrfs_extent_data_ref *dref;
1705 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1707 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1708 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1709 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1710 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711 struct btrfs_shared_data_ref *sref;
1712 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1713 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1714 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1716 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1717 } else {
1718 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1719 }
1720 btrfs_mark_buffer_dirty(leaf);
1721 }
1722
1723 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1724 struct btrfs_root *root,
1725 struct btrfs_path *path,
1726 struct btrfs_extent_inline_ref **ref_ret,
1727 u64 bytenr, u64 num_bytes, u64 parent,
1728 u64 root_objectid, u64 owner, u64 offset)
1729 {
1730 int ret;
1731
1732 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1733 bytenr, num_bytes, parent,
1734 root_objectid, owner, offset, 0);
1735 if (ret != -ENOENT)
1736 return ret;
1737
1738 btrfs_release_path(path);
1739 *ref_ret = NULL;
1740
1741 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1742 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1743 root_objectid);
1744 } else {
1745 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1746 root_objectid, owner, offset);
1747 }
1748 return ret;
1749 }
1750
1751 /*
1752 * helper to update/remove inline back ref
1753 */
1754 static noinline_for_stack
1755 void update_inline_extent_backref(struct btrfs_root *root,
1756 struct btrfs_path *path,
1757 struct btrfs_extent_inline_ref *iref,
1758 int refs_to_mod,
1759 struct btrfs_delayed_extent_op *extent_op,
1760 int *last_ref)
1761 {
1762 struct extent_buffer *leaf;
1763 struct btrfs_extent_item *ei;
1764 struct btrfs_extent_data_ref *dref = NULL;
1765 struct btrfs_shared_data_ref *sref = NULL;
1766 unsigned long ptr;
1767 unsigned long end;
1768 u32 item_size;
1769 int size;
1770 int type;
1771 u64 refs;
1772
1773 leaf = path->nodes[0];
1774 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1775 refs = btrfs_extent_refs(leaf, ei);
1776 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1777 refs += refs_to_mod;
1778 btrfs_set_extent_refs(leaf, ei, refs);
1779 if (extent_op)
1780 __run_delayed_extent_op(extent_op, leaf, ei);
1781
1782 type = btrfs_extent_inline_ref_type(leaf, iref);
1783
1784 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1785 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1786 refs = btrfs_extent_data_ref_count(leaf, dref);
1787 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1788 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1789 refs = btrfs_shared_data_ref_count(leaf, sref);
1790 } else {
1791 refs = 1;
1792 BUG_ON(refs_to_mod != -1);
1793 }
1794
1795 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1796 refs += refs_to_mod;
1797
1798 if (refs > 0) {
1799 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1800 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1801 else
1802 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1803 } else {
1804 *last_ref = 1;
1805 size = btrfs_extent_inline_ref_size(type);
1806 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1807 ptr = (unsigned long)iref;
1808 end = (unsigned long)ei + item_size;
1809 if (ptr + size < end)
1810 memmove_extent_buffer(leaf, ptr, ptr + size,
1811 end - ptr - size);
1812 item_size -= size;
1813 btrfs_truncate_item(root, path, item_size, 1);
1814 }
1815 btrfs_mark_buffer_dirty(leaf);
1816 }
1817
1818 static noinline_for_stack
1819 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1820 struct btrfs_root *root,
1821 struct btrfs_path *path,
1822 u64 bytenr, u64 num_bytes, u64 parent,
1823 u64 root_objectid, u64 owner,
1824 u64 offset, int refs_to_add,
1825 struct btrfs_delayed_extent_op *extent_op)
1826 {
1827 struct btrfs_extent_inline_ref *iref;
1828 int ret;
1829
1830 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1831 bytenr, num_bytes, parent,
1832 root_objectid, owner, offset, 1);
1833 if (ret == 0) {
1834 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1835 update_inline_extent_backref(root, path, iref,
1836 refs_to_add, extent_op, NULL);
1837 } else if (ret == -ENOENT) {
1838 setup_inline_extent_backref(root, path, iref, parent,
1839 root_objectid, owner, offset,
1840 refs_to_add, extent_op);
1841 ret = 0;
1842 }
1843 return ret;
1844 }
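/*
 * insert_inline_extent_backref() shows the intended use of
 * lookup_inline_extent_backref(): a return of 0 means an existing inline
 * ref was found and is updated in place, -ENOENT means the path points at
 * the slot where a new inline ref is set up, and -EAGAIN (no room left
 * for another inline ref) is simply propagated; __btrfs_inc_extent_ref()
 * below explicitly does not treat -EAGAIN as a fatal error.
 */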
1845
1846 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1847 struct btrfs_root *root,
1848 struct btrfs_path *path,
1849 u64 bytenr, u64 parent, u64 root_objectid,
1850 u64 owner, u64 offset, int refs_to_add)
1851 {
1852 int ret;
1853 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1854 BUG_ON(refs_to_add != 1);
1855 ret = insert_tree_block_ref(trans, root, path, bytenr,
1856 parent, root_objectid);
1857 } else {
1858 ret = insert_extent_data_ref(trans, root, path, bytenr,
1859 parent, root_objectid,
1860 owner, offset, refs_to_add);
1861 }
1862 return ret;
1863 }
1864
1865 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1866 struct btrfs_root *root,
1867 struct btrfs_path *path,
1868 struct btrfs_extent_inline_ref *iref,
1869 int refs_to_drop, int is_data, int *last_ref)
1870 {
1871 int ret = 0;
1872
1873 BUG_ON(!is_data && refs_to_drop != 1);
1874 if (iref) {
1875 update_inline_extent_backref(root, path, iref,
1876 -refs_to_drop, NULL, last_ref);
1877 } else if (is_data) {
1878 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1879 last_ref);
1880 } else {
1881 *last_ref = 1;
1882 ret = btrfs_del_item(trans, root, path);
1883 }
1884 return ret;
1885 }
1886
1887 static int btrfs_issue_discard(struct block_device *bdev,
1888 u64 start, u64 len)
1889 {
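/* blkdev_issue_discard() takes 512-byte sector units, hence the >> 9 */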
1890 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1891 }
1892
1893 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1894 u64 num_bytes, u64 *actual_bytes)
1895 {
1896 int ret;
1897 u64 discarded_bytes = 0;
1898 struct btrfs_bio *bbio = NULL;
1899
1900
1901 /* Tell the block device(s) that the sectors can be discarded */
1902 ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1903 bytenr, &num_bytes, &bbio, 0);
1904 /* Error condition is -ENOMEM */
1905 if (!ret) {
1906 struct btrfs_bio_stripe *stripe = bbio->stripes;
1907 int i;
1908
1909
1910 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1911 if (!stripe->dev->can_discard)
1912 continue;
1913
1914 ret = btrfs_issue_discard(stripe->dev->bdev,
1915 stripe->physical,
1916 stripe->length);
1917 if (!ret)
1918 discarded_bytes += stripe->length;
1919 else if (ret != -EOPNOTSUPP)
1920 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1921
1922 /*
1923 * Just in case we get back EOPNOTSUPP for some reason,
1924 * just ignore the return value so we don't screw up
1925 * people calling discard_extent.
1926 */
1927 ret = 0;
1928 }
1929 btrfs_put_bbio(bbio);
1930 }
1931
1932 if (actual_bytes)
1933 *actual_bytes = discarded_bytes;
1934
1935
1936 if (ret == -EOPNOTSUPP)
1937 ret = 0;
1938 return ret;
1939 }
1940
1941 /* Can return -ENOMEM */
1942 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1943 struct btrfs_root *root,
1944 u64 bytenr, u64 num_bytes, u64 parent,
1945 u64 root_objectid, u64 owner, u64 offset,
1946 int no_quota)
1947 {
1948 int ret;
1949 struct btrfs_fs_info *fs_info = root->fs_info;
1950
1951 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1952 root_objectid == BTRFS_TREE_LOG_OBJECTID);
1953
1954 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1955 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1956 num_bytes,
1957 parent, root_objectid, (int)owner,
1958 BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1959 } else {
1960 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1961 num_bytes,
1962 parent, root_objectid, owner, offset,
1963 BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1964 }
1965 return ret;
1966 }
1967
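/*
 * Add refs_to_add references to an existing extent. First try to insert (or
 * update) an inline backref; on -EAGAIN there was no room inline, so bump
 * the extent item's ref count and insert a keyed backref instead. Qgroup
 * accounting is recorded unless no_quota is set.
 */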
1968 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1969 struct btrfs_root *root,
1970 u64 bytenr, u64 num_bytes,
1971 u64 parent, u64 root_objectid,
1972 u64 owner, u64 offset, int refs_to_add,
1973 int no_quota,
1974 struct btrfs_delayed_extent_op *extent_op)
1975 {
1976 struct btrfs_fs_info *fs_info = root->fs_info;
1977 struct btrfs_path *path;
1978 struct extent_buffer *leaf;
1979 struct btrfs_extent_item *item;
1980 struct btrfs_key key;
1981 u64 refs;
1982 int ret;
1983 enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1984
1985 path = btrfs_alloc_path();
1986 if (!path)
1987 return -ENOMEM;
1988
1989 if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1990 no_quota = 1;
1991
1992 path->reada = 1;
1993 path->leave_spinning = 1;
1994 /* this will set up the path even if it fails to insert the back ref */

1995 ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1996 bytenr, num_bytes, parent,
1997 root_objectid, owner, offset,
1998 refs_to_add, extent_op);
1999 if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2000 goto out;
2001 /*
2002 * Ok we were able to insert an inline extent backref and it appears to be a new
2003 * reference, deal with the qgroup accounting.
2004 */
2005 if (!ret && !no_quota) {
2006 ASSERT(root->fs_info->quota_enabled);
2007 leaf = path->nodes[0];
2008 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2009 item = btrfs_item_ptr(leaf, path->slots[0],
2010 struct btrfs_extent_item);
2011 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2012 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2013 btrfs_release_path(path);
2014
2015 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2016 bytenr, num_bytes, type, 0);
2017 goto out;
2018 }
2019
2020 /*
2021 * Ok we had -EAGAIN which means we didn't have space to insert an
2022 * inline extent ref, so just update the reference count and add a
2023 * normal backref.
2024 */
2025 leaf = path->nodes[0];
2026 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2027 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2028 refs = btrfs_extent_refs(leaf, item);
2029 if (refs)
2030 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2031 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2032 if (extent_op)
2033 __run_delayed_extent_op(extent_op, leaf, item);
2034
2035 btrfs_mark_buffer_dirty(leaf);
2036 btrfs_release_path(path);
2037
2038 if (!no_quota) {
2039 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2040 bytenr, num_bytes, type, 0);
2041 if (ret)
2042 goto out;
2043 }
2044
2045 path->reada = 1;
2046 path->leave_spinning = 1;
2047 /* now insert the actual backref */
2048 ret = insert_extent_backref(trans, root->fs_info->extent_root,
2049 path, bytenr, parent, root_objectid,
2050 owner, offset, refs_to_add);
2051 if (ret)
2052 btrfs_abort_transaction(trans, root, ret);
2053 out:
2054 btrfs_free_path(path);
2055 return ret;
2056 }
2057
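/*
 * Process a single delayed data ref: allocate the reserved file extent, add
 * a reference, or drop one, depending on the node's action.
 */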
2058 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2059 struct btrfs_root *root,
2060 struct btrfs_delayed_ref_node *node,
2061 struct btrfs_delayed_extent_op *extent_op,
2062 int insert_reserved)
2063 {
2064 int ret = 0;
2065 struct btrfs_delayed_data_ref *ref;
2066 struct btrfs_key ins;
2067 u64 parent = 0;
2068 u64 ref_root = 0;
2069 u64 flags = 0;
2070
2071 ins.objectid = node->bytenr;
2072 ins.offset = node->num_bytes;
2073 ins.type = BTRFS_EXTENT_ITEM_KEY;
2074
2075 ref = btrfs_delayed_node_to_data_ref(node);
2076 trace_run_delayed_data_ref(node, ref, node->action);
2077
2078 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2079 parent = ref->parent;
2080 ref_root = ref->root;
2081
2082 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2083 if (extent_op)
2084 flags |= extent_op->flags_to_set;
2085 ret = alloc_reserved_file_extent(trans, root,
2086 parent, ref_root, flags,
2087 ref->objectid, ref->offset,
2088 &ins, node->ref_mod);
2089 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2090 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2091 node->num_bytes, parent,
2092 ref_root, ref->objectid,
2093 ref->offset, node->ref_mod,
2094 node->no_quota, extent_op);
2095 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2096 ret = __btrfs_free_extent(trans, root, node->bytenr,
2097 node->num_bytes, parent,
2098 ref_root, ref->objectid,
2099 ref->offset, node->ref_mod,
2100 extent_op, node->no_quota);
2101 } else {
2102 BUG();
2103 }
2104 return ret;
2105 }
2106
2107 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2108 struct extent_buffer *leaf,
2109 struct btrfs_extent_item *ei)
2110 {
2111 u64 flags = btrfs_extent_flags(leaf, ei);
2112 if (extent_op->update_flags) {
2113 flags |= extent_op->flags_to_set;
2114 btrfs_set_extent_flags(leaf, ei, flags);
2115 }
2116
2117 if (extent_op->update_key) {
2118 struct btrfs_tree_block_info *bi;
2119 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2120 bi = (struct btrfs_tree_block_info *)(ei + 1);
2121 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2122 }
2123 }
2124
2125 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2126 struct btrfs_root *root,
2127 struct btrfs_delayed_ref_node *node,
2128 struct btrfs_delayed_extent_op *extent_op)
2129 {
2130 struct btrfs_key key;
2131 struct btrfs_path *path;
2132 struct btrfs_extent_item *ei;
2133 struct extent_buffer *leaf;
2134 u32 item_size;
2135 int ret;
2136 int err = 0;
2137 int metadata = !extent_op->is_data;
2138
2139 if (trans->aborted)
2140 return 0;
2141
2142 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2143 metadata = 0;
2144
2145 path = btrfs_alloc_path();
2146 if (!path)
2147 return -ENOMEM;
2148
2149 key.objectid = node->bytenr;
2150
2151 if (metadata) {
2152 key.type = BTRFS_METADATA_ITEM_KEY;
2153 key.offset = extent_op->level;
2154 } else {
2155 key.type = BTRFS_EXTENT_ITEM_KEY;
2156 key.offset = node->num_bytes;
2157 }
2158
2159 again:
2160 path->reada = 1;
2161 path->leave_spinning = 1;
2162 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2163 path, 0, 1);
2164 if (ret < 0) {
2165 err = ret;
2166 goto out;
2167 }
2168 if (ret > 0) {
2169 if (metadata) {
2170 if (path->slots[0] > 0) {
2171 path->slots[0]--;
2172 btrfs_item_key_to_cpu(path->nodes[0], &key,
2173 path->slots[0]);
2174 if (key.objectid == node->bytenr &&
2175 key.type == BTRFS_EXTENT_ITEM_KEY &&
2176 key.offset == node->num_bytes)
2177 ret = 0;
2178 }
2179 if (ret > 0) {
2180 btrfs_release_path(path);
2181 metadata = 0;
2182
2183 key.objectid = node->bytenr;
2184 key.offset = node->num_bytes;
2185 key.type = BTRFS_EXTENT_ITEM_KEY;
2186 goto again;
2187 }
2188 } else {
2189 err = -EIO;
2190 goto out;
2191 }
2192 }
2193
2194 leaf = path->nodes[0];
2195 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2196 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2197 if (item_size < sizeof(*ei)) {
2198 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2199 path, (u64)-1, 0);
2200 if (ret < 0) {
2201 err = ret;
2202 goto out;
2203 }
2204 leaf = path->nodes[0];
2205 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2206 }
2207 #endif
2208 BUG_ON(item_size < sizeof(*ei));
2209 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2210 __run_delayed_extent_op(extent_op, leaf, ei);
2211
2212 btrfs_mark_buffer_dirty(leaf);
2213 out:
2214 btrfs_free_path(path);
2215 return err;
2216 }
2217
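/*
 * Process a single delayed tree block ref: allocate the reserved tree block,
 * add a reference, or drop one, depending on the node's action.
 */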
2218 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2219 struct btrfs_root *root,
2220 struct btrfs_delayed_ref_node *node,
2221 struct btrfs_delayed_extent_op *extent_op,
2222 int insert_reserved)
2223 {
2224 int ret = 0;
2225 struct btrfs_delayed_tree_ref *ref;
2226 struct btrfs_key ins;
2227 u64 parent = 0;
2228 u64 ref_root = 0;
2229 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2230 SKINNY_METADATA);
2231
2232 ref = btrfs_delayed_node_to_tree_ref(node);
2233 trace_run_delayed_tree_ref(node, ref, node->action);
2234
2235 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2236 parent = ref->parent;
2237 ref_root = ref->root;
2238
2239 ins.objectid = node->bytenr;
2240 if (skinny_metadata) {
2241 ins.offset = ref->level;
2242 ins.type = BTRFS_METADATA_ITEM_KEY;
2243 } else {
2244 ins.offset = node->num_bytes;
2245 ins.type = BTRFS_EXTENT_ITEM_KEY;
2246 }
2247
2248 BUG_ON(node->ref_mod != 1);
2249 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2250 BUG_ON(!extent_op || !extent_op->update_flags);
2251 ret = alloc_reserved_tree_block(trans, root,
2252 parent, ref_root,
2253 extent_op->flags_to_set,
2254 &extent_op->key,
2255 ref->level, &ins,
2256 node->no_quota);
2257 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2258 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2259 node->num_bytes, parent, ref_root,
2260 ref->level, 0, 1, node->no_quota,
2261 extent_op);
2262 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2263 ret = __btrfs_free_extent(trans, root, node->bytenr,
2264 node->num_bytes, parent, ref_root,
2265 ref->level, 0, 1, extent_op,
2266 node->no_quota);
2267 } else {
2268 BUG();
2269 }
2270 return ret;
2271 }
2272
2273 /* helper function to actually process a single delayed ref entry */
2274 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2275 struct btrfs_root *root,
2276 struct btrfs_delayed_ref_node *node,
2277 struct btrfs_delayed_extent_op *extent_op,
2278 int insert_reserved)
2279 {
2280 int ret = 0;
2281
2282 if (trans->aborted) {
2283 if (insert_reserved)
2284 btrfs_pin_extent(root, node->bytenr,
2285 node->num_bytes, 1);
2286 return 0;
2287 }
2288
2289 if (btrfs_delayed_ref_is_head(node)) {
2290 struct btrfs_delayed_ref_head *head;
2291 /*
2292 * we've hit the end of the chain and we were supposed
2293 * to insert this extent into the tree. But it got
2294 * deleted before we ever needed to insert it, so all
2295 * we have to do is clean up the accounting
2296 */
2297 BUG_ON(extent_op);
2298 head = btrfs_delayed_node_to_head(node);
2299 trace_run_delayed_ref_head(node, head, node->action);
2300
2301 if (insert_reserved) {
2302 btrfs_pin_extent(root, node->bytenr,
2303 node->num_bytes, 1);
2304 if (head->is_data) {
2305 ret = btrfs_del_csums(trans, root,
2306 node->bytenr,
2307 node->num_bytes);
2308 }
2309 }
2310 return ret;
2311 }
2312
2313 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2314 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2315 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2316 insert_reserved);
2317 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2318 node->type == BTRFS_SHARED_DATA_REF_KEY)
2319 ret = run_delayed_data_ref(trans, root, node, extent_op,
2320 insert_reserved);
2321 else
2322 BUG();
2323 return ret;
2324 }
2325
2326 static noinline struct btrfs_delayed_ref_node *
2327 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2328 {
2329 struct rb_node *node;
2330 struct btrfs_delayed_ref_node *ref, *last = NULL;
2331
2332 /*
2333 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2334 * This prevents the ref count from going down to zero when
2335 * there are still pending delayed refs.
2336 */
2337 node = rb_first(&head->ref_root);
2338 while (node) {
2339 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2340 rb_node);
2341 if (ref->action == BTRFS_ADD_DELAYED_REF)
2342 return ref;
2343 else if (last == NULL)
2344 last = ref;
2345 node = rb_next(node);
2346 }
2347 return last;
2348 }
2349
2350 /*
2351 * Returns 0 on success or if called with an already aborted transaction.
2352 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2353 */
2354 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2355 struct btrfs_root *root,
2356 unsigned long nr)
2357 {
2358 struct btrfs_delayed_ref_root *delayed_refs;
2359 struct btrfs_delayed_ref_node *ref;
2360 struct btrfs_delayed_ref_head *locked_ref = NULL;
2361 struct btrfs_delayed_extent_op *extent_op;
2362 struct btrfs_fs_info *fs_info = root->fs_info;
2363 ktime_t start = ktime_get();
2364 int ret;
2365 unsigned long count = 0;
2366 unsigned long actual_count = 0;
2367 int must_insert_reserved = 0;
2368
2369 delayed_refs = &trans->transaction->delayed_refs;
2370 while (1) {
2371 if (!locked_ref) {
2372 if (count >= nr)
2373 break;
2374
2375 spin_lock(&delayed_refs->lock);
2376 locked_ref = btrfs_select_ref_head(trans);
2377 if (!locked_ref) {
2378 spin_unlock(&delayed_refs->lock);
2379 break;
2380 }
2381
2382 /* grab the lock that says we are going to process
2383 * all the refs for this head */
2384 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2385 spin_unlock(&delayed_refs->lock);
2386 /*
2387 * we may have dropped the spin lock to get the head
2388 * mutex lock, and that might have given someone else
2389 * time to free the head. If that's true, it has been
2390 * removed from our list and we can move on.
2391 */
2392 if (ret == -EAGAIN) {
2393 locked_ref = NULL;
2394 count++;
2395 continue;
2396 }
2397 }
2398
2399 /*
2400 * We need to try to merge add/drops of the same ref since we
2401 * can run into issues with relocate dropping the implicit ref
2402 * and then it being added back again before the drop can
2403 * finish. If we merged anything we need to re-loop so we can
2404 * get a good ref.
2405 */
2406 spin_lock(&locked_ref->lock);
2407 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2408 locked_ref);
2409
2410 /*
2411 * locked_ref is the head node, so we have to go one
2412 * node back for any delayed ref updates
2413 */
2414 ref = select_delayed_ref(locked_ref);
2415
2416 if (ref && ref->seq &&
2417 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2418 spin_unlock(&locked_ref->lock);
2419 btrfs_delayed_ref_unlock(locked_ref);
2420 spin_lock(&delayed_refs->lock);
2421 locked_ref->processing = 0;
2422 delayed_refs->num_heads_ready++;
2423 spin_unlock(&delayed_refs->lock);
2424 locked_ref = NULL;
2425 cond_resched();
2426 count++;
2427 continue;
2428 }
2429
2430 /*
2431 * record the must insert reserved flag before we
2432 * drop the spin lock.
2433 */
2434 must_insert_reserved = locked_ref->must_insert_reserved;
2435 locked_ref->must_insert_reserved = 0;
2436
2437 extent_op = locked_ref->extent_op;
2438 locked_ref->extent_op = NULL;
2439
2440 if (!ref) {
2441
2442
2443 /* All delayed refs have been processed, go ahead
2444 * and send the head node to run_one_delayed_ref,
2445 * so that any accounting fixes can happen
2446 */
2447 ref = &locked_ref->node;
2448
2449 if (extent_op && must_insert_reserved) {
2450 btrfs_free_delayed_extent_op(extent_op);
2451 extent_op = NULL;
2452 }
2453
2454 if (extent_op) {
2455 spin_unlock(&locked_ref->lock);
2456 ret = run_delayed_extent_op(trans, root,
2457 ref, extent_op);
2458 btrfs_free_delayed_extent_op(extent_op);
2459
2460 if (ret) {
2461 /*
2462 * Need to reset must_insert_reserved if
2463 * there was an error so the abort stuff
2464 * can cleanup the reserved space
2465 * properly.
2466 */
2467 if (must_insert_reserved)
2468 locked_ref->must_insert_reserved = 1;
2469 locked_ref->processing = 0;
2470 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2471 btrfs_delayed_ref_unlock(locked_ref);
2472 return ret;
2473 }
2474 continue;
2475 }
2476
2477 /*
2478 * Need to drop our head ref lock and re-acquire the
2479 * delayed ref lock and then re-check to make sure
2480 * nobody got added.
2481 */
2482 spin_unlock(&locked_ref->lock);
2483 spin_lock(&delayed_refs->lock);
2484 spin_lock(&locked_ref->lock);
2485 if (rb_first(&locked_ref->ref_root) ||
2486 locked_ref->extent_op) {
2487 spin_unlock(&locked_ref->lock);
2488 spin_unlock(&delayed_refs->lock);
2489 continue;
2490 }
2491 ref->in_tree = 0;
2492 delayed_refs->num_heads--;
2493 rb_erase(&locked_ref->href_node,
2494 &delayed_refs->href_root);
2495 spin_unlock(&delayed_refs->lock);
2496 } else {
2497 actual_count++;
2498 ref->in_tree = 0;
2499 rb_erase(&ref->rb_node, &locked_ref->ref_root);
2500 }
2501 atomic_dec(&delayed_refs->num_entries);
2502
2503 if (!btrfs_delayed_ref_is_head(ref)) {
2504 /*
2505 * when we play the delayed ref, also correct the
2506 * ref_mod on head
2507 */
2508 switch (ref->action) {
2509 case BTRFS_ADD_DELAYED_REF:
2510 case BTRFS_ADD_DELAYED_EXTENT:
2511 locked_ref->node.ref_mod -= ref->ref_mod;
2512 break;
2513 case BTRFS_DROP_DELAYED_REF:
2514 locked_ref->node.ref_mod += ref->ref_mod;
2515 break;
2516 default:
2517 WARN_ON(1);
2518 }
2519 }
2520 spin_unlock(&locked_ref->lock);
2521
2522 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2523 must_insert_reserved);
2524
2525 btrfs_free_delayed_extent_op(extent_op);
2526 if (ret) {
2527 locked_ref->processing = 0;
2528 btrfs_delayed_ref_unlock(locked_ref);
2529 btrfs_put_delayed_ref(ref);
2530 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2531 return ret;
2532 }
2533
2534 /*
2535 * If this node is a head, that means all the refs in this head
2536 * have been dealt with, and we will pick the next head to deal
2537 * with, so we must unlock the head and drop it from the cluster
2538 * list before we release it.
2539 */
2540 if (btrfs_delayed_ref_is_head(ref)) {
2541 btrfs_delayed_ref_unlock(locked_ref);
2542 locked_ref = NULL;
2543 }
2544 btrfs_put_delayed_ref(ref);
2545 count++;
2546 cond_resched();
2547 }
2548
2549 /*
2550 * We don't want to include ref heads since we can have empty ref heads
2551 * and those would drastically skew our runtime down, since for them we
2552 * only do accounting, no actual extent tree updates.
2553 */
2554 if (actual_count > 0) {
2555 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2556 u64 avg;
2557
2558 /*
2559 * We weigh the current average higher than our current runtime
2560 * to avoid large swings in the average.
2561 */
2562 spin_lock(&delayed_refs->lock);
2563 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2564 avg = div64_u64(avg, 4);
2565 fs_info->avg_delayed_ref_runtime = avg;
2566 spin_unlock(&delayed_refs->lock);
2567 }
2568 return 0;
2569 }
2570
2571 #ifdef SCRAMBLE_DELAYED_REFS
2572 /*
2573 * Normally delayed refs get processed in ascending bytenr order. This
2574 * correlates in most cases to the order added. To expose dependencies on this
2575 * order, we start to process the tree in the middle instead of the beginning.
2576 */
2577 static u64 find_middle(struct rb_root *root)
2578 {
2579 struct rb_node *n = root->rb_node;
2580 struct btrfs_delayed_ref_node *entry;
2581 int alt = 1;
2582 u64 middle;
2583 u64 first = 0, last = 0;
2584
2585 n = rb_first(root);
2586 if (n) {
2587 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2588 first = entry->bytenr;
2589 }
2590 n = rb_last(root);
2591 if (n) {
2592 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2593 last = entry->bytenr;
2594 }
2595 n = root->rb_node;
2596
2597 while (n) {
2598 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2599 WARN_ON(!entry->in_tree);
2600
2601 middle = entry->bytenr;
2602
2603 if (alt)
2604 n = n->rb_left;
2605 else
2606 n = n->rb_right;
2607
2608 alt = 1 - alt;
2609 }
2610 return middle;
2611 }
2612 #endif
2613
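/*
 * Rough estimate of how many extent tree leaves the given number of delayed
 * ref heads may end up touching.
 */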
2614 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2615 {
2616 u64 num_bytes;
2617
2618 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2619 sizeof(struct btrfs_extent_inline_ref));
2620 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2621 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2622
2623 /*
2624 * We don't ever fill up leaves all the way so multiply by 2 just to be
2625 * closer to what we're really going to want to use.
2626 */
2627 return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2628 }
2629
2630 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2631 struct btrfs_root *root)
2632 {
2633 struct btrfs_block_rsv *global_rsv;
2634 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2635 u64 num_bytes;
2636 int ret = 0;
2637
2638 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2639 num_heads = heads_to_leaves(root, num_heads);
2640 if (num_heads > 1)
2641 num_bytes += (num_heads - 1) * root->nodesize;
2642 num_bytes <<= 1;
2643 global_rsv = &root->fs_info->global_block_rsv;
2644
2645 /*
2646 * If we can't allocate any more chunks, let's make sure we have _lots_ of
2647 * wiggle room since running delayed refs can create more delayed refs.
2648 */
2649 if (global_rsv->space_info->full)
2650 num_bytes <<= 1;
2651
2652 spin_lock(&global_rsv->lock);
2653 if (global_rsv->reserved <= num_bytes)
2654 ret = 1;
2655 spin_unlock(&global_rsv->lock);
2656 return ret;
2657 }
2658
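/*
 * Decide whether the caller should throttle and run some delayed refs:
 * returns 1 if the estimated time to run them exceeds one second, 2 if it
 * exceeds half a second, otherwise defers to the space check above.
 */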
2659 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2660 struct btrfs_root *root)
2661 {
2662 struct btrfs_fs_info *fs_info = root->fs_info;
2663 u64 num_entries =
2664 atomic_read(&trans->transaction->delayed_refs.num_entries);
2665 u64 avg_runtime;
2666 u64 val;
2667
2668 smp_mb();
2669 avg_runtime = fs_info->avg_delayed_ref_runtime;
2670 val = num_entries * avg_runtime;
2671 if (num_entries * avg_runtime >= NSEC_PER_SEC)
2672 return 1;
2673 if (val >= NSEC_PER_SEC / 2)
2674 return 2;
2675
2676 return btrfs_check_space_for_delayed_refs(trans, root);
2677 }
2678
2679 struct async_delayed_refs {
2680 struct btrfs_root *root;
2681 int count;
2682 int error;
2683 int sync;
2684 struct completion wait;
2685 struct btrfs_work work;
2686 };
2687
2688 static void delayed_ref_async_start(struct btrfs_work *work)
2689 {
2690 struct async_delayed_refs *async;
2691 struct btrfs_trans_handle *trans;
2692 int ret;
2693
2694 async = container_of(work, struct async_delayed_refs, work);
2695
2696 trans = btrfs_join_transaction(async->root);
2697 if (IS_ERR(trans)) {
2698 async->error = PTR_ERR(trans);
2699 goto done;
2700 }
2701
2702 /*
2703 * trans->sync means that when we call end_transaction, we won't
2704 * wait on delayed refs
2705 */
2706 trans->sync = true;
2707 ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2708 if (ret)
2709 async->error = ret;
2710
2711 ret = btrfs_end_transaction(trans, async->root);
2712 if (ret && !async->error)
2713 async->error = ret;
2714 done:
2715 if (async->sync)
2716 complete(&async->wait);
2717 else
2718 kfree(async);
2719 }
2720
2721 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2722 unsigned long count, int wait)
2723 {
2724 struct async_delayed_refs *async;
2725 int ret;
2726
2727 async = kmalloc(sizeof(*async), GFP_NOFS);
2728 if (!async)
2729 return -ENOMEM;
2730
2731 async->root = root->fs_info->tree_root;
2732 async->count = count;
2733 async->error = 0;
2734 if (wait)
2735 async->sync = 1;
2736 else
2737 async->sync = 0;
2738 init_completion(&async->wait);
2739
2740 btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2741 delayed_ref_async_start, NULL, NULL);
2742
2743 btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2744
2745 if (wait) {
2746 wait_for_completion(&async->wait);
2747 ret = async->error;
2748 kfree(async);
2749 return ret;
2750 }
2751 return 0;
2752 }
2753
2754 /*
2755 * this starts processing the delayed reference count updates and
2756 * extent insertions we have queued up so far. count can be
2757 * 0, which means to process everything in the tree at the start
2758 * of the run (but not newly added entries), or it can be some target
2759 * number you'd like to process.
2760 *
2761 * Returns 0 on success or if called with an aborted transaction
2762 * Returns <0 on error and aborts the transaction
2763 */
2764 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2765 struct btrfs_root *root, unsigned long count)
2766 {
2767 struct rb_node *node;
2768 struct btrfs_delayed_ref_root *delayed_refs;
2769 struct btrfs_delayed_ref_head *head;
2770 int ret;
2771 int run_all = count == (unsigned long)-1;
2772
2773 /* We'll clean this up in btrfs_cleanup_transaction */
2774 if (trans->aborted)
2775 return 0;
2776
2777 if (root == root->fs_info->extent_root)
2778 root = root->fs_info->tree_root;
2779
2780 delayed_refs = &trans->transaction->delayed_refs;
2781 if (count == 0)
2782 count = atomic_read(&delayed_refs->num_entries) * 2;
2783
2784 again:
2785 #ifdef SCRAMBLE_DELAYED_REFS
2786 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2787 #endif
2788 ret = __btrfs_run_delayed_refs(trans, root, count);
2789 if (ret < 0) {
2790 btrfs_abort_transaction(trans, root, ret);
2791 return ret;
2792 }
2793
2794 if (run_all) {
2795 if (!list_empty(&trans->new_bgs))
2796 btrfs_create_pending_block_groups(trans, root);
2797
2798 spin_lock(&delayed_refs->lock);
2799 node = rb_first(&delayed_refs->href_root);
2800 if (!node) {
2801 spin_unlock(&delayed_refs->lock);
2802 goto out;
2803 }
2804 count = (unsigned long)-1;
2805
2806 while (node) {
2807 head = rb_entry(node, struct btrfs_delayed_ref_head,
2808 href_node);
2809 if (btrfs_delayed_ref_is_head(&head->node)) {
2810 struct btrfs_delayed_ref_node *ref;
2811
2812 ref = &head->node;
2813 atomic_inc(&ref->refs);
2814
2815 spin_unlock(&delayed_refs->lock);
2816 /*
2817 * Mutex was contended, block until it's
2818 * released and try again
2819 */
2820 mutex_lock(&head->mutex);
2821 mutex_unlock(&head->mutex);
2822
2823 btrfs_put_delayed_ref(ref);
2824 cond_resched();
2825 goto again;
2826 } else {
2827 WARN_ON(1);
2828 }
2829 node = rb_next(node);
2830 }
2831 spin_unlock(&delayed_refs->lock);
2832 cond_resched();
2833 goto again;
2834 }
2835 out:
2836 ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2837 if (ret)
2838 return ret;
2839 assert_qgroups_uptodate(trans);
2840 return 0;
2841 }
2842
2843 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2844 struct btrfs_root *root,
2845 u64 bytenr, u64 num_bytes, u64 flags,
2846 int level, int is_data)
2847 {
2848 struct btrfs_delayed_extent_op *extent_op;
2849 int ret;
2850
2851 extent_op = btrfs_alloc_delayed_extent_op();
2852 if (!extent_op)
2853 return -ENOMEM;
2854
2855 extent_op->flags_to_set = flags;
2856 extent_op->update_flags = 1;
2857 extent_op->update_key = 0;
2858 extent_op->is_data = is_data ? 1 : 0;
2859 extent_op->level = level;
2860
2861 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2862 num_bytes, extent_op);
2863 if (ret)
2864 btrfs_free_delayed_extent_op(extent_op);
2865 return ret;
2866 }
2867
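/*
 * Check the delayed ref tree for a cross reference to the given data extent.
 * Returns 1 if one exists, 0 if not, or -EAGAIN if the head mutex was
 * contended and the caller should retry.
 */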
2868 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2869 struct btrfs_root *root,
2870 struct btrfs_path *path,
2871 u64 objectid, u64 offset, u64 bytenr)
2872 {
2873 struct btrfs_delayed_ref_head *head;
2874 struct btrfs_delayed_ref_node *ref;
2875 struct btrfs_delayed_data_ref *data_ref;
2876 struct btrfs_delayed_ref_root *delayed_refs;
2877 struct rb_node *node;
2878 int ret = 0;
2879
2880 delayed_refs = &trans->transaction->delayed_refs;
2881 spin_lock(&delayed_refs->lock);
2882 head = btrfs_find_delayed_ref_head(trans, bytenr);
2883 if (!head) {
2884 spin_unlock(&delayed_refs->lock);
2885 return 0;
2886 }
2887
2888 if (!mutex_trylock(&head->mutex)) {
2889 atomic_inc(&head->node.refs);
2890 spin_unlock(&delayed_refs->lock);
2891
2892 btrfs_release_path(path);
2893
2894 /*
2895 * Mutex was contended, block until it's released and let
2896 * caller try again
2897 */
2898 mutex_lock(&head->mutex);
2899 mutex_unlock(&head->mutex);
2900 btrfs_put_delayed_ref(&head->node);
2901 return -EAGAIN;
2902 }
2903 spin_unlock(&delayed_refs->lock);
2904
2905 spin_lock(&head->lock);
2906 node = rb_first(&head->ref_root);
2907 while (node) {
2908 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2909 node = rb_next(node);
2910
2911 /* If it's a shared ref we know a cross reference exists */
2912 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2913 ret = 1;
2914 break;
2915 }
2916
2917 data_ref = btrfs_delayed_node_to_data_ref(ref);
2918
2919 /*
2920 * If our ref doesn't match the one we're currently looking at
2921 * then we have a cross reference.
2922 */
2923 if (data_ref->root != root->root_key.objectid ||
2924 data_ref->objectid != objectid ||
2925 data_ref->offset != offset) {
2926 ret = 1;
2927 break;
2928 }
2929 }
2930 spin_unlock(&head->lock);
2931 mutex_unlock(&head->mutex);
2932 return ret;
2933 }
2934
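/*
 * Check the committed extent tree for a cross reference to the given data
 * extent. Returns 0 if the only reference is ours, 1 if another reference
 * may exist, or -ENOENT if no matching extent item was found.
 */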
2935 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2936 struct btrfs_root *root,
2937 struct btrfs_path *path,
2938 u64 objectid, u64 offset, u64 bytenr)
2939 {
2940 struct btrfs_root *extent_root = root->fs_info->extent_root;
2941 struct extent_buffer *leaf;
2942 struct btrfs_extent_data_ref *ref;
2943 struct btrfs_extent_inline_ref *iref;
2944 struct btrfs_extent_item *ei;
2945 struct btrfs_key key;
2946 u32 item_size;
2947 int ret;
2948
2949 key.objectid = bytenr;
2950 key.offset = (u64)-1;
2951 key.type = BTRFS_EXTENT_ITEM_KEY;
2952
2953 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2954 if (ret < 0)
2955 goto out;
2956 BUG_ON(ret == 0); /* Corruption */
2957
2958 ret = -ENOENT;
2959 if (path->slots[0] == 0)
2960 goto out;
2961
2962 path->slots[0]--;
2963 leaf = path->nodes[0];
2964 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2965
2966 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2967 goto out;
2968
2969 ret = 1;
2970 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2971 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2972 if (item_size < sizeof(*ei)) {
2973 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2974 goto out;
2975 }
2976 #endif
2977 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2978
2979 if (item_size != sizeof(*ei) +
2980 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2981 goto out;
2982
2983 if (btrfs_extent_generation(leaf, ei) <=
2984 btrfs_root_last_snapshot(&root->root_item))
2985 goto out;
2986
2987 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2988 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2989 BTRFS_EXTENT_DATA_REF_KEY)
2990 goto out;
2991
2992 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2993 if (btrfs_extent_refs(leaf, ei) !=
2994 btrfs_extent_data_ref_count(leaf, ref) ||
2995 btrfs_extent_data_ref_root(leaf, ref) !=
2996 root->root_key.objectid ||
2997 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2998 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2999 goto out;
3000
3001 ret = 0;
3002 out:
3003 return ret;
3004 }
3005
3006 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3007 struct btrfs_root *root,
3008 u64 objectid, u64 offset, u64 bytenr)
3009 {
3010 struct btrfs_path *path;
3011 int ret;
3012 int ret2;
3013
3014 path = btrfs_alloc_path();
3015 if (!path)
3016 return -ENOENT;
3017
3018 do {
3019 ret = check_committed_ref(trans, root, path, objectid,
3020 offset, bytenr);
3021 if (ret && ret != -ENOENT)
3022 goto out;
3023
3024 ret2 = check_delayed_ref(trans, root, path, objectid,
3025 offset, bytenr);
3026 } while (ret2 == -EAGAIN);
3027
3028 if (ret2 && ret2 != -ENOENT) {
3029 ret = ret2;
3030 goto out;
3031 }
3032
3033 if (ret != -ENOENT || ret2 != -ENOENT)
3034 ret = 0;
3035 out:
3036 btrfs_free_path(path);
3037 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3038 WARN_ON(ret > 0);
3039 return ret;
3040 }
3041
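/*
 * Walk every extent referenced from a tree block (file extents in a leaf,
 * child blocks in a node) and either add or drop one reference on each,
 * depending on @inc.
 */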
3042 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3043 struct btrfs_root *root,
3044 struct extent_buffer *buf,
3045 int full_backref, int inc)
3046 {
3047 u64 bytenr;
3048 u64 num_bytes;
3049 u64 parent;
3050 u64 ref_root;
3051 u32 nritems;
3052 struct btrfs_key key;
3053 struct btrfs_file_extent_item *fi;
3054 int i;
3055 int level;
3056 int ret = 0;
3057 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3058 u64, u64, u64, u64, u64, u64, int);
3059
3060
3061 if (btrfs_test_is_dummy_root(root))
3062 return 0;
3063
3064 ref_root = btrfs_header_owner(buf);
3065 nritems = btrfs_header_nritems(buf);
3066 level = btrfs_header_level(buf);
3067
3068 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3069 return 0;
3070
3071 if (inc)
3072 process_func = btrfs_inc_extent_ref;
3073 else
3074 process_func = btrfs_free_extent;
3075
3076 if (full_backref)
3077 parent = buf->start;
3078 else
3079 parent = 0;
3080
3081 for (i = 0; i < nritems; i++) {
3082 if (level == 0) {
3083 btrfs_item_key_to_cpu(buf, &key, i);
3084 if (key.type != BTRFS_EXTENT_DATA_KEY)
3085 continue;
3086 fi = btrfs_item_ptr(buf, i,
3087 struct btrfs_file_extent_item);
3088 if (btrfs_file_extent_type(buf, fi) ==
3089 BTRFS_FILE_EXTENT_INLINE)
3090 continue;
3091 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3092 if (bytenr == 0)
3093 continue;
3094
3095 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3096 key.offset -= btrfs_file_extent_offset(buf, fi);
3097 ret = process_func(trans, root, bytenr, num_bytes,
3098 parent, ref_root, key.objectid,
3099 key.offset, 1);
3100 if (ret)
3101 goto fail;
3102 } else {
3103 bytenr = btrfs_node_blockptr(buf, i);
3104 num_bytes = root->nodesize;
3105 ret = process_func(trans, root, bytenr, num_bytes,
3106 parent, ref_root, level - 1, 0,
3107 1);
3108 if (ret)
3109 goto fail;
3110 }
3111 }
3112 return 0;
3113 fail:
3114 return ret;
3115 }
3116
3117 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3118 struct extent_buffer *buf, int full_backref)
3119 {
3120 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3121 }
3122
3123 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3124 struct extent_buffer *buf, int full_backref)
3125 {
3126 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3127 }
3128
3129 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3130 struct btrfs_root *root,
3131 struct btrfs_path *path,
3132 struct btrfs_block_group_cache *cache)
3133 {
3134 int ret;
3135 struct btrfs_root *extent_root = root->fs_info->extent_root;
3136 unsigned long bi;
3137 struct extent_buffer *leaf;
3138
3139 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3140 if (ret) {
3141 if (ret > 0)
3142 ret = -ENOENT;
3143 goto fail;
3144 }
3145
3146 leaf = path->nodes[0];
3147 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3148 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3149 btrfs_mark_buffer_dirty(leaf);
3150 btrfs_release_path(path);
3151 fail:
3152 if (ret)
3153 btrfs_abort_transaction(trans, root, ret);
3154 return ret;
3155
3156 }
3157
3158 static struct btrfs_block_group_cache *
3159 next_block_group(struct btrfs_root *root,
3160 struct btrfs_block_group_cache *cache)
3161 {
3162 struct rb_node *node;
3163
3164 spin_lock(&root->fs_info->block_group_cache_lock);
3165
3166 /* If our block group was removed, we need a full search. */
3167 if (RB_EMPTY_NODE(&cache->cache_node)) {
3168 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3169
3170 spin_unlock(&root->fs_info->block_group_cache_lock);
3171 btrfs_put_block_group(cache);
3172 cache = btrfs_lookup_first_block_group(root->fs_info,
3173 next_bytenr);
3174 return cache;
3175 }
3176 node = rb_next(&cache->cache_node);
3177 btrfs_put_block_group(cache);
3178 if (node) {
3179 cache = rb_entry(node, struct btrfs_block_group_cache,
3180 cache_node);
3181 btrfs_get_block_group(cache);
3182 } else
3183 cache = NULL;
3184 spin_unlock(&root->fs_info->block_group_cache_lock);
3185 return cache;
3186 }
3187
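/*
 * Prepare the free space cache inode for this block group: create it if
 * needed, truncate stale contents, and preallocate room for the new cache.
 * Sets the block group's disk_cache_state accordingly.
 */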
3188 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3189 struct btrfs_trans_handle *trans,
3190 struct btrfs_path *path)
3191 {
3192 struct btrfs_root *root = block_group->fs_info->tree_root;
3193 struct inode *inode = NULL;
3194 u64 alloc_hint = 0;
3195 int dcs = BTRFS_DC_ERROR;
3196 int num_pages = 0;
3197 int retries = 0;
3198 int ret = 0;
3199
3200 /*
3201 * If this block group is smaller than 100 megs, don't bother caching the
3202 * block group.
3203 */
3204 if (block_group->key.offset < (100 * 1024 * 1024)) {
3205 spin_lock(&block_group->lock);
3206 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3207 spin_unlock(&block_group->lock);
3208 return 0;
3209 }
3210
3211 again:
3212 inode = lookup_free_space_inode(root, block_group, path);
3213 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3214 ret = PTR_ERR(inode);
3215 btrfs_release_path(path);
3216 goto out;
3217 }
3218
3219 if (IS_ERR(inode)) {
3220 BUG_ON(retries);
3221 retries++;
3222
3223 if (block_group->ro)
3224 goto out_free;
3225
3226 ret = create_free_space_inode(root, trans, block_group, path);
3227 if (ret)
3228 goto out_free;
3229 goto again;
3230 }
3231
3232 /* We've already set up this transaction, go ahead and exit */
3233 if (block_group->cache_generation == trans->transid &&
3234 i_size_read(inode)) {
3235 dcs = BTRFS_DC_SETUP;
3236 goto out_put;
3237 }
3238
3239 /*
3240 * We want to set the generation to 0, that way if anything goes wrong
3241 * from here on out we know not to trust this cache when we load up next
3242 * time.
3243 */
3244 BTRFS_I(inode)->generation = 0;
3245 ret = btrfs_update_inode(trans, root, inode);
3246 WARN_ON(ret);
3247
3248 if (i_size_read(inode) > 0) {
3249 ret = btrfs_check_trunc_cache_free_space(root,
3250 &root->fs_info->global_block_rsv);
3251 if (ret)
3252 goto out_put;
3253
3254 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3255 if (ret)
3256 goto out_put;
3257 }
3258
3259 spin_lock(&block_group->lock);
3260 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3261 !btrfs_test_opt(root, SPACE_CACHE) ||
3262 block_group->delalloc_bytes) {
3263 /*
3264 * don't bother trying to write stuff out _if_
3265 * a) we're not cached,
3266 * b) we're with the nospace_cache mount option, or c) we still have delalloc bytes.
3267 */
3268 dcs = BTRFS_DC_WRITTEN;
3269 spin_unlock(&block_group->lock);
3270 goto out_put;
3271 }
3272 spin_unlock(&block_group->lock);
3273
3274 /*
3275 * Try to preallocate enough space based on how big the block group is.
3276 * Keep in mind this has to include any pinned space which could end up
3277 * taking up quite a bit since it's not folded into the other space
3278 * cache.
3279 */
3280 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3281 if (!num_pages)
3282 num_pages = 1;
3283
3284 num_pages *= 16;
3285 num_pages *= PAGE_CACHE_SIZE;
3286
3287 ret = btrfs_check_data_free_space(inode, num_pages);
3288 if (ret)
3289 goto out_put;
3290
3291 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3292 num_pages, num_pages,
3293 &alloc_hint);
3294 if (!ret)
3295 dcs = BTRFS_DC_SETUP;
3296 btrfs_free_reserved_data_space(inode, num_pages);
3297
3298 out_put:
3299 iput(inode);
3300 out_free:
3301 btrfs_release_path(path);
3302 out:
3303 spin_lock(&block_group->lock);
3304 if (!ret && dcs == BTRFS_DC_SETUP)
3305 block_group->cache_generation = trans->transid;
3306 block_group->disk_cache_state = dcs;
3307 spin_unlock(&block_group->lock);
3308
3309 return ret;
3310 }
3311
3312 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3313 struct btrfs_root *root)
3314 {
3315 struct btrfs_block_group_cache *cache;
3316 struct btrfs_transaction *cur_trans = trans->transaction;
3317 int ret = 0;
3318 struct btrfs_path *path;
3319
3320 if (list_empty(&cur_trans->dirty_bgs))
3321 return 0;
3322
3323 path = btrfs_alloc_path();
3324 if (!path)
3325 return -ENOMEM;
3326
3327 /*
3328 * We don't need the lock here since we are protected by the transaction
3329 * commit. We want to do the cache_save_setup first and then run the
3330 * delayed refs to make sure we have the best chance at doing this all
3331 * in one shot.
3332 */
3333 while (!list_empty(&cur_trans->dirty_bgs)) {
3334 cache = list_first_entry(&cur_trans->dirty_bgs,
3335 struct btrfs_block_group_cache,
3336 dirty_list);
3337 list_del_init(&cache->dirty_list);
3338 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3339 cache_save_setup(cache, trans, path);
3340 if (!ret)
3341 ret = btrfs_run_delayed_refs(trans, root,
3342 (unsigned long) -1);
3343 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
3344 btrfs_write_out_cache(root, trans, cache, path);
3345 if (!ret)
3346 ret = write_one_cache_group(trans, root, path, cache);
3347 btrfs_put_block_group(cache);
3348 }
3349
3350 btrfs_free_path(path);
3351 return ret;
3352 }
3353
3354 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3355 {
3356 struct btrfs_block_group_cache *block_group;
3357 int readonly = 0;
3358
3359 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3360 if (!block_group || block_group->ro)
3361 readonly = 1;
3362 if (block_group)
3363 btrfs_put_block_group(block_group);
3364 return readonly;
3365 }
3366
3367 static const char *alloc_name(u64 flags)
3368 {
3369 switch (flags) {
3370 case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3371 return "mixed";
3372 case BTRFS_BLOCK_GROUP_METADATA:
3373 return "metadata";
3374 case BTRFS_BLOCK_GROUP_DATA:
3375 return "data";
3376 case BTRFS_BLOCK_GROUP_SYSTEM:
3377 return "system";
3378 default:
3379 WARN_ON(1);
3380 return "invalid-combination";
3381 };
3382 }
3383
3384 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3385 u64 total_bytes, u64 bytes_used,
3386 struct btrfs_space_info **space_info)
3387 {
3388 struct btrfs_space_info *found;
3389 int i;
3390 int factor;
3391 int ret;
3392
3393 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3394 BTRFS_BLOCK_GROUP_RAID10))
3395 factor = 2;
3396 else
3397 factor = 1;
3398
3399 found = __find_space_info(info, flags);
3400 if (found) {
3401 spin_lock(&found->lock);
3402 found->total_bytes += total_bytes;
3403 found->disk_total += total_bytes * factor;
3404 found->bytes_used += bytes_used;
3405 found->disk_used += bytes_used * factor;
3406 found->full = 0;
3407 spin_unlock(&found->lock);
3408 *space_info = found;
3409 return 0;
3410 }
3411 found = kzalloc(sizeof(*found), GFP_NOFS);
3412 if (!found)
3413 return -ENOMEM;
3414
3415 ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3416 if (ret) {
3417 kfree(found);
3418 return ret;
3419 }
3420
3421 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3422 INIT_LIST_HEAD(&found->block_groups[i]);
3423 init_rwsem(&found->groups_sem);
3424 spin_lock_init(&found->lock);
3425 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3426 found->total_bytes = total_bytes;
3427 found->disk_total = total_bytes * factor;
3428 found->bytes_used = bytes_used;
3429 found->disk_used = bytes_used * factor;
3430 found->bytes_pinned = 0;
3431 found->bytes_reserved = 0;
3432 found->bytes_readonly = 0;
3433 found->bytes_may_use = 0;
3434 found->full = 0;
3435 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3436 found->chunk_alloc = 0;
3437 found->flush = 0;
3438 init_waitqueue_head(&found->wait);
3439 INIT_LIST_HEAD(&found->ro_bgs);
3440
3441 ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3442 info->space_info_kobj, "%s",
3443 alloc_name(found->flags));
3444 if (ret) {
3445 kfree(found);
3446 return ret;
3447 }
3448
3449 *space_info = found;
3450 list_add_rcu(&found->list, &info->space_info);
3451 if (flags & BTRFS_BLOCK_GROUP_DATA)
3452 info->data_sinfo = found;
3453
3454 return ret;
3455 }
3456
3457 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3458 {
3459 u64 extra_flags = chunk_to_extended(flags) &
3460 BTRFS_EXTENDED_PROFILE_MASK;
3461
3462 write_seqlock(&fs_info->profiles_lock);
3463 if (flags & BTRFS_BLOCK_GROUP_DATA)
3464 fs_info->avail_data_alloc_bits |= extra_flags;
3465 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3466 fs_info->avail_metadata_alloc_bits |= extra_flags;
3467 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3468 fs_info->avail_system_alloc_bits |= extra_flags;
3469 write_sequnlock(&fs_info->profiles_lock);
3470 }
3471
3472 /*
3473 * returns target flags in extended format or 0 if restripe for this
3474 * chunk_type is not in progress
3475 *
3476 * should be called with either volume_mutex or balance_lock held
3477 */
3478 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3479 {
3480 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3481 u64 target = 0;
3482
3483 if (!bctl)
3484 return 0;
3485
3486 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3487 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3488 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3489 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3490 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3491 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3492 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3493 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3494 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3495 }
3496
3497 return target;
3498 }
3499
3500 /*
3501 * @flags: available profiles in extended format (see ctree.h)
3502 *
3503 * Returns reduced profile in chunk format. If profile changing is in
3504 * progress (either running or paused) picks the target profile (if it's
3505 * already available), otherwise falls back to plain reducing.
3506 */
3507 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3508 {
3509 u64 num_devices = root->fs_info->fs_devices->rw_devices;
3510 u64 target;
3511 u64 tmp;
3512
3513 /*
3514 * see if restripe for this chunk_type is in progress, if so
3515 * try to reduce to the target profile
3516 */
3517 spin_lock(&root->fs_info->balance_lock);
3518 target = get_restripe_target(root->fs_info, flags);
3519 if (target) {
3520 /* pick target profile only if it's already available */
3521 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3522 spin_unlock(&root->fs_info->balance_lock);
3523 return extended_to_chunk(target);
3524 }
3525 }
3526 spin_unlock(&root->fs_info->balance_lock);
3527
3528 /* First, mask out the RAID levels which aren't possible */
3529 if (num_devices == 1)
3530 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3531 BTRFS_BLOCK_GROUP_RAID5);
3532 if (num_devices < 3)
3533 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3534 if (num_devices < 4)
3535 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3536
3537 tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3538 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3539 BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3540 flags &= ~tmp;
3541
3542 if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3543 tmp = BTRFS_BLOCK_GROUP_RAID6;
3544 else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3545 tmp = BTRFS_BLOCK_GROUP_RAID5;
3546 else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3547 tmp = BTRFS_BLOCK_GROUP_RAID10;
3548 else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3549 tmp = BTRFS_BLOCK_GROUP_RAID1;
3550 else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3551 tmp = BTRFS_BLOCK_GROUP_RAID0;
3552
3553 return extended_to_chunk(flags | tmp);
3554 }
3555
3556 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3557 {
3558 unsigned seq;
3559 u64 flags;
3560
3561 do {
3562 flags = orig_flags;
3563 seq = read_seqbegin(&root->fs_info->profiles_lock);
3564
3565 if (flags & BTRFS_BLOCK_GROUP_DATA)
3566 flags |= root->fs_info->avail_data_alloc_bits;
3567 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3568 flags |= root->fs_info->avail_system_alloc_bits;
3569 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3570 flags |= root->fs_info->avail_metadata_alloc_bits;
3571 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3572
3573 return btrfs_reduce_alloc_profile(root, flags);
3574 }
3575
3576 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3577 {
3578 u64 flags;
3579 u64 ret;
3580
3581 if (data)
3582 flags = BTRFS_BLOCK_GROUP_DATA;
3583 else if (root == root->fs_info->chunk_root)
3584 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3585 else
3586 flags = BTRFS_BLOCK_GROUP_METADATA;
3587
3588 ret = get_alloc_profile(root, flags);
3589 return ret;
3590 }
3591
3592 /*
3593 * This will check the space that the inode allocates from to make sure we have
3594 * enough space for bytes.
3595 */
3596 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3597 {
3598 struct btrfs_space_info *data_sinfo;
3599 struct btrfs_root *root = BTRFS_I(inode)->root;
3600 struct btrfs_fs_info *fs_info = root->fs_info;
3601 u64 used;
3602 int ret = 0, committed = 0, alloc_chunk = 1;
3603
3604 /* make sure bytes are sectorsize aligned */
3605 bytes = ALIGN(bytes, root->sectorsize);
3606
3607 if (btrfs_is_free_space_inode(inode)) {
3608 committed = 1;
3609 ASSERT(current->journal_info);
3610 }
3611
3612 data_sinfo = fs_info->data_sinfo;
3613 if (!data_sinfo)
3614 goto alloc;
3615
3616 again:
3617 /* make sure we have enough space to handle the data first */
3618 spin_lock(&data_sinfo->lock);
3619 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3620 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3621 data_sinfo->bytes_may_use;
3622
3623 if (used + bytes > data_sinfo->total_bytes) {
3624 struct btrfs_trans_handle *trans;
3625
3626 /*
3627 * if we don't have enough free bytes in this space then we need
3628 * to alloc a new chunk.
3629 */
3630 if (!data_sinfo->full && alloc_chunk) {
3631 u64 alloc_target;
3632
3633 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3634 spin_unlock(&data_sinfo->lock);
3635 alloc:
3636 alloc_target = btrfs_get_alloc_profile(root, 1);
3637 /*
3638 * It is ugly that we don't call nolock join
3639 * transaction for the free space inode case here.
3640 * But it is safe because we only do the data space
3641 * reservation for the free space cache in the
3642 * transaction context, the common join transaction
3643 * just increase the counter of the current transaction
3644 * handler, doesn't try to acquire the trans_lock of
3645 * the fs.
3646 */
3647 trans = btrfs_join_transaction(root);
3648 if (IS_ERR(trans))
3649 return PTR_ERR(trans);
3650
3651 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3652 alloc_target,
3653 CHUNK_ALLOC_NO_FORCE);
3654 btrfs_end_transaction(trans, root);
3655 if (ret < 0) {
3656 if (ret != -ENOSPC)
3657 return ret;
3658 else
3659 goto commit_trans;
3660 }
3661
3662 if (!data_sinfo)
3663 data_sinfo = fs_info->data_sinfo;
3664
3665 goto again;
3666 }
3667
3668 /*
3669 * If we don't have enough pinned space to deal with this
3670 * allocation don't bother committing the transaction.
3671 */
3672 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3673 bytes) < 0)
3674 committed = 1;
3675 spin_unlock(&data_sinfo->lock);
3676
3677 /* commit the current transaction and try again */
3678 commit_trans:
3679 if (!committed &&
3680 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3681 committed = 1;
3682
3683 trans = btrfs_join_transaction(root);
3684 if (IS_ERR(trans))
3685 return PTR_ERR(trans);
3686 ret = btrfs_commit_transaction(trans, root);
3687 if (ret)
3688 return ret;
3689 goto again;
3690 }
3691
3692 trace_btrfs_space_reservation(root->fs_info,
3693 "space_info:enospc",
3694 data_sinfo->flags, bytes, 1);
3695 return -ENOSPC;
3696 }
3697 data_sinfo->bytes_may_use += bytes;
3698 trace_btrfs_space_reservation(root->fs_info, "space_info",
3699 data_sinfo->flags, bytes, 1);
3700 spin_unlock(&data_sinfo->lock);
3701
3702 return 0;
3703 }
3704
3705 /*
3706 * Called if we need to clear a data reservation for this inode.
3707 */
3708 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3709 {
3710 struct btrfs_root *root = BTRFS_I(inode)->root;
3711 struct btrfs_space_info *data_sinfo;
3712
3713 /* make sure bytes are sectorsize aligned */
3714 bytes = ALIGN(bytes, root->sectorsize);
3715
3716 data_sinfo = root->fs_info->data_sinfo;
3717 spin_lock(&data_sinfo->lock);
3718 WARN_ON(data_sinfo->bytes_may_use < bytes);
3719 data_sinfo->bytes_may_use -= bytes;
3720 trace_btrfs_space_reservation(root->fs_info, "space_info",
3721 data_sinfo->flags, bytes, 0);
3722 spin_unlock(&data_sinfo->lock);
3723 }
3724
3725 static void force_metadata_allocation(struct btrfs_fs_info *info)
3726 {
3727 struct list_head *head = &info->space_info;
3728 struct btrfs_space_info *found;
3729
3730 rcu_read_lock();
3731 list_for_each_entry_rcu(found, head, list) {
3732 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3733 found->force_alloc = CHUNK_ALLOC_FORCE;
3734 }
3735 rcu_read_unlock();
3736 }
3737
3738 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3739 {
3740 return (global->size << 1);
3741 }
3742
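/*
 * Decide whether a new chunk should be allocated for this space_info, taking
 * the global reserve into account for metadata and applying looser limits in
 * CHUNK_ALLOC_LIMITED mode.
 */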
3743 static int should_alloc_chunk(struct btrfs_root *root,
3744 struct btrfs_space_info *sinfo, int force)
3745 {
3746 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3747 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3748 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3749 u64 thresh;
3750
3751 if (force == CHUNK_ALLOC_FORCE)
3752 return 1;
3753
3754 /*
3755 * We need to take into account the global rsv because for all intents
3756 * and purposes it's used space. Don't worry about locking the
3757 * global_rsv, it doesn't change except when the transaction commits.
3758 */
3759 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3760 num_allocated += calc_global_rsv_need_space(global_rsv);
3761
3762 /*
3763 * in limited mode, we want to have some free space up to
3764 * about 1% of the FS size.
3765 */
3766 if (force == CHUNK_ALLOC_LIMITED) {
3767 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3768 thresh = max_t(u64, 64 * 1024 * 1024,
3769 div_factor_fine(thresh, 1));
3770
3771 if (num_bytes - num_allocated < thresh)
3772 return 1;
3773 }
3774
3775 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3776 return 0;
3777 return 1;
3778 }
3779
3780 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3781 {
3782 u64 num_dev;
3783
3784 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3785 BTRFS_BLOCK_GROUP_RAID0 |
3786 BTRFS_BLOCK_GROUP_RAID5 |
3787 BTRFS_BLOCK_GROUP_RAID6))
3788 num_dev = root->fs_info->fs_devices->rw_devices;
3789 else if (type & BTRFS_BLOCK_GROUP_RAID1)
3790 num_dev = 2;
3791 else
3792 num_dev = 1; /* DUP or single */
3793
3794 /* metadata for updating devices and chunk tree */
3795 return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3796 }
3797
3798 static void check_system_chunk(struct btrfs_trans_handle *trans,
3799 struct btrfs_root *root, u64 type)
3800 {
3801 struct btrfs_space_info *info;
3802 u64 left;
3803 u64 thresh;
3804
3805 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3806 spin_lock(&info->lock);
3807 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3808 info->bytes_reserved - info->bytes_readonly;
3809 spin_unlock(&info->lock);
3810
3811 thresh = get_system_chunk_thresh(root, type);
3812 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3813 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3814 left, thresh, type);
3815 dump_space_info(info, 0, 0);
3816 }
3817
3818 if (left < thresh) {
3819 u64 flags;
3820
3821 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3822 btrfs_alloc_chunk(trans, root, flags);
3823 }
3824 }
3825
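/*
 * Allocate a new chunk of the given type if needed. Only one allocation may
 * be in flight per space_info; concurrent callers wait on chunk_mutex and
 * then recheck. Returns 1 if a chunk was allocated, 0 if none was needed,
 * or a negative error.
 */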
3826 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3827 struct btrfs_root *extent_root, u64 flags, int force)
3828 {
3829 struct btrfs_space_info *space_info;
3830 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3831 int wait_for_alloc = 0;
3832 int ret = 0;
3833
3834 /* Don't re-enter if we're already allocating a chunk */
3835 if (trans->allocating_chunk)
3836 return -ENOSPC;
3837
3838 space_info = __find_space_info(extent_root->fs_info, flags);
3839 if (!space_info) {
3840 ret = update_space_info(extent_root->fs_info, flags,
3841 0, 0, &space_info);
3842 BUG_ON(ret); /* -ENOMEM */
3843 }
3844 BUG_ON(!space_info); /* Logic error */
3845
3846 again:
3847 spin_lock(&space_info->lock);
3848 if (force < space_info->force_alloc)
3849 force = space_info->force_alloc;
3850 if (space_info->full) {
3851 if (should_alloc_chunk(extent_root, space_info, force))
3852 ret = -ENOSPC;
3853 else
3854 ret = 0;
3855 spin_unlock(&space_info->lock);
3856 return ret;
3857 }
3858
3859 if (!should_alloc_chunk(extent_root, space_info, force)) {
3860 spin_unlock(&space_info->lock);
3861 return 0;
3862 } else if (space_info->chunk_alloc) {
3863 wait_for_alloc = 1;
3864 } else {
3865 space_info->chunk_alloc = 1;
3866 }
3867
3868 spin_unlock(&space_info->lock);
3869
3870 mutex_lock(&fs_info->chunk_mutex);
3871
3872 /*
3873 * The chunk_mutex is held throughout the entirety of a chunk
3874 * allocation, so once we've acquired the chunk_mutex we know that the
3875 * other guy is done and we need to recheck and see if we should
3876 * allocate.
3877 */
3878 if (wait_for_alloc) {
3879 mutex_unlock(&fs_info->chunk_mutex);
3880 wait_for_alloc = 0;
3881 goto again;
3882 }
3883
3884 trans->allocating_chunk = true;
3885
3886 /*
3887 * If we have mixed data/metadata chunks we want to make sure we keep
3888 * allocating mixed chunks instead of individual chunks.
3889 */
3890 if (btrfs_mixed_space_info(space_info))
3891 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3892
3893 /*
3894 * if we're doing a data chunk, go ahead and make sure that
3895 * we keep a reasonable number of metadata chunks allocated in the
3896 * FS as well.
3897 */
3898 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3899 fs_info->data_chunk_allocations++;
3900 if (!(fs_info->data_chunk_allocations %
3901 fs_info->metadata_ratio))
3902 force_metadata_allocation(fs_info);
3903 }
3904
3905 /*
3906 * Check if we have enough space in SYSTEM chunk because we may need
3907 * to update devices.
3908 */
3909 check_system_chunk(trans, extent_root, flags);
3910
3911 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3912 trans->allocating_chunk = false;
3913
3914 spin_lock(&space_info->lock);
3915 if (ret < 0 && ret != -ENOSPC)
3916 goto out;
3917 if (ret)
3918 space_info->full = 1;
3919 else
3920 ret = 1;
3921
3922 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3923 out:
3924 space_info->chunk_alloc = 0;
3925 spin_unlock(&space_info->lock);
3926 mutex_unlock(&fs_info->chunk_mutex);
3927 return ret;
3928 }
3929
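/*
 * Decide whether this space_info may be overcommitted by 'bytes'.
 * Unallocated device space counts as available (halved for DUP/RAID1/
 * RAID10 and further scaled down by the flush level), but never at the
 * expense of the global reserve.
 */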
3930 static int can_overcommit(struct btrfs_root *root,
3931 struct btrfs_space_info *space_info, u64 bytes,
3932 enum btrfs_reserve_flush_enum flush)
3933 {
3934 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3935 u64 profile = btrfs_get_alloc_profile(root, 0);
3936 u64 space_size;
3937 u64 avail;
3938 u64 used;
3939
3940 used = space_info->bytes_used + space_info->bytes_reserved +
3941 space_info->bytes_pinned + space_info->bytes_readonly;
3942
3943 /*
3944 * We only want to allow over committing if we have lots of actual space
3945 * free, but if we don't have enough space to handle the global reserve
3946 * space then we could end up having a real enospc problem when trying
3947 * to allocate a chunk or some other such important allocation.
3948 */
3949 spin_lock(&global_rsv->lock);
3950 space_size = calc_global_rsv_need_space(global_rsv);
3951 spin_unlock(&global_rsv->lock);
3952 if (used + space_size >= space_info->total_bytes)
3953 return 0;
3954
3955 used += space_info->bytes_may_use;
3956
3957 spin_lock(&root->fs_info->free_chunk_lock);
3958 avail = root->fs_info->free_chunk_space;
3959 spin_unlock(&root->fs_info->free_chunk_lock);
3960
3961 /*
3962 * If we have dup, raid1 or raid10 then only half of the free
3963 * space is actually usable. For raid56, the space info used
3964 * doesn't include the parity drive, so we don't have to
3965 * change the math
3966 */
3967 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3968 BTRFS_BLOCK_GROUP_RAID1 |
3969 BTRFS_BLOCK_GROUP_RAID10))
3970 avail >>= 1;
3971
3972 /*
3973 * If we aren't allowed to flush everything, we may overcommit by up
3974 * to half of the space. If we can flush everything, be more careful
3975 * and only allow overcommitting up to 1/8 of the space.
3976 */
3977 if (flush == BTRFS_RESERVE_FLUSH_ALL)
3978 avail >>= 3;
3979 else
3980 avail >>= 1;
3981
3982 if (used + bytes < space_info->total_bytes + avail)
3983 return 1;
3984 return 0;
3985 }
3986
3987 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3988 unsigned long nr_pages, int nr_items)
3989 {
3990 struct super_block *sb = root->fs_info->sb;
3991
3992 if (down_read_trylock(&sb->s_umount)) {
3993 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3994 up_read(&sb->s_umount);
3995 } else {
3996 /*
3997 * We needn't worry about the filesystem going from r/w to r/o even
3998 * though we don't acquire the ->s_umount mutex, because the
3999 * filesystem should guarantee that its delalloc inode list is
4000 * empty once it is read-only (all dirty pages have been written
4001 * to disk).
4002 */
4003 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4004 if (!current->journal_info)
4005 btrfs_wait_ordered_roots(root->fs_info, nr_items);
4006 }
4007 }
4008
4009 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4010 {
4011 u64 bytes;
4012 int nr;
4013
4014 bytes = btrfs_calc_trans_metadata_size(root, 1);
4015 nr = (int)div64_u64(to_reclaim, bytes);
4016 if (!nr)
4017 nr = 1;
4018 return nr;
4019 }
4020
4021 #define EXTENT_SIZE_PER_ITEM (256 * 1024)
4022
4023 /*
4024 * shrink metadata reservation for delalloc
4025 */
4026 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4027 bool wait_ordered)
4028 {
4029 struct btrfs_block_rsv *block_rsv;
4030 struct btrfs_space_info *space_info;
4031 struct btrfs_trans_handle *trans;
4032 u64 delalloc_bytes;
4033 u64 max_reclaim;
4034 long time_left;
4035 unsigned long nr_pages;
4036 int loops;
4037 int items;
4038 enum btrfs_reserve_flush_enum flush;
4039
4040 /* Calc the number of items we need to flush for this space reservation */
4041 items = calc_reclaim_items_nr(root, to_reclaim);
4042 to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4043
4044 trans = (struct btrfs_trans_handle *)current->journal_info;
4045 block_rsv = &root->fs_info->delalloc_block_rsv;
4046 space_info = block_rsv->space_info;
4047
4048 delalloc_bytes = percpu_counter_sum_positive(
4049 &root->fs_info->delalloc_bytes);
4050 if (delalloc_bytes == 0) {
4051 if (trans)
4052 return;
4053 if (wait_ordered)
4054 btrfs_wait_ordered_roots(root->fs_info, items);
4055 return;
4056 }
4057
4058 loops = 0;
4059 while (delalloc_bytes && loops < 3) {
4060 max_reclaim = min(delalloc_bytes, to_reclaim);
4061 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4062 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4063 /*
4064 * We need to wait for the async pages to actually start before
4065 * we do anything.
4066 */
4067 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4068 if (!max_reclaim)
4069 goto skip_async;
4070
4071 if (max_reclaim <= nr_pages)
4072 max_reclaim = 0;
4073 else
4074 max_reclaim -= nr_pages;
4075
4076 wait_event(root->fs_info->async_submit_wait,
4077 atomic_read(&root->fs_info->async_delalloc_pages) <=
4078 (int)max_reclaim);
4079 skip_async:
4080 if (!trans)
4081 flush = BTRFS_RESERVE_FLUSH_ALL;
4082 else
4083 flush = BTRFS_RESERVE_NO_FLUSH;
4084 spin_lock(&space_info->lock);
4085 if (can_overcommit(root, space_info, orig, flush)) {
4086 spin_unlock(&space_info->lock);
4087 break;
4088 }
4089 spin_unlock(&space_info->lock);
4090
4091 loops++;
4092 if (wait_ordered && !trans) {
4093 btrfs_wait_ordered_roots(root->fs_info, items);
4094 } else {
4095 time_left = schedule_timeout_killable(1);
4096 if (time_left)
4097 break;
4098 }
4099 delalloc_bytes = percpu_counter_sum_positive(
4100 &root->fs_info->delalloc_bytes);
4101 }
4102 }
4103
4104 /**
4105 * may_commit_transaction - possibly commit the transaction if it's ok to
4106 * @root - the root we're allocating for
4107 * @bytes - the number of bytes we want to reserve
4108 * @force - force the commit
4109 *
4110 * This will check to make sure that committing the transaction will actually
4111 * get us somewhere and then commit the transaction if it does. Otherwise it
4112 * will return -ENOSPC.
4113 */
4114 static int may_commit_transaction(struct btrfs_root *root,
4115 struct btrfs_space_info *space_info,
4116 u64 bytes, int force)
4117 {
4118 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4119 struct btrfs_trans_handle *trans;
4120
4121 trans = (struct btrfs_trans_handle *)current->journal_info;
4122 if (trans)
4123 return -EAGAIN;
4124
4125 if (force)
4126 goto commit;
4127
4128 /* See if there is enough pinned space to make this reservation */
4129 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4130 bytes) >= 0)
4131 goto commit;
4132
4133 /*
4134 * See if there is some space in the delayed insertion reservation for
4135 * this reservation.
4136 */
4137 if (space_info != delayed_rsv->space_info)
4138 return -ENOSPC;
4139
4140 spin_lock(&delayed_rsv->lock);
4141 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4142 bytes - delayed_rsv->size) >= 0) {
4143 spin_unlock(&delayed_rsv->lock);
4144 return -ENOSPC;
4145 }
4146 spin_unlock(&delayed_rsv->lock);
4147
4148 commit:
4149 trans = btrfs_join_transaction(root);
4150 if (IS_ERR(trans))
4151 return -ENOSPC;
4152
4153 return btrfs_commit_transaction(trans, root);
4154 }
4155
4156 enum flush_state {
4157 FLUSH_DELAYED_ITEMS_NR = 1,
4158 FLUSH_DELAYED_ITEMS = 2,
4159 FLUSH_DELALLOC = 3,
4160 FLUSH_DELALLOC_WAIT = 4,
4161 ALLOC_CHUNK = 5,
4162 COMMIT_TRANS = 6,
4163 };
4164
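/*
 * Run one flush state in an attempt to reclaim metadata space: run delayed
 * items, flush (and optionally wait on) delalloc, allocate a chunk, or
 * commit the transaction, depending on 'state'.
 */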
4165 static int flush_space(struct btrfs_root *root,
4166 struct btrfs_space_info *space_info, u64 num_bytes,
4167 u64 orig_bytes, int state)
4168 {
4169 struct btrfs_trans_handle *trans;
4170 int nr;
4171 int ret = 0;
4172
4173 switch (state) {
4174 case FLUSH_DELAYED_ITEMS_NR:
4175 case FLUSH_DELAYED_ITEMS:
4176 if (state == FLUSH_DELAYED_ITEMS_NR)
4177 nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4178 else
4179 nr = -1;
4180
4181 trans = btrfs_join_transaction(root);
4182 if (IS_ERR(trans)) {
4183 ret = PTR_ERR(trans);
4184 break;
4185 }
4186 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4187 btrfs_end_transaction(trans, root);
4188 break;
4189 case FLUSH_DELALLOC:
4190 case FLUSH_DELALLOC_WAIT:
4191 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4192 state == FLUSH_DELALLOC_WAIT);
4193 break;
4194 case ALLOC_CHUNK:
4195 trans = btrfs_join_transaction(root);
4196 if (IS_ERR(trans)) {
4197 ret = PTR_ERR(trans);
4198 break;
4199 }
4200 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4201 btrfs_get_alloc_profile(root, 0),
4202 CHUNK_ALLOC_NO_FORCE);
4203 btrfs_end_transaction(trans, root);
4204 if (ret == -ENOSPC)
4205 ret = 0;
4206 break;
4207 case COMMIT_TRANS:
4208 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4209 break;
4210 default:
4211 ret = -ENOSPC;
4212 break;
4213 }
4214
4215 return ret;
4216 }
4217
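/*
 * Estimate how much metadata space the async reclaim worker should try to
 * free: zero if we can still overcommit, otherwise whatever is used beyond
 * roughly 90-95% of the space_info, clamped to what is actually
 * reclaimable.
 */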
4218 static inline u64
4219 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4220 struct btrfs_space_info *space_info)
4221 {
4222 u64 used;
4223 u64 expected;
4224 u64 to_reclaim;
4225
4226 to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4227 16 * 1024 * 1024);
4228 spin_lock(&space_info->lock);
4229 if (can_overcommit(root, space_info, to_reclaim,
4230 BTRFS_RESERVE_FLUSH_ALL)) {
4231 to_reclaim = 0;
4232 goto out;
4233 }
4234
4235 used = space_info->bytes_used + space_info->bytes_reserved +
4236 space_info->bytes_pinned + space_info->bytes_readonly +
4237 space_info->bytes_may_use;
4238 if (can_overcommit(root, space_info, 1024 * 1024,
4239 BTRFS_RESERVE_FLUSH_ALL))
4240 expected = div_factor_fine(space_info->total_bytes, 95);
4241 else
4242 expected = div_factor_fine(space_info->total_bytes, 90);
4243
4244 if (used > expected)
4245 to_reclaim = used - expected;
4246 else
4247 to_reclaim = 0;
4248 to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4249 space_info->bytes_reserved);
4250 out:
4251 spin_unlock(&space_info->lock);
4252
4253 return to_reclaim;
4254 }
4255
4256 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4257 struct btrfs_fs_info *fs_info, u64 used)
4258 {
4259 return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4260 !btrfs_fs_closing(fs_info) &&
4261 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4262 }
4263
4264 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4265 struct btrfs_fs_info *fs_info,
4266 int flush_state)
4267 {
4268 u64 used;
4269
4270 spin_lock(&space_info->lock);
4271 /*
4272 * We have run out of space and have not been able to reclaim any
4273 * space via flush_space, so don't bother doing async reclaim.
4274 */
4275 if (flush_state > COMMIT_TRANS && space_info->full) {
4276 spin_unlock(&space_info->lock);
4277 return 0;
4278 }
4279
4280 used = space_info->bytes_used + space_info->bytes_reserved +
4281 space_info->bytes_pinned + space_info->bytes_readonly +
4282 space_info->bytes_may_use;
4283 if (need_do_async_reclaim(space_info, fs_info, used)) {
4284 spin_unlock(&space_info->lock);
4285 return 1;
4286 }
4287 spin_unlock(&space_info->lock);
4288
4289 return 0;
4290 }
4291
4292 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4293 {
4294 struct btrfs_fs_info *fs_info;
4295 struct btrfs_space_info *space_info;
4296 u64 to_reclaim;
4297 int flush_state;
4298
4299 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4300 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4301
4302 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4303 space_info);
4304 if (!to_reclaim)
4305 return;
4306
4307 flush_state = FLUSH_DELAYED_ITEMS_NR;
4308 do {
4309 flush_space(fs_info->fs_root, space_info, to_reclaim,
4310 to_reclaim, flush_state);
4311 flush_state++;
4312 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4313 flush_state))
4314 return;
4315 } while (flush_state <= COMMIT_TRANS);
4316
4317 if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
4318 queue_work(system_unbound_wq, work);
4319 }
4320
4321 void btrfs_init_async_reclaim_work(struct work_struct *work)
4322 {
4323 INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4324 }
4325
4326 /**
4327 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4328 * @root - the root we're allocating for
4329 * @block_rsv - the block_rsv we're allocating for
4330 * @orig_bytes - the number of bytes we want
4331 * @flush - whether or not we can flush to make our reservation
4332 *
4333 * This will reserve orig_bytes number of bytes from the space info associated
4334 * with the block_rsv. If there is not enough space it will make an attempt to
4335 * flush out space to make room. It will do this by flushing delalloc if
4336 * possible or committing the transaction. If flush is 0 then no attempts to
4337 * regain reservations will be made and this will fail if there is not enough
4338 * space already.
4339 */
4340 static int reserve_metadata_bytes(struct btrfs_root *root,
4341 struct btrfs_block_rsv *block_rsv,
4342 u64 orig_bytes,
4343 enum btrfs_reserve_flush_enum flush)
4344 {
4345 struct btrfs_space_info *space_info = block_rsv->space_info;
4346 u64 used;
4347 u64 num_bytes = orig_bytes;
4348 int flush_state = FLUSH_DELAYED_ITEMS_NR;
4349 int ret = 0;
4350 bool flushing = false;
4351
4352 again:
4353 ret = 0;
4354 spin_lock(&space_info->lock);
4355 /*
4356 * We only want to wait if somebody other than us is flushing and we
4357 * are actually allowed to flush all things.
4358 */
4359 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4360 space_info->flush) {
4361 spin_unlock(&space_info->lock);
4362 /*
4363 * If we have a trans handle we can't wait because the flusher
4364 * may have to commit the transaction, which would mean we would
4365 * deadlock since we are waiting for the flusher to finish, but
4366 * hold the current transaction open.
4367 */
4368 if (current->journal_info)
4369 return -EAGAIN;
4370 ret = wait_event_killable(space_info->wait, !space_info->flush);
4371 /* Must have been killed, return */
4372 if (ret)
4373 return -EINTR;
4374
4375 spin_lock(&space_info->lock);
4376 }
4377
4378 ret = -ENOSPC;
4379 used = space_info->bytes_used + space_info->bytes_reserved +
4380 space_info->bytes_pinned + space_info->bytes_readonly +
4381 space_info->bytes_may_use;
4382
4383 /*
4384 * The idea here is that if we've not already over-reserved the block
4385 * group then we can go ahead and save our reservation first and then
4386 * start flushing if we need to. Otherwise, if we've already
4387 * overcommitted, let's start flushing stuff first and then come back
4388 * and try to make our reservation.
4389 */
4390 if (used <= space_info->total_bytes) {
4391 if (used + orig_bytes <= space_info->total_bytes) {
4392 space_info->bytes_may_use += orig_bytes;
4393 trace_btrfs_space_reservation(root->fs_info,
4394 "space_info", space_info->flags, orig_bytes, 1);
4395 ret = 0;
4396 } else {
4397 /*
4398 * Ok set num_bytes to orig_bytes since we aren't
4399 * overcommitted, this way we only try and reclaim what
4400 * we need.
4401 */
4402 num_bytes = orig_bytes;
4403 }
4404 } else {
4405 /*
4406 * Ok we're over committed, set num_bytes to the overcommitted
4407 * amount plus the amount of bytes that we need for this
4408 * reservation.
4409 */
4410 num_bytes = used - space_info->total_bytes +
4411 (orig_bytes * 2);
4412 }
4413
4414 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4415 space_info->bytes_may_use += orig_bytes;
4416 trace_btrfs_space_reservation(root->fs_info, "space_info",
4417 space_info->flags, orig_bytes,
4418 1);
4419 ret = 0;
4420 }
4421
4422 /*
4423 * Couldn't make our reservation, save our place so while we're trying
4424 * to reclaim space we can actually use it instead of somebody else
4425 * stealing it from us.
4426 *
4427 * We make the other tasks wait for the flush only when we can flush
4428 * all things.
4429 */
4430 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4431 flushing = true;
4432 space_info->flush = 1;
4433 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4434 used += orig_bytes;
4435 /*
4436 * We will do the space reservation dance during log replay,
4437 * which means we won't have fs_info->fs_root set, so don't do
4438 * the async reclaim as we will panic.
4439 */
4440 if (!root->fs_info->log_root_recovering &&
4441 need_do_async_reclaim(space_info, root->fs_info, used) &&
4442 !work_busy(&root->fs_info->async_reclaim_work))
4443 queue_work(system_unbound_wq,
4444 &root->fs_info->async_reclaim_work);
4445 }
4446 spin_unlock(&space_info->lock);
4447
4448 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4449 goto out;
4450
4451 ret = flush_space(root, space_info, num_bytes, orig_bytes,
4452 flush_state);
4453 flush_state++;
4454
4455 /*
4456 * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
4457 * would happen. So skip delalloc flush.
4458 */
4459 if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4460 (flush_state == FLUSH_DELALLOC ||
4461 flush_state == FLUSH_DELALLOC_WAIT))
4462 flush_state = ALLOC_CHUNK;
4463
4464 if (!ret)
4465 goto again;
4466 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4467 flush_state < COMMIT_TRANS)
4468 goto again;
4469 else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4470 flush_state <= COMMIT_TRANS)
4471 goto again;
4472
4473 out:
4474 if (ret == -ENOSPC &&
4475 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4476 struct btrfs_block_rsv *global_rsv =
4477 &root->fs_info->global_block_rsv;
4478
4479 if (block_rsv != global_rsv &&
4480 !block_rsv_use_bytes(global_rsv, orig_bytes))
4481 ret = 0;
4482 }
4483 if (ret == -ENOSPC)
4484 trace_btrfs_space_reservation(root->fs_info,
4485 "space_info:enospc",
4486 space_info->flags, orig_bytes, 1);
4487 if (flushing) {
4488 spin_lock(&space_info->lock);
4489 space_info->flush = 0;
4490 wake_up_all(&space_info->wait);
4491 spin_unlock(&space_info->lock);
4492 }
4493 return ret;
4494 }
4495
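/*
 * Pick the block reserve to charge for COWing blocks of this root: the
 * transaction's reserve for reference counted roots, for the csum root
 * while adding csums, and for the uuid root; otherwise the root's own
 * reserve, falling back to the empty reserve.
 */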
4496 static struct btrfs_block_rsv *get_block_rsv(
4497 const struct btrfs_trans_handle *trans,
4498 const struct btrfs_root *root)
4499 {
4500 struct btrfs_block_rsv *block_rsv = NULL;
4501
4502 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4503 block_rsv = trans->block_rsv;
4504
4505 if (root == root->fs_info->csum_root && trans->adding_csums)
4506 block_rsv = trans->block_rsv;
4507
4508 if (root == root->fs_info->uuid_root)
4509 block_rsv = trans->block_rsv;
4510
4511 if (!block_rsv)
4512 block_rsv = root->block_rsv;
4513
4514 if (!block_rsv)
4515 block_rsv = &root->fs_info->empty_block_rsv;
4516
4517 return block_rsv;
4518 }
4519
4520 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4521 u64 num_bytes)
4522 {
4523 int ret = -ENOSPC;
4524 spin_lock(&block_rsv->lock);
4525 if (block_rsv->reserved >= num_bytes) {
4526 block_rsv->reserved -= num_bytes;
4527 if (block_rsv->reserved < block_rsv->size)
4528 block_rsv->full = 0;
4529 ret = 0;
4530 }
4531 spin_unlock(&block_rsv->lock);
4532 return ret;
4533 }
4534
4535 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4536 u64 num_bytes, int update_size)
4537 {
4538 spin_lock(&block_rsv->lock);
4539 block_rsv->reserved += num_bytes;
4540 if (update_size)
4541 block_rsv->size += num_bytes;
4542 else if (block_rsv->reserved >= block_rsv->size)
4543 block_rsv->full = 1;
4544 spin_unlock(&block_rsv->lock);
4545 }
4546
4547 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4548 struct btrfs_block_rsv *dest, u64 num_bytes,
4549 int min_factor)
4550 {
4551 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4552 u64 min_bytes;
4553
4554 if (global_rsv->space_info != dest->space_info)
4555 return -ENOSPC;
4556
4557 spin_lock(&global_rsv->lock);
4558 min_bytes = div_factor(global_rsv->size, min_factor);
4559 if (global_rsv->reserved < min_bytes + num_bytes) {
4560 spin_unlock(&global_rsv->lock);
4561 return -ENOSPC;
4562 }
4563 global_rsv->reserved -= num_bytes;
4564 if (global_rsv->reserved < global_rsv->size)
4565 global_rsv->full = 0;
4566 spin_unlock(&global_rsv->lock);
4567
4568 block_rsv_add_bytes(dest, num_bytes, 1);
4569 return 0;
4570 }
4571
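/*
 * Shrink block_rsv->size by num_bytes ((u64)-1 means the whole size) and
 * hand any now-excess reserved bytes to 'dest' if it has room, returning
 * whatever is left over to the space_info's bytes_may_use.
 */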
4572 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4573 struct btrfs_block_rsv *block_rsv,
4574 struct btrfs_block_rsv *dest, u64 num_bytes)
4575 {
4576 struct btrfs_space_info *space_info = block_rsv->space_info;
4577
4578 spin_lock(&block_rsv->lock);
4579 if (num_bytes == (u64)-1)
4580 num_bytes = block_rsv->size;
4581 block_rsv->size -= num_bytes;
4582 if (block_rsv->reserved >= block_rsv->size) {
4583 num_bytes = block_rsv->reserved - block_rsv->size;
4584 block_rsv->reserved = block_rsv->size;
4585 block_rsv->full = 1;
4586 } else {
4587 num_bytes = 0;
4588 }
4589 spin_unlock(&block_rsv->lock);
4590
4591 if (num_bytes > 0) {
4592 if (dest) {
4593 spin_lock(&dest->lock);
4594 if (!dest->full) {
4595 u64 bytes_to_add;
4596
4597 bytes_to_add = dest->size - dest->reserved;
4598 bytes_to_add = min(num_bytes, bytes_to_add);
4599 dest->reserved += bytes_to_add;
4600 if (dest->reserved >= dest->size)
4601 dest->full = 1;
4602 num_bytes -= bytes_to_add;
4603 }
4604 spin_unlock(&dest->lock);
4605 }
4606 if (num_bytes) {
4607 spin_lock(&space_info->lock);
4608 space_info->bytes_may_use -= num_bytes;
4609 trace_btrfs_space_reservation(fs_info, "space_info",
4610 space_info->flags, num_bytes, 0);
4611 spin_unlock(&space_info->lock);
4612 }
4613 }
4614 }
4615
4616 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4617 struct btrfs_block_rsv *dst, u64 num_bytes)
4618 {
4619 int ret;
4620
4621 ret = block_rsv_use_bytes(src, num_bytes);
4622 if (ret)
4623 return ret;
4624
4625 block_rsv_add_bytes(dst, num_bytes, 1);
4626 return 0;
4627 }
4628
4629 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4630 {
4631 memset(rsv, 0, sizeof(*rsv));
4632 spin_lock_init(&rsv->lock);
4633 rsv->type = type;
4634 }
4635
4636 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4637 unsigned short type)
4638 {
4639 struct btrfs_block_rsv *block_rsv;
4640 struct btrfs_fs_info *fs_info = root->fs_info;
4641
4642 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4643 if (!block_rsv)
4644 return NULL;
4645
4646 btrfs_init_block_rsv(block_rsv, type);
4647 block_rsv->space_info = __find_space_info(fs_info,
4648 BTRFS_BLOCK_GROUP_METADATA);
4649 return block_rsv;
4650 }
4651
4652 void btrfs_free_block_rsv(struct btrfs_root *root,
4653 struct btrfs_block_rsv *rsv)
4654 {
4655 if (!rsv)
4656 return;
4657 btrfs_block_rsv_release(root, rsv, (u64)-1);
4658 kfree(rsv);
4659 }
4660
4661 int btrfs_block_rsv_add(struct btrfs_root *root,
4662 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4663 enum btrfs_reserve_flush_enum flush)
4664 {
4665 int ret;
4666
4667 if (num_bytes == 0)
4668 return 0;
4669
4670 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4671 if (!ret) {
4672 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4673 return 0;
4674 }
4675
4676 return ret;
4677 }
4678
4679 int btrfs_block_rsv_check(struct btrfs_root *root,
4680 struct btrfs_block_rsv *block_rsv, int min_factor)
4681 {
4682 u64 num_bytes = 0;
4683 int ret = -ENOSPC;
4684
4685 if (!block_rsv)
4686 return 0;
4687
4688 spin_lock(&block_rsv->lock);
4689 num_bytes = div_factor(block_rsv->size, min_factor);
4690 if (block_rsv->reserved >= num_bytes)
4691 ret = 0;
4692 spin_unlock(&block_rsv->lock);
4693
4694 return ret;
4695 }
4696
4697 int btrfs_block_rsv_refill(struct btrfs_root *root,
4698 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4699 enum btrfs_reserve_flush_enum flush)
4700 {
4701 u64 num_bytes = 0;
4702 int ret = -ENOSPC;
4703
4704 if (!block_rsv)
4705 return 0;
4706
4707 spin_lock(&block_rsv->lock);
4708 num_bytes = min_reserved;
4709 if (block_rsv->reserved >= num_bytes)
4710 ret = 0;
4711 else
4712 num_bytes -= block_rsv->reserved;
4713 spin_unlock(&block_rsv->lock);
4714
4715 if (!ret)
4716 return 0;
4717
4718 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4719 if (!ret) {
4720 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4721 return 0;
4722 }
4723
4724 return ret;
4725 }
4726
4727 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4728 struct btrfs_block_rsv *dst_rsv,
4729 u64 num_bytes)
4730 {
4731 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4732 }
4733
4734 void btrfs_block_rsv_release(struct btrfs_root *root,
4735 struct btrfs_block_rsv *block_rsv,
4736 u64 num_bytes)
4737 {
4738 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4739 if (global_rsv == block_rsv ||
4740 block_rsv->space_info != global_rsv->space_info)
4741 global_rsv = NULL;
4742 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4743 num_bytes);
4744 }
4745
4746 /*
4747 * helper to calculate size of global block reservation.
4748 * the desired value is the sum of space used by the extent tree,
4749 * checksum tree and root tree
4750 */
4751 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4752 {
4753 struct btrfs_space_info *sinfo;
4754 u64 num_bytes;
4755 u64 meta_used;
4756 u64 data_used;
4757 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4758
4759 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4760 spin_lock(&sinfo->lock);
4761 data_used = sinfo->bytes_used;
4762 spin_unlock(&sinfo->lock);
4763
4764 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4765 spin_lock(&sinfo->lock);
4766 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4767 data_used = 0;
4768 meta_used = sinfo->bytes_used;
4769 spin_unlock(&sinfo->lock);
4770
4771 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4772 csum_size * 2;
4773 num_bytes += div64_u64(data_used + meta_used, 50);
4774
4775 if (num_bytes * 3 > meta_used)
4776 num_bytes = div64_u64(meta_used, 3);
4777
4778 return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4779 }
4780
4781 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4782 {
4783 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4784 struct btrfs_space_info *sinfo = block_rsv->space_info;
4785 u64 num_bytes;
4786
4787 num_bytes = calc_global_metadata_size(fs_info);
4788
4789 spin_lock(&sinfo->lock);
4790 spin_lock(&block_rsv->lock);
4791
4792 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4793
4794 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4795 sinfo->bytes_reserved + sinfo->bytes_readonly +
4796 sinfo->bytes_may_use;
4797
4798 if (sinfo->total_bytes > num_bytes) {
4799 num_bytes = sinfo->total_bytes - num_bytes;
4800 block_rsv->reserved += num_bytes;
4801 sinfo->bytes_may_use += num_bytes;
4802 trace_btrfs_space_reservation(fs_info, "space_info",
4803 sinfo->flags, num_bytes, 1);
4804 }
4805
4806 if (block_rsv->reserved >= block_rsv->size) {
4807 num_bytes = block_rsv->reserved - block_rsv->size;
4808 sinfo->bytes_may_use -= num_bytes;
4809 trace_btrfs_space_reservation(fs_info, "space_info",
4810 sinfo->flags, num_bytes, 0);
4811 block_rsv->reserved = block_rsv->size;
4812 block_rsv->full = 1;
4813 }
4814
4815 spin_unlock(&block_rsv->lock);
4816 spin_unlock(&sinfo->lock);
4817 }
4818
4819 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4820 {
4821 struct btrfs_space_info *space_info;
4822
4823 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4824 fs_info->chunk_block_rsv.space_info = space_info;
4825
4826 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4827 fs_info->global_block_rsv.space_info = space_info;
4828 fs_info->delalloc_block_rsv.space_info = space_info;
4829 fs_info->trans_block_rsv.space_info = space_info;
4830 fs_info->empty_block_rsv.space_info = space_info;
4831 fs_info->delayed_block_rsv.space_info = space_info;
4832
4833 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4834 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4835 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4836 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4837 if (fs_info->quota_root)
4838 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4839 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4840
4841 update_global_block_rsv(fs_info);
4842 }
4843
4844 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4845 {
4846 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4847 (u64)-1);
4848 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4849 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4850 WARN_ON(fs_info->trans_block_rsv.size > 0);
4851 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4852 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4853 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4854 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4855 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4856 }
4857
4858 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4859 struct btrfs_root *root)
4860 {
4861 if (!trans->block_rsv)
4862 return;
4863
4864 if (!trans->bytes_reserved)
4865 return;
4866
4867 trace_btrfs_space_reservation(root->fs_info, "transaction",
4868 trans->transid, trans->bytes_reserved, 0);
4869 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4870 trans->bytes_reserved = 0;
4871 }
4872
4873 /* Can only return 0 or -ENOSPC */
4874 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4875 struct inode *inode)
4876 {
4877 struct btrfs_root *root = BTRFS_I(inode)->root;
4878 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4879 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4880
4881 /*
4882 * We need to hold space in order to delete our orphan item once we've
4883 * added it, so this takes the reservation so we can release it later
4884 * when we are truly done with the orphan item.
4885 */
4886 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4887 trace_btrfs_space_reservation(root->fs_info, "orphan",
4888 btrfs_ino(inode), num_bytes, 1);
4889 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4890 }
4891
4892 void btrfs_orphan_release_metadata(struct inode *inode)
4893 {
4894 struct btrfs_root *root = BTRFS_I(inode)->root;
4895 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4896 trace_btrfs_space_reservation(root->fs_info, "orphan",
4897 btrfs_ino(inode), num_bytes, 0);
4898 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4899 }
4900
4901 /*
4902 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4903 * root: the root of the parent directory
4904 * rsv: block reservation
4905 * items: the number of items that we need to reserve for
4906 * qgroup_reserved: used to return the reserved size in qgroup
4907 *
4908 * This function is used to reserve the space for snapshot/subvolume
4909 * creation and deletion. Those operations are different from the
4910 * common file/directory operations: they change two fs/file trees
4911 * and the root tree, and the number of items that the qgroup reserves
4912 * differs from the free space reservation. So we can not use
4913 * the space reservation mechanism in start_transaction().
4914 */
4915 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4916 struct btrfs_block_rsv *rsv,
4917 int items,
4918 u64 *qgroup_reserved,
4919 bool use_global_rsv)
4920 {
4921 u64 num_bytes;
4922 int ret;
4923 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4924
4925 if (root->fs_info->quota_enabled) {
4926 /* One for parent inode, two for dir entries */
4927 num_bytes = 3 * root->nodesize;
4928 ret = btrfs_qgroup_reserve(root, num_bytes);
4929 if (ret)
4930 return ret;
4931 } else {
4932 num_bytes = 0;
4933 }
4934
4935 *qgroup_reserved = num_bytes;
4936
4937 num_bytes = btrfs_calc_trans_metadata_size(root, items);
4938 rsv->space_info = __find_space_info(root->fs_info,
4939 BTRFS_BLOCK_GROUP_METADATA);
4940 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4941 BTRFS_RESERVE_FLUSH_ALL);
4942
4943 if (ret == -ENOSPC && use_global_rsv)
4944 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4945
4946 if (ret) {
4947 if (*qgroup_reserved)
4948 btrfs_qgroup_free(root, *qgroup_reserved);
4949 }
4950
4951 return ret;
4952 }
4953
4954 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4955 struct btrfs_block_rsv *rsv,
4956 u64 qgroup_reserved)
4957 {
4958 btrfs_block_rsv_release(root, rsv, (u64)-1);
4959 if (qgroup_reserved)
4960 btrfs_qgroup_free(root, qgroup_reserved);
4961 }
4962
4963 /**
4964 * drop_outstanding_extent - drop an outstanding extent
4965 * @inode: the inode we're dropping the extent for
4966 *
4967 * This is called when we are freeing up an outstanding extent, either
4968 * after an error or after an extent is written. This will return the number of
4969 * reserved extents that need to be freed. This must be called with
4970 * BTRFS_I(inode)->lock held.
4971 */
4972 static unsigned drop_outstanding_extent(struct inode *inode)
4973 {
4974 unsigned drop_inode_space = 0;
4975 unsigned dropped_extents = 0;
4976
4977 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4978 BTRFS_I(inode)->outstanding_extents--;
4979
4980 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4981 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4982 &BTRFS_I(inode)->runtime_flags))
4983 drop_inode_space = 1;
4984
4985 /*
4986 * If we have the same number of or more outstanding extents than we
4987 * have reserved then we need to leave the reserved extents count alone.
4988 */
4989 if (BTRFS_I(inode)->outstanding_extents >=
4990 BTRFS_I(inode)->reserved_extents)
4991 return drop_inode_space;
4992
4993 dropped_extents = BTRFS_I(inode)->reserved_extents -
4994 BTRFS_I(inode)->outstanding_extents;
4995 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4996 return dropped_extents + drop_inode_space;
4997 }
4998
4999 /**
5000 * calc_csum_metadata_size - return the amount of metadata space that must be
5001 * reserved/free'd for the given bytes.
5002 * @inode: the inode we're manipulating
5003 * @num_bytes: the number of bytes in question
5004 * @reserve: 1 if we are reserving space, 0 if we are freeing space
5005 *
5006 * This adjusts the number of csum_bytes in the inode and then returns the
5007 * correct amount of metadata that must either be reserved or freed. We
5008 * calculate how many checksums we can fit into one leaf and then divide the
5009 * number of bytes that will need to be checksummed by this value to figure out
5010 * how many checksums will be required. If we are adding bytes then the number
5011 * may go up and we will return the number of additional bytes that must be
5012 * reserved. If it is going down we will return the number of bytes that must
5013 * be freed.
5014 *
5015 * This must be called with BTRFS_I(inode)->lock held.
5016 */
5017 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5018 int reserve)
5019 {
5020 struct btrfs_root *root = BTRFS_I(inode)->root;
5021 u64 csum_size;
5022 int num_csums_per_leaf;
5023 int num_csums;
5024 int old_csums;
5025
5026 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5027 BTRFS_I(inode)->csum_bytes == 0)
5028 return 0;
5029
5030 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5031 if (reserve)
5032 BTRFS_I(inode)->csum_bytes += num_bytes;
5033 else
5034 BTRFS_I(inode)->csum_bytes -= num_bytes;
5035 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
5036 num_csums_per_leaf = (int)div64_u64(csum_size,
5037 sizeof(struct btrfs_csum_item) +
5038 sizeof(struct btrfs_disk_key));
5039 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5040 num_csums = num_csums + num_csums_per_leaf - 1;
5041 num_csums = num_csums / num_csums_per_leaf;
5042
5043 old_csums = old_csums + num_csums_per_leaf - 1;
5044 old_csums = old_csums / num_csums_per_leaf;
5045
5046 /* No change, no need to reserve more */
5047 if (old_csums == num_csums)
5048 return 0;
5049
5050 if (reserve)
5051 return btrfs_calc_trans_metadata_size(root,
5052 num_csums - old_csums);
5053
5054 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5055 }
5056
5057 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5058 {
5059 struct btrfs_root *root = BTRFS_I(inode)->root;
5060 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5061 u64 to_reserve = 0;
5062 u64 csum_bytes;
5063 unsigned nr_extents = 0;
5064 int extra_reserve = 0;
5065 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5066 int ret = 0;
5067 bool delalloc_lock = true;
5068 u64 to_free = 0;
5069 unsigned dropped;
5070
5071 /* If we are a free space inode we need to not flush since we will be in
5072 * the middle of a transaction commit. We also don't need the delalloc
5073 * mutex since we won't race with anybody. We need this mostly to make
5074 * lockdep shut its filthy mouth.
5075 */
5076 if (btrfs_is_free_space_inode(inode)) {
5077 flush = BTRFS_RESERVE_NO_FLUSH;
5078 delalloc_lock = false;
5079 }
5080
5081 if (flush != BTRFS_RESERVE_NO_FLUSH &&
5082 btrfs_transaction_in_commit(root->fs_info))
5083 schedule_timeout(1);
5084
5085 if (delalloc_lock)
5086 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5087
5088 num_bytes = ALIGN(num_bytes, root->sectorsize);
5089
5090 spin_lock(&BTRFS_I(inode)->lock);
5091 BTRFS_I(inode)->outstanding_extents++;
5092
5093 if (BTRFS_I(inode)->outstanding_extents >
5094 BTRFS_I(inode)->reserved_extents)
5095 nr_extents = BTRFS_I(inode)->outstanding_extents -
5096 BTRFS_I(inode)->reserved_extents;
5097
5098 /*
5099 * Add an item to reserve for updating the inode when we complete the
5100 * delalloc io.
5101 */
5102 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5103 &BTRFS_I(inode)->runtime_flags)) {
5104 nr_extents++;
5105 extra_reserve = 1;
5106 }
5107
5108 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5109 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5110 csum_bytes = BTRFS_I(inode)->csum_bytes;
5111 spin_unlock(&BTRFS_I(inode)->lock);
5112
5113 if (root->fs_info->quota_enabled) {
5114 ret = btrfs_qgroup_reserve(root, num_bytes +
5115 nr_extents * root->nodesize);
5116 if (ret)
5117 goto out_fail;
5118 }
5119
5120 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5121 if (unlikely(ret)) {
5122 if (root->fs_info->quota_enabled)
5123 btrfs_qgroup_free(root, num_bytes +
5124 nr_extents * root->nodesize);
5125 goto out_fail;
5126 }
5127
5128 spin_lock(&BTRFS_I(inode)->lock);
5129 if (extra_reserve) {
5130 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5131 &BTRFS_I(inode)->runtime_flags);
5132 nr_extents--;
5133 }
5134 BTRFS_I(inode)->reserved_extents += nr_extents;
5135 spin_unlock(&BTRFS_I(inode)->lock);
5136
5137 if (delalloc_lock)
5138 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5139
5140 if (to_reserve)
5141 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5142 btrfs_ino(inode), to_reserve, 1);
5143 block_rsv_add_bytes(block_rsv, to_reserve, 1);
5144
5145 return 0;
5146
5147 out_fail:
5148 spin_lock(&BTRFS_I(inode)->lock);
5149 dropped = drop_outstanding_extent(inode);
5150 /*
5151 * If the inode's csum_bytes is the same as the original
5152 * csum_bytes then we know we haven't raced with any free()ers
5153 * so we can just reduce our inode's csum bytes and carry on.
5154 */
5155 if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5156 calc_csum_metadata_size(inode, num_bytes, 0);
5157 } else {
5158 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5159 u64 bytes;
5160
5161 /*
5162 * This is tricky, but first we need to figure out how much we
5163 * free'd from any free-ers that occurred during this
5164 * reservation, so we reset ->csum_bytes to the csum_bytes
5165 * before we dropped our lock, and then call the free for the
5166 * number of bytes that were freed while we were trying our
5167 * reservation.
5168 */
5169 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5170 BTRFS_I(inode)->csum_bytes = csum_bytes;
5171 to_free = calc_csum_metadata_size(inode, bytes, 0);
5172
5173
5174 /*
5175 * Now we need to see how much we would have freed had we not
5176 * been making this reservation and our ->csum_bytes were not
5177 * artificially inflated.
5178 */
5179 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5180 bytes = csum_bytes - orig_csum_bytes;
5181 bytes = calc_csum_metadata_size(inode, bytes, 0);
5182
5183 /*
5184 * Now reset ->csum_bytes to what it should be. If bytes is
5185 * more than to_free then we would have free'd more space had we
5186 * not had an artificially high ->csum_bytes, so we need to free
5187 * the remainder. If bytes is the same or less then we don't
5188 * need to do anything, the other free-ers did the correct
5189 * thing.
5190 */
5191 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5192 if (bytes > to_free)
5193 to_free = bytes - to_free;
5194 else
5195 to_free = 0;
5196 }
5197 spin_unlock(&BTRFS_I(inode)->lock);
5198 if (dropped)
5199 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5200
5201 if (to_free) {
5202 btrfs_block_rsv_release(root, block_rsv, to_free);
5203 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5204 btrfs_ino(inode), to_free, 0);
5205 }
5206 if (delalloc_lock)
5207 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5208 return ret;
5209 }
5210
5211 /**
5212 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5213 * @inode: the inode to release the reservation for
5214 * @num_bytes: the number of bytes we're releasing
5215 *
5216 * This will release the metadata reservation for an inode. This can be called
5217 * once we complete IO for a given set of bytes to release their metadata
5218 * reservations.
5219 */
5220 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5221 {
5222 struct btrfs_root *root = BTRFS_I(inode)->root;
5223 u64 to_free = 0;
5224 unsigned dropped;
5225
5226 num_bytes = ALIGN(num_bytes, root->sectorsize);
5227 spin_lock(&BTRFS_I(inode)->lock);
5228 dropped = drop_outstanding_extent(inode);
5229
5230 if (num_bytes)
5231 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5232 spin_unlock(&BTRFS_I(inode)->lock);
5233 if (dropped > 0)
5234 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5235
5236 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5237 btrfs_ino(inode), to_free, 0);
5238 if (root->fs_info->quota_enabled) {
5239 btrfs_qgroup_free(root, num_bytes +
5240 dropped * root->nodesize);
5241 }
5242
5243 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5244 to_free);
5245 }
5246
5247 /**
5248 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5249 * @inode: inode we're writing to
5250 * @num_bytes: the number of bytes we want to allocate
5251 *
5252 * This will do the following things
5253 *
5254 * o reserve space in the data space info for num_bytes
5255 * o reserve space in the metadata space info based on number of outstanding
5256 * extents and how much csums will be needed
5257 * o add to the inodes ->delalloc_bytes
5258 * o add it to the fs_info's delalloc inodes list.
5259 *
5260 * This will return 0 for success and -ENOSPC if there is no space left.
5261 */
5262 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5263 {
5264 int ret;
5265
5266 ret = btrfs_check_data_free_space(inode, num_bytes);
5267 if (ret)
5268 return ret;
5269
5270 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5271 if (ret) {
5272 btrfs_free_reserved_data_space(inode, num_bytes);
5273 return ret;
5274 }
5275
5276 return 0;
5277 }
5278
5279 /**
5280 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5281 * @inode: inode we're releasing space for
5282 * @num_bytes: the number of bytes we want to free up
5283 *
5284 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
5285 * called in the case that we don't need the metadata AND data reservations
5286 * anymore, for example if there is an error or we insert an inline extent.
5287 *
5288 * This function will release the metadata space that was not used and will
5289 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5290 * list if there are no delalloc bytes left.
5291 */
5292 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5293 {
5294 btrfs_delalloc_release_metadata(inode, num_bytes);
5295 btrfs_free_reserved_data_space(inode, num_bytes);
5296 }
5297
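/*
 * Account an allocation (alloc != 0) or a free (alloc == 0) of the range
 * [bytenr, bytenr + num_bytes) against the superblock byte counter and the
 * block group(s) covering the range.  Freed space is pinned so it is only
 * reused after the transaction commits.
 */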
5298 static int update_block_group(struct btrfs_trans_handle *trans,
5299 struct btrfs_root *root, u64 bytenr,
5300 u64 num_bytes, int alloc)
5301 {
5302 struct btrfs_block_group_cache *cache = NULL;
5303 struct btrfs_fs_info *info = root->fs_info;
5304 u64 total = num_bytes;
5305 u64 old_val;
5306 u64 byte_in_group;
5307 int factor;
5308
5309 /* block accounting for super block */
5310 spin_lock(&info->delalloc_root_lock);
5311 old_val = btrfs_super_bytes_used(info->super_copy);
5312 if (alloc)
5313 old_val += num_bytes;
5314 else
5315 old_val -= num_bytes;
5316 btrfs_set_super_bytes_used(info->super_copy, old_val);
5317 spin_unlock(&info->delalloc_root_lock);
5318
5319 while (total) {
5320 cache = btrfs_lookup_block_group(info, bytenr);
5321 if (!cache)
5322 return -ENOENT;
5323 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5324 BTRFS_BLOCK_GROUP_RAID1 |
5325 BTRFS_BLOCK_GROUP_RAID10))
5326 factor = 2;
5327 else
5328 factor = 1;
5329 /*
5330 * If this block group has free space cache written out, we
5331 * need to make sure to load it if we are removing space. This
5332 * is because we need the unpinning stage to actually add the
5333 * space back to the block group, otherwise we will leak space.
5334 */
5335 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5336 cache_block_group(cache, 1);
5337
5338 spin_lock(&trans->transaction->dirty_bgs_lock);
5339 if (list_empty(&cache->dirty_list)) {
5340 list_add_tail(&cache->dirty_list,
5341 &trans->transaction->dirty_bgs);
5342 btrfs_get_block_group(cache);
5343 }
5344 spin_unlock(&trans->transaction->dirty_bgs_lock);
5345
5346 byte_in_group = bytenr - cache->key.objectid;
5347 WARN_ON(byte_in_group > cache->key.offset);
5348
5349 spin_lock(&cache->space_info->lock);
5350 spin_lock(&cache->lock);
5351
5352 if (btrfs_test_opt(root, SPACE_CACHE) &&
5353 cache->disk_cache_state < BTRFS_DC_CLEAR)
5354 cache->disk_cache_state = BTRFS_DC_CLEAR;
5355
5356 old_val = btrfs_block_group_used(&cache->item);
5357 num_bytes = min(total, cache->key.offset - byte_in_group);
5358 if (alloc) {
5359 old_val += num_bytes;
5360 btrfs_set_block_group_used(&cache->item, old_val);
5361 cache->reserved -= num_bytes;
5362 cache->space_info->bytes_reserved -= num_bytes;
5363 cache->space_info->bytes_used += num_bytes;
5364 cache->space_info->disk_used += num_bytes * factor;
5365 spin_unlock(&cache->lock);
5366 spin_unlock(&cache->space_info->lock);
5367 } else {
5368 old_val -= num_bytes;
5369 btrfs_set_block_group_used(&cache->item, old_val);
5370 cache->pinned += num_bytes;
5371 cache->space_info->bytes_pinned += num_bytes;
5372 cache->space_info->bytes_used -= num_bytes;
5373 cache->space_info->disk_used -= num_bytes * factor;
5374 spin_unlock(&cache->lock);
5375 spin_unlock(&cache->space_info->lock);
5376
5377 set_extent_dirty(info->pinned_extents,
5378 bytenr, bytenr + num_bytes - 1,
5379 GFP_NOFS | __GFP_NOFAIL);
5380 /*
5381 * No longer have used bytes in this block group, queue
5382 * it for deletion.
5383 */
5384 if (old_val == 0) {
5385 spin_lock(&info->unused_bgs_lock);
5386 if (list_empty(&cache->bg_list)) {
5387 btrfs_get_block_group(cache);
5388 list_add_tail(&cache->bg_list,
5389 &info->unused_bgs);
5390 }
5391 spin_unlock(&info->unused_bgs_lock);
5392 }
5393 }
5394 btrfs_put_block_group(cache);
5395 total -= num_bytes;
5396 bytenr += num_bytes;
5397 }
5398 return 0;
5399 }
5400
5401 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5402 {
5403 struct btrfs_block_group_cache *cache;
5404 u64 bytenr;
5405
5406 spin_lock(&root->fs_info->block_group_cache_lock);
5407 bytenr = root->fs_info->first_logical_byte;
5408 spin_unlock(&root->fs_info->block_group_cache_lock);
5409
5410 if (bytenr < (u64)-1)
5411 return bytenr;
5412
5413 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5414 if (!cache)
5415 return 0;
5416
5417 bytenr = cache->key.objectid;
5418 btrfs_put_block_group(cache);
5419
5420 return bytenr;
5421 }
5422
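/*
 * Pin [bytenr, bytenr + num_bytes) in the given block group and record it
 * in the pinned_extents tree so the space is not reused before the current
 * transaction commits; if 'reserved', the bytes are also removed from the
 * reserved counters.
 */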
5423 static int pin_down_extent(struct btrfs_root *root,
5424 struct btrfs_block_group_cache *cache,
5425 u64 bytenr, u64 num_bytes, int reserved)
5426 {
5427 spin_lock(&cache->space_info->lock);
5428 spin_lock(&cache->lock);
5429 cache->pinned += num_bytes;
5430 cache->space_info->bytes_pinned += num_bytes;
5431 if (reserved) {
5432 cache->reserved -= num_bytes;
5433 cache->space_info->bytes_reserved -= num_bytes;
5434 }
5435 spin_unlock(&cache->lock);
5436 spin_unlock(&cache->space_info->lock);
5437
5438 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5439 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5440 if (reserved)
5441 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5442 return 0;
5443 }
5444
5445 /*
5446 * this function must be called within transaction
5447 */
5448 int btrfs_pin_extent(struct btrfs_root *root,
5449 u64 bytenr, u64 num_bytes, int reserved)
5450 {
5451 struct btrfs_block_group_cache *cache;
5452
5453 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5454 BUG_ON(!cache); /* Logic error */
5455
5456 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5457
5458 btrfs_put_block_group(cache);
5459 return 0;
5460 }
5461
5462 /*
5463 * this function must be called within transaction
5464 */
5465 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5466 u64 bytenr, u64 num_bytes)
5467 {
5468 struct btrfs_block_group_cache *cache;
5469 int ret;
5470
5471 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5472 if (!cache)
5473 return -EINVAL;
5474
5475 /*
5476 * pull in the free space cache (if any) so that our pin
5477 * removes the free space from the cache. We have load_only set
5478 * to one because the slow code to read in the free extents does check
5479 * the pinned extents.
5480 */
5481 cache_block_group(cache, 1);
5482
5483 pin_down_extent(root, cache, bytenr, num_bytes, 0);
5484
5485 /* remove us from the free space cache (if we're there at all) */
5486 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5487 btrfs_put_block_group(cache);
5488 return ret;
5489 }
5490
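/*
 * Make sure a logged extent cannot be handed out by the free space cache
 * during log replay: remove it from the cache if caching has already
 * passed it, otherwise mark it excluded so the caching thread skips it.
 */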
5491 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5492 {
5493 int ret;
5494 struct btrfs_block_group_cache *block_group;
5495 struct btrfs_caching_control *caching_ctl;
5496
5497 block_group = btrfs_lookup_block_group(root->fs_info, start);
5498 if (!block_group)
5499 return -EINVAL;
5500
5501 cache_block_group(block_group, 0);
5502 caching_ctl = get_caching_control(block_group);
5503
5504 if (!caching_ctl) {
5505 /* Logic error */
5506 BUG_ON(!block_group_cache_done(block_group));
5507 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5508 } else {
5509 mutex_lock(&caching_ctl->mutex);
5510
5511 if (start >= caching_ctl->progress) {
5512 ret = add_excluded_extent(root, start, num_bytes);
5513 } else if (start + num_bytes <= caching_ctl->progress) {
5514 ret = btrfs_remove_free_space(block_group,
5515 start, num_bytes);
5516 } else {
5517 num_bytes = caching_ctl->progress - start;
5518 ret = btrfs_remove_free_space(block_group,
5519 start, num_bytes);
5520 if (ret)
5521 goto out_lock;
5522
5523 num_bytes = (start + num_bytes) -
5524 caching_ctl->progress;
5525 start = caching_ctl->progress;
5526 ret = add_excluded_extent(root, start, num_bytes);
5527 }
5528 out_lock:
5529 mutex_unlock(&caching_ctl->mutex);
5530 put_caching_control(caching_ctl);
5531 }
5532 btrfs_put_block_group(block_group);
5533 return ret;
5534 }
5535
5536 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5537 struct extent_buffer *eb)
5538 {
5539 struct btrfs_file_extent_item *item;
5540 struct btrfs_key key;
5541 int found_type;
5542 int i;
5543
5544 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5545 return 0;
5546
5547 for (i = 0; i < btrfs_header_nritems(eb); i++) {
5548 btrfs_item_key_to_cpu(eb, &key, i);
5549 if (key.type != BTRFS_EXTENT_DATA_KEY)
5550 continue;
5551 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5552 found_type = btrfs_file_extent_type(eb, item);
5553 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5554 continue;
5555 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5556 continue;
5557 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5558 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5559 __exclude_logged_extent(log, key.objectid, key.offset);
5560 }
5561
5562 return 0;
5563 }
5564
5565 /**
5566 * btrfs_update_reserved_bytes - update the block_group and space info counters
5567 * @cache: The cache we are manipulating
5568 * @num_bytes: The number of bytes in question
5569 * @reserve: One of the reservation enums
5570 * @delalloc: The blocks are allocated for the delalloc write
5571 *
5572 * This is called by the allocator when it reserves space, or by somebody who is
5573 * freeing space that was never actually used on disk. For example if you
5574 * reserve some space for a new leaf in transaction A and before transaction A
5575 * commits you free that leaf, you call this with reserve set to 0 in order to
5576 * clear the reservation.
5577 *
5578 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5579 * ENOSPC accounting. For data we handle the reservation through clearing the
5580 * delalloc bits in the io_tree. We have to do this since we could end up
5581 * allocating less disk space for the amount of data we have reserved in the
5582 * case of compression.
5583 *
5584 * If this is a reservation and the block group has become read only we cannot
5585 * make the reservation and return -EAGAIN, otherwise this function always
5586 * succeeds.
5587 */
5588 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5589 u64 num_bytes, int reserve, int delalloc)
5590 {
5591 struct btrfs_space_info *space_info = cache->space_info;
5592 int ret = 0;
5593
5594 spin_lock(&space_info->lock);
5595 spin_lock(&cache->lock);
5596 if (reserve != RESERVE_FREE) {
5597 if (cache->ro) {
5598 ret = -EAGAIN;
5599 } else {
5600 cache->reserved += num_bytes;
5601 space_info->bytes_reserved += num_bytes;
5602 if (reserve == RESERVE_ALLOC) {
5603 trace_btrfs_space_reservation(cache->fs_info,
5604 "space_info", space_info->flags,
5605 num_bytes, 0);
5606 space_info->bytes_may_use -= num_bytes;
5607 }
5608
5609 if (delalloc)
5610 cache->delalloc_bytes += num_bytes;
5611 }
5612 } else {
5613 if (cache->ro)
5614 space_info->bytes_readonly += num_bytes;
5615 cache->reserved -= num_bytes;
5616 space_info->bytes_reserved -= num_bytes;
5617
5618 if (delalloc)
5619 cache->delalloc_bytes -= num_bytes;
5620 }
5621 spin_unlock(&cache->lock);
5622 spin_unlock(&space_info->lock);
5623 return ret;
5624 }
5625
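/*
 * Called at transaction commit time.  For every block group still being
 * cached, remember how far caching has progressed in last_byte_to_unpin
 * (or (u64)-1 once caching is complete), then flip fs_info->pinned_extents
 * between freed_extents[0] and freed_extents[1] so the next transaction
 * pins into the other tree, and finally refresh the global block reserve.
 */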
5626 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5627 struct btrfs_root *root)
5628 {
5629 struct btrfs_fs_info *fs_info = root->fs_info;
5630 struct btrfs_caching_control *next;
5631 struct btrfs_caching_control *caching_ctl;
5632 struct btrfs_block_group_cache *cache;
5633
5634 down_write(&fs_info->commit_root_sem);
5635
5636 list_for_each_entry_safe(caching_ctl, next,
5637 &fs_info->caching_block_groups, list) {
5638 cache = caching_ctl->block_group;
5639 if (block_group_cache_done(cache)) {
5640 cache->last_byte_to_unpin = (u64)-1;
5641 list_del_init(&caching_ctl->list);
5642 put_caching_control(caching_ctl);
5643 } else {
5644 cache->last_byte_to_unpin = caching_ctl->progress;
5645 }
5646 }
5647
5648 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5649 fs_info->pinned_extents = &fs_info->freed_extents[1];
5650 else
5651 fs_info->pinned_extents = &fs_info->freed_extents[0];
5652
5653 up_write(&fs_info->commit_root_sem);
5654
5655 update_global_block_rsv(fs_info);
5656 }
5657
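/*
 * Unpin every extent in [start, end], walking the range block group by
 * block group.  Bytes below last_byte_to_unpin are returned to the free
 * space cache when return_free_space is true, the pinned counters are
 * dropped, and freshly unpinned space tops up the global block reserve
 * while it is not yet full.
 */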
5658 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
5659 const bool return_free_space)
5660 {
5661 struct btrfs_fs_info *fs_info = root->fs_info;
5662 struct btrfs_block_group_cache *cache = NULL;
5663 struct btrfs_space_info *space_info;
5664 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5665 u64 len;
5666 bool readonly;
5667
5668 while (start <= end) {
5669 readonly = false;
5670 if (!cache ||
5671 start >= cache->key.objectid + cache->key.offset) {
5672 if (cache)
5673 btrfs_put_block_group(cache);
5674 cache = btrfs_lookup_block_group(fs_info, start);
5675 BUG_ON(!cache); /* Logic error */
5676 }
5677
5678 len = cache->key.objectid + cache->key.offset - start;
5679 len = min(len, end + 1 - start);
5680
5681 if (start < cache->last_byte_to_unpin) {
5682 len = min(len, cache->last_byte_to_unpin - start);
5683 if (return_free_space)
5684 btrfs_add_free_space(cache, start, len);
5685 }
5686
5687 start += len;
5688 space_info = cache->space_info;
5689
5690 spin_lock(&space_info->lock);
5691 spin_lock(&cache->lock);
5692 cache->pinned -= len;
5693 space_info->bytes_pinned -= len;
5694 percpu_counter_add(&space_info->total_bytes_pinned, -len);
5695 if (cache->ro) {
5696 space_info->bytes_readonly += len;
5697 readonly = true;
5698 }
5699 spin_unlock(&cache->lock);
5700 if (!readonly && global_rsv->space_info == space_info) {
5701 spin_lock(&global_rsv->lock);
5702 if (!global_rsv->full) {
5703 len = min(len, global_rsv->size -
5704 global_rsv->reserved);
5705 global_rsv->reserved += len;
5706 space_info->bytes_may_use += len;
5707 if (global_rsv->reserved >= global_rsv->size)
5708 global_rsv->full = 1;
5709 }
5710 spin_unlock(&global_rsv->lock);
5711 }
5712 spin_unlock(&space_info->lock);
5713 }
5714
5715 if (cache)
5716 btrfs_put_block_group(cache);
5717 return 0;
5718 }
5719
5720 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5721 struct btrfs_root *root)
5722 {
5723 struct btrfs_fs_info *fs_info = root->fs_info;
5724 struct extent_io_tree *unpin;
5725 u64 start;
5726 u64 end;
5727 int ret;
5728
5729 if (trans->aborted)
5730 return 0;
5731
5732 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5733 unpin = &fs_info->freed_extents[1];
5734 else
5735 unpin = &fs_info->freed_extents[0];
5736
5737 while (1) {
5738 mutex_lock(&fs_info->unused_bg_unpin_mutex);
5739 ret = find_first_extent_bit(unpin, 0, &start, &end,
5740 EXTENT_DIRTY, NULL);
5741 if (ret) {
5742 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
5743 break;
5744 }
5745
5746 if (btrfs_test_opt(root, DISCARD))
5747 ret = btrfs_discard_extent(root, start,
5748 end + 1 - start, NULL);
5749
5750 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5751 unpin_extent_range(root, start, end, true);
5752 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
5753 cond_resched();
5754 }
5755
5756 return 0;
5757 }
5758
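/*
 * Adjust the total_bytes_pinned counter of the space info matching the
 * given owner/root: system for chunk tree blocks, metadata for other tree
 * blocks, data for file extents.  Callers pass a negative num_bytes to
 * subtract from the counter.
 */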
5759 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5760 u64 owner, u64 root_objectid)
5761 {
5762 struct btrfs_space_info *space_info;
5763 u64 flags;
5764
5765 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5766 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5767 flags = BTRFS_BLOCK_GROUP_SYSTEM;
5768 else
5769 flags = BTRFS_BLOCK_GROUP_METADATA;
5770 } else {
5771 flags = BTRFS_BLOCK_GROUP_DATA;
5772 }
5773
5774 space_info = __find_space_info(fs_info, flags);
5775 BUG_ON(!space_info); /* Logic bug */
5776 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5777 }
5778
5779
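/*
 * Drop refs_to_drop references from the extent at bytenr.  The matching
 * backref is located and removed; once the extent's reference count hits
 * zero the extent item itself is deleted, csums are dropped for data
 * extents and the block group counters are updated.  Any required qgroup
 * operation is recorded before returning.
 */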
5780 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5781 struct btrfs_root *root,
5782 u64 bytenr, u64 num_bytes, u64 parent,
5783 u64 root_objectid, u64 owner_objectid,
5784 u64 owner_offset, int refs_to_drop,
5785 struct btrfs_delayed_extent_op *extent_op,
5786 int no_quota)
5787 {
5788 struct btrfs_key key;
5789 struct btrfs_path *path;
5790 struct btrfs_fs_info *info = root->fs_info;
5791 struct btrfs_root *extent_root = info->extent_root;
5792 struct extent_buffer *leaf;
5793 struct btrfs_extent_item *ei;
5794 struct btrfs_extent_inline_ref *iref;
5795 int ret;
5796 int is_data;
5797 int extent_slot = 0;
5798 int found_extent = 0;
5799 int num_to_del = 1;
5800 u32 item_size;
5801 u64 refs;
5802 int last_ref = 0;
5803 enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5804 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5805 SKINNY_METADATA);
5806
5807 if (!info->quota_enabled || !is_fstree(root_objectid))
5808 no_quota = 1;
5809
5810 path = btrfs_alloc_path();
5811 if (!path)
5812 return -ENOMEM;
5813
5814 path->reada = 1;
5815 path->leave_spinning = 1;
5816
5817 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5818 BUG_ON(!is_data && refs_to_drop != 1);
5819
5820 if (is_data)
5821 skinny_metadata = 0;
5822
5823 ret = lookup_extent_backref(trans, extent_root, path, &iref,
5824 bytenr, num_bytes, parent,
5825 root_objectid, owner_objectid,
5826 owner_offset);
5827 if (ret == 0) {
5828 extent_slot = path->slots[0];
5829 while (extent_slot >= 0) {
5830 btrfs_item_key_to_cpu(path->nodes[0], &key,
5831 extent_slot);
5832 if (key.objectid != bytenr)
5833 break;
5834 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5835 key.offset == num_bytes) {
5836 found_extent = 1;
5837 break;
5838 }
5839 if (key.type == BTRFS_METADATA_ITEM_KEY &&
5840 key.offset == owner_objectid) {
5841 found_extent = 1;
5842 break;
5843 }
5844 if (path->slots[0] - extent_slot > 5)
5845 break;
5846 extent_slot--;
5847 }
5848 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5849 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5850 if (found_extent && item_size < sizeof(*ei))
5851 found_extent = 0;
5852 #endif
5853 if (!found_extent) {
5854 BUG_ON(iref);
5855 ret = remove_extent_backref(trans, extent_root, path,
5856 NULL, refs_to_drop,
5857 is_data, &last_ref);
5858 if (ret) {
5859 btrfs_abort_transaction(trans, extent_root, ret);
5860 goto out;
5861 }
5862 btrfs_release_path(path);
5863 path->leave_spinning = 1;
5864
5865 key.objectid = bytenr;
5866 key.type = BTRFS_EXTENT_ITEM_KEY;
5867 key.offset = num_bytes;
5868
5869 if (!is_data && skinny_metadata) {
5870 key.type = BTRFS_METADATA_ITEM_KEY;
5871 key.offset = owner_objectid;
5872 }
5873
5874 ret = btrfs_search_slot(trans, extent_root,
5875 &key, path, -1, 1);
5876 if (ret > 0 && skinny_metadata && path->slots[0]) {
5877 /*
5878 * Couldn't find our skinny metadata item,
5879 * see if we have ye olde extent item.
5880 */
5881 path->slots[0]--;
5882 btrfs_item_key_to_cpu(path->nodes[0], &key,
5883 path->slots[0]);
5884 if (key.objectid == bytenr &&
5885 key.type == BTRFS_EXTENT_ITEM_KEY &&
5886 key.offset == num_bytes)
5887 ret = 0;
5888 }
5889
5890 if (ret > 0 && skinny_metadata) {
5891 skinny_metadata = false;
5892 key.objectid = bytenr;
5893 key.type = BTRFS_EXTENT_ITEM_KEY;
5894 key.offset = num_bytes;
5895 btrfs_release_path(path);
5896 ret = btrfs_search_slot(trans, extent_root,
5897 &key, path, -1, 1);
5898 }
5899
5900 if (ret) {
5901 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5902 ret, bytenr);
5903 if (ret > 0)
5904 btrfs_print_leaf(extent_root,
5905 path->nodes[0]);
5906 }
5907 if (ret < 0) {
5908 btrfs_abort_transaction(trans, extent_root, ret);
5909 goto out;
5910 }
5911 extent_slot = path->slots[0];
5912 }
5913 } else if (WARN_ON(ret == -ENOENT)) {
5914 btrfs_print_leaf(extent_root, path->nodes[0]);
5915 btrfs_err(info,
5916 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5917 bytenr, parent, root_objectid, owner_objectid,
5918 owner_offset);
5919 btrfs_abort_transaction(trans, extent_root, ret);
5920 goto out;
5921 } else {
5922 btrfs_abort_transaction(trans, extent_root, ret);
5923 goto out;
5924 }
5925
5926 leaf = path->nodes[0];
5927 item_size = btrfs_item_size_nr(leaf, extent_slot);
5928 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5929 if (item_size < sizeof(*ei)) {
5930 BUG_ON(found_extent || extent_slot != path->slots[0]);
5931 ret = convert_extent_item_v0(trans, extent_root, path,
5932 owner_objectid, 0);
5933 if (ret < 0) {
5934 btrfs_abort_transaction(trans, extent_root, ret);
5935 goto out;
5936 }
5937
5938 btrfs_release_path(path);
5939 path->leave_spinning = 1;
5940
5941 key.objectid = bytenr;
5942 key.type = BTRFS_EXTENT_ITEM_KEY;
5943 key.offset = num_bytes;
5944
5945 ret = btrfs_search_slot(trans, extent_root, &key, path,
5946 -1, 1);
5947 if (ret) {
5948 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5949 ret, bytenr);
5950 btrfs_print_leaf(extent_root, path->nodes[0]);
5951 }
5952 if (ret < 0) {
5953 btrfs_abort_transaction(trans, extent_root, ret);
5954 goto out;
5955 }
5956
5957 extent_slot = path->slots[0];
5958 leaf = path->nodes[0];
5959 item_size = btrfs_item_size_nr(leaf, extent_slot);
5960 }
5961 #endif
5962 BUG_ON(item_size < sizeof(*ei));
5963 ei = btrfs_item_ptr(leaf, extent_slot,
5964 struct btrfs_extent_item);
5965 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5966 key.type == BTRFS_EXTENT_ITEM_KEY) {
5967 struct btrfs_tree_block_info *bi;
5968 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5969 bi = (struct btrfs_tree_block_info *)(ei + 1);
5970 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5971 }
5972
5973 refs = btrfs_extent_refs(leaf, ei);
5974 if (refs < refs_to_drop) {
5975 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5976 "for bytenr %Lu", refs_to_drop, refs, bytenr);
5977 ret = -EINVAL;
5978 btrfs_abort_transaction(trans, extent_root, ret);
5979 goto out;
5980 }
5981 refs -= refs_to_drop;
5982
5983 if (refs > 0) {
5984 type = BTRFS_QGROUP_OPER_SUB_SHARED;
5985 if (extent_op)
5986 __run_delayed_extent_op(extent_op, leaf, ei);
5987 /*
5988                  * In the case of an inline back ref, the reference count will
5989 * be updated by remove_extent_backref
5990 */
5991 if (iref) {
5992 BUG_ON(!found_extent);
5993 } else {
5994 btrfs_set_extent_refs(leaf, ei, refs);
5995 btrfs_mark_buffer_dirty(leaf);
5996 }
5997 if (found_extent) {
5998 ret = remove_extent_backref(trans, extent_root, path,
5999 iref, refs_to_drop,
6000 is_data, &last_ref);
6001 if (ret) {
6002 btrfs_abort_transaction(trans, extent_root, ret);
6003 goto out;
6004 }
6005 }
6006 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6007 root_objectid);
6008 } else {
6009 if (found_extent) {
6010 BUG_ON(is_data && refs_to_drop !=
6011 extent_data_ref_count(root, path, iref));
6012 if (iref) {
6013 BUG_ON(path->slots[0] != extent_slot);
6014 } else {
6015 BUG_ON(path->slots[0] != extent_slot + 1);
6016 path->slots[0] = extent_slot;
6017 num_to_del = 2;
6018 }
6019 }
6020
6021 last_ref = 1;
6022 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6023 num_to_del);
6024 if (ret) {
6025 btrfs_abort_transaction(trans, extent_root, ret);
6026 goto out;
6027 }
6028 btrfs_release_path(path);
6029
6030 if (is_data) {
6031 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6032 if (ret) {
6033 btrfs_abort_transaction(trans, extent_root, ret);
6034 goto out;
6035 }
6036 }
6037
6038 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6039 if (ret) {
6040 btrfs_abort_transaction(trans, extent_root, ret);
6041 goto out;
6042 }
6043 }
6044 btrfs_release_path(path);
6045
6046 /* Deal with the quota accounting */
6047 if (!ret && last_ref && !no_quota) {
6048 int mod_seq = 0;
6049
6050 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6051 type == BTRFS_QGROUP_OPER_SUB_SHARED)
6052 mod_seq = 1;
6053
6054 ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6055 bytenr, num_bytes, type,
6056 mod_seq);
6057 }
6058 out:
6059 btrfs_free_path(path);
6060 return ret;
6061 }
6062
6063 /*
6064  * when we free a block, it is possible (and likely) that we free the last
6065 * delayed ref for that extent as well. This searches the delayed ref tree for
6066 * a given extent, and if there are no other delayed refs to be processed, it
6067 * removes it from the tree.
6068 */
6069 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6070 struct btrfs_root *root, u64 bytenr)
6071 {
6072 struct btrfs_delayed_ref_head *head;
6073 struct btrfs_delayed_ref_root *delayed_refs;
6074 int ret = 0;
6075
6076 delayed_refs = &trans->transaction->delayed_refs;
6077 spin_lock(&delayed_refs->lock);
6078 head = btrfs_find_delayed_ref_head(trans, bytenr);
6079 if (!head)
6080 goto out_delayed_unlock;
6081
6082 spin_lock(&head->lock);
6083 if (rb_first(&head->ref_root))
6084 goto out;
6085
6086 if (head->extent_op) {
6087 if (!head->must_insert_reserved)
6088 goto out;
6089 btrfs_free_delayed_extent_op(head->extent_op);
6090 head->extent_op = NULL;
6091 }
6092
6093 /*
6094 * waiting for the lock here would deadlock. If someone else has it
6095 * locked they are already in the process of dropping it anyway
6096 */
6097 if (!mutex_trylock(&head->mutex))
6098 goto out;
6099
6100 /*
6101 * at this point we have a head with no other entries. Go
6102 * ahead and process it.
6103 */
6104 head->node.in_tree = 0;
6105 rb_erase(&head->href_node, &delayed_refs->href_root);
6106
6107 atomic_dec(&delayed_refs->num_entries);
6108
6109 /*
6110 * we don't take a ref on the node because we're removing it from the
6111 * tree, so we just steal the ref the tree was holding.
6112 */
6113 delayed_refs->num_heads--;
6114 if (head->processing == 0)
6115 delayed_refs->num_heads_ready--;
6116 head->processing = 0;
6117 spin_unlock(&head->lock);
6118 spin_unlock(&delayed_refs->lock);
6119
6120 BUG_ON(head->extent_op);
6121 if (head->must_insert_reserved)
6122 ret = 1;
6123
6124 mutex_unlock(&head->mutex);
6125 btrfs_put_delayed_ref(&head->node);
6126 return ret;
6127 out:
6128 spin_unlock(&head->lock);
6129
6130 out_delayed_unlock:
6131 spin_unlock(&delayed_refs->lock);
6132 return 0;
6133 }
6134
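/*
 * Free a tree block.  A delayed ref drop is queued unless the block belongs
 * to a log tree.  When last_ref is set and the block was allocated in the
 * current transaction and never written to disk, its space is returned
 * straight to the free space cache; otherwise the bytes are counted towards
 * the space info's pinned total until the transaction commits.
 */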
6135 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6136 struct btrfs_root *root,
6137 struct extent_buffer *buf,
6138 u64 parent, int last_ref)
6139 {
6140 int pin = 1;
6141 int ret;
6142
6143 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6144 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6145 buf->start, buf->len,
6146 parent, root->root_key.objectid,
6147 btrfs_header_level(buf),
6148 BTRFS_DROP_DELAYED_REF, NULL, 0);
6149 BUG_ON(ret); /* -ENOMEM */
6150 }
6151
6152 if (!last_ref)
6153 return;
6154
6155 if (btrfs_header_generation(buf) == trans->transid) {
6156 struct btrfs_block_group_cache *cache;
6157
6158 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6159 ret = check_ref_cleanup(trans, root, buf->start);
6160 if (!ret)
6161 goto out;
6162 }
6163
6164 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6165
6166 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6167 pin_down_extent(root, cache, buf->start, buf->len, 1);
6168 btrfs_put_block_group(cache);
6169 goto out;
6170 }
6171
6172 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6173
6174 btrfs_add_free_space(cache, buf->start, buf->len);
6175 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6176 btrfs_put_block_group(cache);
6177 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6178 pin = 0;
6179 }
6180 out:
6181 if (pin)
6182 add_pinned_bytes(root->fs_info, buf->len,
6183 btrfs_header_level(buf),
6184 root->root_key.objectid);
6185
6186 /*
6187 * Deleting the buffer, clear the corrupt flag since it doesn't matter
6188 * anymore.
6189 */
6190 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6191 }
6192
6193 /* Can return -ENOMEM */
6194 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6195 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6196 u64 owner, u64 offset, int no_quota)
6197 {
6198 int ret;
6199 struct btrfs_fs_info *fs_info = root->fs_info;
6200
6201 if (btrfs_test_is_dummy_root(root))
6202 return 0;
6203
6204 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6205
6206 /*
6207 * tree log blocks never actually go into the extent allocation
6208 * tree, just update pinning info and exit early.
6209 */
6210 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6211 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6212 /* unlocks the pinned mutex */
6213 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6214 ret = 0;
6215 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6216 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6217 num_bytes,
6218 parent, root_objectid, (int)owner,
6219 BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6220 } else {
6221 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6222 num_bytes,
6223 parent, root_objectid, owner,
6224 offset, BTRFS_DROP_DELAYED_REF,
6225 NULL, no_quota);
6226 }
6227 return ret;
6228 }
6229
6230 /*
6231  * when we wait for progress in the block group caching, it's because
6232 * our allocation attempt failed at least once. So, we must sleep
6233 * and let some progress happen before we try again.
6234 *
6235 * This function will sleep at least once waiting for new free space to
6236 * show up, and then it will check the block group free space numbers
6237 * for our min num_bytes. Another option is to have it go ahead
6238 * and look in the rbtree for a free extent of a given size, but this
6239 * is a good start.
6240 *
6241 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6242 * any of the information in this block group.
6243 */
6244 static noinline void
6245 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6246 u64 num_bytes)
6247 {
6248 struct btrfs_caching_control *caching_ctl;
6249
6250 caching_ctl = get_caching_control(cache);
6251 if (!caching_ctl)
6252 return;
6253
6254 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6255 (cache->free_space_ctl->free_space >= num_bytes));
6256
6257 put_caching_control(caching_ctl);
6258 }
6259
6260 static noinline int
6261 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6262 {
6263 struct btrfs_caching_control *caching_ctl;
6264 int ret = 0;
6265
6266 caching_ctl = get_caching_control(cache);
6267 if (!caching_ctl)
6268 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6269
6270 wait_event(caching_ctl->wait, block_group_cache_done(cache));
6271 if (cache->cached == BTRFS_CACHE_ERROR)
6272 ret = -EIO;
6273 put_caching_control(caching_ctl);
6274 return ret;
6275 }
6276
6277 int __get_raid_index(u64 flags)
6278 {
6279 if (flags & BTRFS_BLOCK_GROUP_RAID10)
6280 return BTRFS_RAID_RAID10;
6281 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6282 return BTRFS_RAID_RAID1;
6283 else if (flags & BTRFS_BLOCK_GROUP_DUP)
6284 return BTRFS_RAID_DUP;
6285 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6286 return BTRFS_RAID_RAID0;
6287 else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6288 return BTRFS_RAID_RAID5;
6289 else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6290 return BTRFS_RAID_RAID6;
6291
6292 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6293 }
6294
6295 int get_block_group_index(struct btrfs_block_group_cache *cache)
6296 {
6297 return __get_raid_index(cache->flags);
6298 }
6299
6300 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6301 [BTRFS_RAID_RAID10] = "raid10",
6302 [BTRFS_RAID_RAID1] = "raid1",
6303 [BTRFS_RAID_DUP] = "dup",
6304 [BTRFS_RAID_RAID0] = "raid0",
6305 [BTRFS_RAID_SINGLE] = "single",
6306 [BTRFS_RAID_RAID5] = "raid5",
6307 [BTRFS_RAID_RAID6] = "raid6",
6308 };
6309
6310 static const char *get_raid_name(enum btrfs_raid_types type)
6311 {
6312 if (type >= BTRFS_NR_RAID_TYPES)
6313 return NULL;
6314
6315 return btrfs_raid_type_names[type];
6316 }
6317
6318 enum btrfs_loop_type {
6319 LOOP_CACHING_NOWAIT = 0,
6320 LOOP_CACHING_WAIT = 1,
6321 LOOP_ALLOC_CHUNK = 2,
6322 LOOP_NO_EMPTY_SIZE = 3,
6323 };
6324
6325 static inline void
6326 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6327 int delalloc)
6328 {
6329 if (delalloc)
6330 down_read(&cache->data_rwsem);
6331 }
6332
6333 static inline void
6334 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6335 int delalloc)
6336 {
6337 btrfs_get_block_group(cache);
6338 if (delalloc)
6339 down_read(&cache->data_rwsem);
6340 }
6341
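/*
 * Return the block group currently backing the free cluster, with the
 * cluster's refill_lock held and, for delalloc allocations, the block
 * group's data_rwsem taken for reading.  Because taking data_rwsem may
 * require dropping refill_lock, the lookup is retried until the cluster's
 * block group is observed unchanged while both locks are held.
 */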
6342 static struct btrfs_block_group_cache *
6343 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6344 struct btrfs_free_cluster *cluster,
6345 int delalloc)
6346 {
6347 struct btrfs_block_group_cache *used_bg;
6348 bool locked = false;
6349 again:
6350 spin_lock(&cluster->refill_lock);
6351 if (locked) {
6352 if (used_bg == cluster->block_group)
6353 return used_bg;
6354
6355 up_read(&used_bg->data_rwsem);
6356 btrfs_put_block_group(used_bg);
6357 }
6358
6359 used_bg = cluster->block_group;
6360 if (!used_bg)
6361 return NULL;
6362
6363 if (used_bg == block_group)
6364 return used_bg;
6365
6366 btrfs_get_block_group(used_bg);
6367
6368 if (!delalloc)
6369 return used_bg;
6370
6371 if (down_read_trylock(&used_bg->data_rwsem))
6372 return used_bg;
6373
6374 spin_unlock(&cluster->refill_lock);
6375 down_read(&used_bg->data_rwsem);
6376 locked = true;
6377 goto again;
6378 }
6379
6380 static inline void
6381 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6382 int delalloc)
6383 {
6384 if (delalloc)
6385 up_read(&cache->data_rwsem);
6386 btrfs_put_block_group(cache);
6387 }
6388
6389 /*
6390  * walks the btree of allocated extents and finds a hole of a given size.
6391 * The key ins is changed to record the hole:
6392 * ins->objectid == start position
6393 * ins->flags = BTRFS_EXTENT_ITEM_KEY
6394 * ins->offset == the size of the hole.
6395 * Any available blocks before search_start are skipped.
6396 *
6397 * If there is no suitable free space, we will record the max size of
6398 * the free space extent currently.
6399 */
6400 static noinline int find_free_extent(struct btrfs_root *orig_root,
6401 u64 num_bytes, u64 empty_size,
6402 u64 hint_byte, struct btrfs_key *ins,
6403 u64 flags, int delalloc)
6404 {
6405 int ret = 0;
6406 struct btrfs_root *root = orig_root->fs_info->extent_root;
6407 struct btrfs_free_cluster *last_ptr = NULL;
6408 struct btrfs_block_group_cache *block_group = NULL;
6409 u64 search_start = 0;
6410 u64 max_extent_size = 0;
6411 int empty_cluster = 2 * 1024 * 1024;
6412 struct btrfs_space_info *space_info;
6413 int loop = 0;
6414 int index = __get_raid_index(flags);
6415 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6416 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6417 bool failed_cluster_refill = false;
6418 bool failed_alloc = false;
6419 bool use_cluster = true;
6420 bool have_caching_bg = false;
6421
6422 WARN_ON(num_bytes < root->sectorsize);
6423 ins->type = BTRFS_EXTENT_ITEM_KEY;
6424 ins->objectid = 0;
6425 ins->offset = 0;
6426
6427 trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6428
6429 space_info = __find_space_info(root->fs_info, flags);
6430 if (!space_info) {
6431 btrfs_err(root->fs_info, "No space info for %llu", flags);
6432 return -ENOSPC;
6433 }
6434
6435 /*
6436 * If the space info is for both data and metadata it means we have a
6437 * small filesystem and we can't use the clustering stuff.
6438 */
6439 if (btrfs_mixed_space_info(space_info))
6440 use_cluster = false;
6441
6442 if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6443 last_ptr = &root->fs_info->meta_alloc_cluster;
6444 if (!btrfs_test_opt(root, SSD))
6445 empty_cluster = 64 * 1024;
6446 }
6447
6448 if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6449 btrfs_test_opt(root, SSD)) {
6450 last_ptr = &root->fs_info->data_alloc_cluster;
6451 }
6452
6453 if (last_ptr) {
6454 spin_lock(&last_ptr->lock);
6455 if (last_ptr->block_group)
6456 hint_byte = last_ptr->window_start;
6457 spin_unlock(&last_ptr->lock);
6458 }
6459
6460 search_start = max(search_start, first_logical_byte(root, 0));
6461 search_start = max(search_start, hint_byte);
6462
6463 if (!last_ptr)
6464 empty_cluster = 0;
6465
6466 if (search_start == hint_byte) {
6467 block_group = btrfs_lookup_block_group(root->fs_info,
6468 search_start);
6469 /*
6470 * we don't want to use the block group if it doesn't match our
6471 * allocation bits, or if its not cached.
6472 *
6473 * However if we are re-searching with an ideal block group
6474 * picked out then we don't care that the block group is cached.
6475 */
6476 if (block_group && block_group_bits(block_group, flags) &&
6477 block_group->cached != BTRFS_CACHE_NO) {
6478 down_read(&space_info->groups_sem);
6479 if (list_empty(&block_group->list) ||
6480 block_group->ro) {
6481 /*
6482 * someone is removing this block group,
6483 * we can't jump into the have_block_group
6484 * target because our list pointers are not
6485 * valid
6486 */
6487 btrfs_put_block_group(block_group);
6488 up_read(&space_info->groups_sem);
6489 } else {
6490 index = get_block_group_index(block_group);
6491 btrfs_lock_block_group(block_group, delalloc);
6492 goto have_block_group;
6493 }
6494 } else if (block_group) {
6495 btrfs_put_block_group(block_group);
6496 }
6497 }
6498 search:
6499 have_caching_bg = false;
6500 down_read(&space_info->groups_sem);
6501 list_for_each_entry(block_group, &space_info->block_groups[index],
6502 list) {
6503 u64 offset;
6504 int cached;
6505
6506 btrfs_grab_block_group(block_group, delalloc);
6507 search_start = block_group->key.objectid;
6508
6509 /*
6510 * this can happen if we end up cycling through all the
6511 * raid types, but we want to make sure we only allocate
6512 * for the proper type.
6513 */
6514 if (!block_group_bits(block_group, flags)) {
6515 u64 extra = BTRFS_BLOCK_GROUP_DUP |
6516 BTRFS_BLOCK_GROUP_RAID1 |
6517 BTRFS_BLOCK_GROUP_RAID5 |
6518 BTRFS_BLOCK_GROUP_RAID6 |
6519 BTRFS_BLOCK_GROUP_RAID10;
6520
6521 /*
6522 * if they asked for extra copies and this block group
6523 * doesn't provide them, bail. This does allow us to
6524 * fill raid0 from raid1.
6525 */
6526 if ((flags & extra) && !(block_group->flags & extra))
6527 goto loop;
6528 }
6529
6530 have_block_group:
6531 cached = block_group_cache_done(block_group);
6532 if (unlikely(!cached)) {
6533 ret = cache_block_group(block_group, 0);
6534 BUG_ON(ret < 0);
6535 ret = 0;
6536 }
6537
6538 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6539 goto loop;
6540 if (unlikely(block_group->ro))
6541 goto loop;
6542
6543 /*
6544 * Ok we want to try and use the cluster allocator, so
6545 * lets look there
6546 */
6547 if (last_ptr) {
6548 struct btrfs_block_group_cache *used_block_group;
6549 unsigned long aligned_cluster;
6550 /*
6551 * the refill lock keeps out other
6552 * people trying to start a new cluster
6553 */
6554 used_block_group = btrfs_lock_cluster(block_group,
6555 last_ptr,
6556 delalloc);
6557 if (!used_block_group)
6558 goto refill_cluster;
6559
6560 if (used_block_group != block_group &&
6561 (used_block_group->ro ||
6562 !block_group_bits(used_block_group, flags)))
6563 goto release_cluster;
6564
6565 offset = btrfs_alloc_from_cluster(used_block_group,
6566 last_ptr,
6567 num_bytes,
6568 used_block_group->key.objectid,
6569 &max_extent_size);
6570 if (offset) {
6571 /* we have a block, we're done */
6572 spin_unlock(&last_ptr->refill_lock);
6573 trace_btrfs_reserve_extent_cluster(root,
6574 used_block_group,
6575 search_start, num_bytes);
6576 if (used_block_group != block_group) {
6577 btrfs_release_block_group(block_group,
6578 delalloc);
6579 block_group = used_block_group;
6580 }
6581 goto checks;
6582 }
6583
6584 WARN_ON(last_ptr->block_group != used_block_group);
6585 release_cluster:
6586 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6587                          * set up a new cluster, so let's just skip it
6588 * and let the allocator find whatever block
6589 * it can find. If we reach this point, we
6590 * will have tried the cluster allocator
6591 * plenty of times and not have found
6592 * anything, so we are likely way too
6593 * fragmented for the clustering stuff to find
6594 * anything.
6595 *
6596 * However, if the cluster is taken from the
6597 * current block group, release the cluster
6598 * first, so that we stand a better chance of
6599 * succeeding in the unclustered
6600 * allocation. */
6601 if (loop >= LOOP_NO_EMPTY_SIZE &&
6602 used_block_group != block_group) {
6603 spin_unlock(&last_ptr->refill_lock);
6604 btrfs_release_block_group(used_block_group,
6605 delalloc);
6606 goto unclustered_alloc;
6607 }
6608
6609 /*
6610 * this cluster didn't work out, free it and
6611 * start over
6612 */
6613 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6614
6615 if (used_block_group != block_group)
6616 btrfs_release_block_group(used_block_group,
6617 delalloc);
6618 refill_cluster:
6619 if (loop >= LOOP_NO_EMPTY_SIZE) {
6620 spin_unlock(&last_ptr->refill_lock);
6621 goto unclustered_alloc;
6622 }
6623
6624 aligned_cluster = max_t(unsigned long,
6625 empty_cluster + empty_size,
6626 block_group->full_stripe_len);
6627
6628 /* allocate a cluster in this block group */
6629 ret = btrfs_find_space_cluster(root, block_group,
6630 last_ptr, search_start,
6631 num_bytes,
6632 aligned_cluster);
6633 if (ret == 0) {
6634 /*
6635 * now pull our allocation out of this
6636 * cluster
6637 */
6638 offset = btrfs_alloc_from_cluster(block_group,
6639 last_ptr,
6640 num_bytes,
6641 search_start,
6642 &max_extent_size);
6643 if (offset) {
6644 /* we found one, proceed */
6645 spin_unlock(&last_ptr->refill_lock);
6646 trace_btrfs_reserve_extent_cluster(root,
6647 block_group, search_start,
6648 num_bytes);
6649 goto checks;
6650 }
6651 } else if (!cached && loop > LOOP_CACHING_NOWAIT
6652 && !failed_cluster_refill) {
6653 spin_unlock(&last_ptr->refill_lock);
6654
6655 failed_cluster_refill = true;
6656 wait_block_group_cache_progress(block_group,
6657 num_bytes + empty_cluster + empty_size);
6658 goto have_block_group;
6659 }
6660
6661 /*
6662 * at this point we either didn't find a cluster
6663 * or we weren't able to allocate a block from our
6664 * cluster. Free the cluster we've been trying
6665 * to use, and go to the next block group
6666 */
6667 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6668 spin_unlock(&last_ptr->refill_lock);
6669 goto loop;
6670 }
6671
6672 unclustered_alloc:
6673 spin_lock(&block_group->free_space_ctl->tree_lock);
6674 if (cached &&
6675 block_group->free_space_ctl->free_space <
6676 num_bytes + empty_cluster + empty_size) {
6677 if (block_group->free_space_ctl->free_space >
6678 max_extent_size)
6679 max_extent_size =
6680 block_group->free_space_ctl->free_space;
6681 spin_unlock(&block_group->free_space_ctl->tree_lock);
6682 goto loop;
6683 }
6684 spin_unlock(&block_group->free_space_ctl->tree_lock);
6685
6686 offset = btrfs_find_space_for_alloc(block_group, search_start,
6687 num_bytes, empty_size,
6688 &max_extent_size);
6689 /*
6690 * If we didn't find a chunk, and we haven't failed on this
6691 * block group before, and this block group is in the middle of
6692 * caching and we are ok with waiting, then go ahead and wait
6693 * for progress to be made, and set failed_alloc to true.
6694 *
6695 * If failed_alloc is true then we've already waited on this
6696 * block group once and should move on to the next block group.
6697 */
6698 if (!offset && !failed_alloc && !cached &&
6699 loop > LOOP_CACHING_NOWAIT) {
6700 wait_block_group_cache_progress(block_group,
6701 num_bytes + empty_size);
6702 failed_alloc = true;
6703 goto have_block_group;
6704 } else if (!offset) {
6705 if (!cached)
6706 have_caching_bg = true;
6707 goto loop;
6708 }
6709 checks:
6710 search_start = ALIGN(offset, root->stripesize);
6711
6712 /* move on to the next group */
6713 if (search_start + num_bytes >
6714 block_group->key.objectid + block_group->key.offset) {
6715 btrfs_add_free_space(block_group, offset, num_bytes);
6716 goto loop;
6717 }
6718
6719 if (offset < search_start)
6720 btrfs_add_free_space(block_group, offset,
6721 search_start - offset);
6722 BUG_ON(offset > search_start);
6723
6724 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6725 alloc_type, delalloc);
6726 if (ret == -EAGAIN) {
6727 btrfs_add_free_space(block_group, offset, num_bytes);
6728 goto loop;
6729 }
6730
6731 /* we are all good, lets return */
6732 ins->objectid = search_start;
6733 ins->offset = num_bytes;
6734
6735 trace_btrfs_reserve_extent(orig_root, block_group,
6736 search_start, num_bytes);
6737 btrfs_release_block_group(block_group, delalloc);
6738 break;
6739 loop:
6740 failed_cluster_refill = false;
6741 failed_alloc = false;
6742 BUG_ON(index != get_block_group_index(block_group));
6743 btrfs_release_block_group(block_group, delalloc);
6744 }
6745 up_read(&space_info->groups_sem);
6746
6747 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6748 goto search;
6749
6750 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6751 goto search;
6752
6753 /*
6754 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6755 * caching kthreads as we move along
6756 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6757 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6758 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6759 * again
6760 */
6761 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6762 index = 0;
6763 loop++;
6764 if (loop == LOOP_ALLOC_CHUNK) {
6765 struct btrfs_trans_handle *trans;
6766 int exist = 0;
6767
6768 trans = current->journal_info;
6769 if (trans)
6770 exist = 1;
6771 else
6772 trans = btrfs_join_transaction(root);
6773
6774 if (IS_ERR(trans)) {
6775 ret = PTR_ERR(trans);
6776 goto out;
6777 }
6778
6779 ret = do_chunk_alloc(trans, root, flags,
6780 CHUNK_ALLOC_FORCE);
6781 /*
6782 * Do not bail out on ENOSPC since we
6783 * can do more things.
6784 */
6785 if (ret < 0 && ret != -ENOSPC)
6786 btrfs_abort_transaction(trans,
6787 root, ret);
6788 else
6789 ret = 0;
6790 if (!exist)
6791 btrfs_end_transaction(trans, root);
6792 if (ret)
6793 goto out;
6794 }
6795
6796 if (loop == LOOP_NO_EMPTY_SIZE) {
6797 empty_size = 0;
6798 empty_cluster = 0;
6799 }
6800
6801 goto search;
6802 } else if (!ins->objectid) {
6803 ret = -ENOSPC;
6804 } else if (ins->objectid) {
6805 ret = 0;
6806 }
6807 out:
6808 if (ret == -ENOSPC)
6809 ins->offset = max_extent_size;
6810 return ret;
6811 }
6812
6813 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6814 int dump_block_groups)
6815 {
6816 struct btrfs_block_group_cache *cache;
6817 int index = 0;
6818
6819 spin_lock(&info->lock);
6820 printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6821 info->flags,
6822 info->total_bytes - info->bytes_used - info->bytes_pinned -
6823 info->bytes_reserved - info->bytes_readonly,
6824 (info->full) ? "" : "not ");
6825 printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6826 "reserved=%llu, may_use=%llu, readonly=%llu\n",
6827 info->total_bytes, info->bytes_used, info->bytes_pinned,
6828 info->bytes_reserved, info->bytes_may_use,
6829 info->bytes_readonly);
6830 spin_unlock(&info->lock);
6831
6832 if (!dump_block_groups)
6833 return;
6834
6835 down_read(&info->groups_sem);
6836 again:
6837 list_for_each_entry(cache, &info->block_groups[index], list) {
6838 spin_lock(&cache->lock);
6839 printk(KERN_INFO "BTRFS: "
6840 "block group %llu has %llu bytes, "
6841 "%llu used %llu pinned %llu reserved %s\n",
6842 cache->key.objectid, cache->key.offset,
6843 btrfs_block_group_used(&cache->item), cache->pinned,
6844 cache->reserved, cache->ro ? "[readonly]" : "");
6845 btrfs_dump_free_space(cache, bytes);
6846 spin_unlock(&cache->lock);
6847 }
6848 if (++index < BTRFS_NR_RAID_TYPES)
6849 goto again;
6850 up_read(&info->groups_sem);
6851 }
6852
6853 int btrfs_reserve_extent(struct btrfs_root *root,
6854 u64 num_bytes, u64 min_alloc_size,
6855 u64 empty_size, u64 hint_byte,
6856 struct btrfs_key *ins, int is_data, int delalloc)
6857 {
6858 bool final_tried = false;
6859 u64 flags;
6860 int ret;
6861
6862 flags = btrfs_get_alloc_profile(root, is_data);
6863 again:
6864 WARN_ON(num_bytes < root->sectorsize);
6865 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6866 flags, delalloc);
6867
6868 if (ret == -ENOSPC) {
6869 if (!final_tried && ins->offset) {
6870 num_bytes = min(num_bytes >> 1, ins->offset);
6871 num_bytes = round_down(num_bytes, root->sectorsize);
6872 num_bytes = max(num_bytes, min_alloc_size);
6873 if (num_bytes == min_alloc_size)
6874 final_tried = true;
6875 goto again;
6876 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6877 struct btrfs_space_info *sinfo;
6878
6879 sinfo = __find_space_info(root->fs_info, flags);
6880 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6881 flags, num_bytes);
6882 if (sinfo)
6883 dump_space_info(sinfo, num_bytes, 1);
6884 }
6885 }
6886
6887 return ret;
6888 }
6889
6890 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6891 u64 start, u64 len,
6892 int pin, int delalloc)
6893 {
6894 struct btrfs_block_group_cache *cache;
6895 int ret = 0;
6896
6897 cache = btrfs_lookup_block_group(root->fs_info, start);
6898 if (!cache) {
6899 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6900 start);
6901 return -ENOSPC;
6902 }
6903
6904 if (btrfs_test_opt(root, DISCARD))
6905 ret = btrfs_discard_extent(root, start, len, NULL);
6906
6907 if (pin)
6908 pin_down_extent(root, cache, start, len, 1);
6909 else {
6910 btrfs_add_free_space(cache, start, len);
6911 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6912 }
6913 btrfs_put_block_group(cache);
6914
6915 trace_btrfs_reserved_extent_free(root, start, len);
6916
6917 return ret;
6918 }
6919
6920 int btrfs_free_reserved_extent(struct btrfs_root *root,
6921 u64 start, u64 len, int delalloc)
6922 {
6923 return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6924 }
6925
6926 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6927 u64 start, u64 len)
6928 {
6929 return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6930 }
6931
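/*
 * Insert the extent item for a newly allocated data extent together with an
 * inline backref (a shared data ref when parent is set, a keyed data ref
 * otherwise), record the qgroup reference and update the owning block
 * group's used bytes.
 */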
6932 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6933 struct btrfs_root *root,
6934 u64 parent, u64 root_objectid,
6935 u64 flags, u64 owner, u64 offset,
6936 struct btrfs_key *ins, int ref_mod)
6937 {
6938 int ret;
6939 struct btrfs_fs_info *fs_info = root->fs_info;
6940 struct btrfs_extent_item *extent_item;
6941 struct btrfs_extent_inline_ref *iref;
6942 struct btrfs_path *path;
6943 struct extent_buffer *leaf;
6944 int type;
6945 u32 size;
6946
6947 if (parent > 0)
6948 type = BTRFS_SHARED_DATA_REF_KEY;
6949 else
6950 type = BTRFS_EXTENT_DATA_REF_KEY;
6951
6952 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6953
6954 path = btrfs_alloc_path();
6955 if (!path)
6956 return -ENOMEM;
6957
6958 path->leave_spinning = 1;
6959 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6960 ins, size);
6961 if (ret) {
6962 btrfs_free_path(path);
6963 return ret;
6964 }
6965
6966 leaf = path->nodes[0];
6967 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6968 struct btrfs_extent_item);
6969 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6970 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6971 btrfs_set_extent_flags(leaf, extent_item,
6972 flags | BTRFS_EXTENT_FLAG_DATA);
6973
6974 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6975 btrfs_set_extent_inline_ref_type(leaf, iref, type);
6976 if (parent > 0) {
6977 struct btrfs_shared_data_ref *ref;
6978 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6979 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6980 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6981 } else {
6982 struct btrfs_extent_data_ref *ref;
6983 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6984 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6985 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6986 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6987 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6988 }
6989
6990 btrfs_mark_buffer_dirty(path->nodes[0]);
6991 btrfs_free_path(path);
6992
6993          /* Always set parent to 0 here since it's exclusive anyway. */
6994 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
6995 ins->objectid, ins->offset,
6996 BTRFS_QGROUP_OPER_ADD_EXCL, 0);
6997 if (ret)
6998 return ret;
6999
7000 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7001 if (ret) { /* -ENOENT, logic error */
7002 btrfs_err(fs_info, "update block group failed for %llu %llu",
7003 ins->objectid, ins->offset);
7004 BUG();
7005 }
7006 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7007 return ret;
7008 }
7009
7010 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7011 struct btrfs_root *root,
7012 u64 parent, u64 root_objectid,
7013 u64 flags, struct btrfs_disk_key *key,
7014 int level, struct btrfs_key *ins,
7015 int no_quota)
7016 {
7017 int ret;
7018 struct btrfs_fs_info *fs_info = root->fs_info;
7019 struct btrfs_extent_item *extent_item;
7020 struct btrfs_tree_block_info *block_info;
7021 struct btrfs_extent_inline_ref *iref;
7022 struct btrfs_path *path;
7023 struct extent_buffer *leaf;
7024 u32 size = sizeof(*extent_item) + sizeof(*iref);
7025 u64 num_bytes = ins->offset;
7026 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7027 SKINNY_METADATA);
7028
7029 if (!skinny_metadata)
7030 size += sizeof(*block_info);
7031
7032 path = btrfs_alloc_path();
7033 if (!path) {
7034 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7035 root->nodesize);
7036 return -ENOMEM;
7037 }
7038
7039 path->leave_spinning = 1;
7040 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7041 ins, size);
7042 if (ret) {
7043 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7044 root->nodesize);
7045 btrfs_free_path(path);
7046 return ret;
7047 }
7048
7049 leaf = path->nodes[0];
7050 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7051 struct btrfs_extent_item);
7052 btrfs_set_extent_refs(leaf, extent_item, 1);
7053 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7054 btrfs_set_extent_flags(leaf, extent_item,
7055 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7056
7057 if (skinny_metadata) {
7058 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7059 num_bytes = root->nodesize;
7060 } else {
7061 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7062 btrfs_set_tree_block_key(leaf, block_info, key);
7063 btrfs_set_tree_block_level(leaf, block_info, level);
7064 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7065 }
7066
7067 if (parent > 0) {
7068 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7069 btrfs_set_extent_inline_ref_type(leaf, iref,
7070 BTRFS_SHARED_BLOCK_REF_KEY);
7071 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7072 } else {
7073 btrfs_set_extent_inline_ref_type(leaf, iref,
7074 BTRFS_TREE_BLOCK_REF_KEY);
7075 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7076 }
7077
7078 btrfs_mark_buffer_dirty(leaf);
7079 btrfs_free_path(path);
7080
7081 if (!no_quota) {
7082 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7083 ins->objectid, num_bytes,
7084 BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7085 if (ret)
7086 return ret;
7087 }
7088
7089 ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7090 1);
7091 if (ret) { /* -ENOENT, logic error */
7092 btrfs_err(fs_info, "update block group failed for %llu %llu",
7093 ins->objectid, ins->offset);
7094 BUG();
7095 }
7096
7097 trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7098 return ret;
7099 }
7100
7101 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7102 struct btrfs_root *root,
7103 u64 root_objectid, u64 owner,
7104 u64 offset, struct btrfs_key *ins)
7105 {
7106 int ret;
7107
7108 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7109
7110 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7111 ins->offset, 0,
7112 root_objectid, owner, offset,
7113 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7114 return ret;
7115 }
7116
7117 /*
7118 * this is used by the tree logging recovery code. It records that
7119 * an extent has been allocated and makes sure to clear the free
7120 * space cache bits as well
7121 */
7122 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7123 struct btrfs_root *root,
7124 u64 root_objectid, u64 owner, u64 offset,
7125 struct btrfs_key *ins)
7126 {
7127 int ret;
7128 struct btrfs_block_group_cache *block_group;
7129
7130 /*
7131 * Mixed block groups will exclude before processing the log so we only
7132          * need to do the exclude dance if this fs isn't mixed.
7133 */
7134 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7135 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7136 if (ret)
7137 return ret;
7138 }
7139
7140 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7141 if (!block_group)
7142 return -EINVAL;
7143
7144 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7145 RESERVE_ALLOC_NO_ACCOUNT, 0);
7146 BUG_ON(ret); /* logic error */
7147 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7148 0, owner, offset, ins, 1);
7149 btrfs_put_block_group(block_group);
7150 return ret;
7151 }
7152
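/*
 * Set up the extent buffer for a freshly allocated tree block: lock it,
 * stamp it with the current transaction id, mark it uptodate and record it
 * as dirty in the appropriate io tree (dirty_log_pages for log tree blocks,
 * the transaction's dirty_pages otherwise).  The buffer is returned locked.
 */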
7153 static struct extent_buffer *
7154 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7155 u64 bytenr, int level)
7156 {
7157 struct extent_buffer *buf;
7158
7159 buf = btrfs_find_create_tree_block(root, bytenr);
7160 if (!buf)
7161 return ERR_PTR(-ENOMEM);
7162 btrfs_set_header_generation(buf, trans->transid);
7163 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7164 btrfs_tree_lock(buf);
7165 clean_tree_block(trans, root, buf);
7166 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7167
7168 btrfs_set_lock_blocking(buf);
7169 btrfs_set_buffer_uptodate(buf);
7170
7171 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7172 buf->log_index = root->log_transid % 2;
7173 /*
7174 * we allow two log transactions at a time, use different
7175                  * EXTENT bits to differentiate dirty pages.
7176 */
7177 if (buf->log_index == 0)
7178 set_extent_dirty(&root->dirty_log_pages, buf->start,
7179 buf->start + buf->len - 1, GFP_NOFS);
7180 else
7181 set_extent_new(&root->dirty_log_pages, buf->start,
7182 buf->start + buf->len - 1, GFP_NOFS);
7183 } else {
7184 buf->log_index = -1;
7185 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7186 buf->start + buf->len - 1, GFP_NOFS);
7187 }
7188 trans->blocks_used++;
7189 /* this returns a buffer locked for blocking */
7190 return buf;
7191 }
7192
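/*
 * Pick the block reserve a new tree block should be charged to.  The rsv
 * attached to the transaction/root is tried first; if it is exhausted the
 * global reserve is refreshed once (for the global type) and a fresh
 * metadata reservation is attempted, and as a last resort bytes are taken
 * from the global reserve when it shares the same space info.  Returns an
 * ERR_PTR when no reservation can be made.
 */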
7193 static struct btrfs_block_rsv *
7194 use_block_rsv(struct btrfs_trans_handle *trans,
7195 struct btrfs_root *root, u32 blocksize)
7196 {
7197 struct btrfs_block_rsv *block_rsv;
7198 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7199 int ret;
7200 bool global_updated = false;
7201
7202 block_rsv = get_block_rsv(trans, root);
7203
7204 if (unlikely(block_rsv->size == 0))
7205 goto try_reserve;
7206 again:
7207 ret = block_rsv_use_bytes(block_rsv, blocksize);
7208 if (!ret)
7209 return block_rsv;
7210
7211 if (block_rsv->failfast)
7212 return ERR_PTR(ret);
7213
7214 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7215 global_updated = true;
7216 update_global_block_rsv(root->fs_info);
7217 goto again;
7218 }
7219
7220 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7221 static DEFINE_RATELIMIT_STATE(_rs,
7222 DEFAULT_RATELIMIT_INTERVAL * 10,
7223 /*DEFAULT_RATELIMIT_BURST*/ 1);
7224 if (__ratelimit(&_rs))
7225 WARN(1, KERN_DEBUG
7226 "BTRFS: block rsv returned %d\n", ret);
7227 }
7228 try_reserve:
7229 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7230 BTRFS_RESERVE_NO_FLUSH);
7231 if (!ret)
7232 return block_rsv;
7233 /*
7234 * If we couldn't reserve metadata bytes try and use some from
7235 * the global reserve if its space type is the same as the global
7236 * reservation.
7237 */
7238 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7239 block_rsv->space_info == global_rsv->space_info) {
7240 ret = block_rsv_use_bytes(global_rsv, blocksize);
7241 if (!ret)
7242 return global_rsv;
7243 }
7244 return ERR_PTR(ret);
7245 }
7246
7247 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7248 struct btrfs_block_rsv *block_rsv, u32 blocksize)
7249 {
7250 block_rsv_add_bytes(block_rsv, blocksize, 0);
7251 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7252 }
7253
7254 /*
7255 * finds a free extent and does all the dirty work required for allocation
7256 * returns the key for the extent through ins, and a tree buffer for
7257 * the first block of the extent through buf.
7258 *
7259 * returns the tree buffer or NULL.
7260 */
7261 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7262 struct btrfs_root *root,
7263 u64 parent, u64 root_objectid,
7264 struct btrfs_disk_key *key, int level,
7265 u64 hint, u64 empty_size)
7266 {
7267 struct btrfs_key ins;
7268 struct btrfs_block_rsv *block_rsv;
7269 struct extent_buffer *buf;
7270 u64 flags = 0;
7271 int ret;
7272 u32 blocksize = root->nodesize;
7273 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7274 SKINNY_METADATA);
7275
7276 if (btrfs_test_is_dummy_root(root)) {
7277 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7278 level);
7279 if (!IS_ERR(buf))
7280 root->alloc_bytenr += blocksize;
7281 return buf;
7282 }
7283
7284 block_rsv = use_block_rsv(trans, root, blocksize);
7285 if (IS_ERR(block_rsv))
7286 return ERR_CAST(block_rsv);
7287
7288 ret = btrfs_reserve_extent(root, blocksize, blocksize,
7289 empty_size, hint, &ins, 0, 0);
7290 if (ret) {
7291 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7292 return ERR_PTR(ret);
7293 }
7294
7295 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7296 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7297
7298 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7299 if (parent == 0)
7300 parent = ins.objectid;
7301 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7302 } else
7303 BUG_ON(parent > 0);
7304
7305 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7306 struct btrfs_delayed_extent_op *extent_op;
7307 extent_op = btrfs_alloc_delayed_extent_op();
7308 BUG_ON(!extent_op); /* -ENOMEM */
7309 if (key)
7310 memcpy(&extent_op->key, key, sizeof(extent_op->key));
7311 else
7312 memset(&extent_op->key, 0, sizeof(extent_op->key));
7313 extent_op->flags_to_set = flags;
7314 if (skinny_metadata)
7315 extent_op->update_key = 0;
7316 else
7317 extent_op->update_key = 1;
7318 extent_op->update_flags = 1;
7319 extent_op->is_data = 0;
7320 extent_op->level = level;
7321
7322 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7323 ins.objectid,
7324 ins.offset, parent, root_objectid,
7325 level, BTRFS_ADD_DELAYED_EXTENT,
7326 extent_op, 0);
7327 BUG_ON(ret); /* -ENOMEM */
7328 }
7329 return buf;
7330 }
7331
7332 struct walk_control {
7333 u64 refs[BTRFS_MAX_LEVEL];
7334 u64 flags[BTRFS_MAX_LEVEL];
7335 struct btrfs_key update_progress;
7336 int stage;
7337 int level;
7338 int shared_level;
7339 int update_ref;
7340 int keep_locks;
7341 int reada_slot;
7342 int reada_count;
7343 int for_reloc;
7344 };
7345
7346 #define DROP_REFERENCE 1
7347 #define UPDATE_BACKREF 2
7348
7349 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7350 struct btrfs_root *root,
7351 struct walk_control *wc,
7352 struct btrfs_path *path)
7353 {
7354 u64 bytenr;
7355 u64 generation;
7356 u64 refs;
7357 u64 flags;
7358 u32 nritems;
7359 u32 blocksize;
7360 struct btrfs_key key;
7361 struct extent_buffer *eb;
7362 int ret;
7363 int slot;
7364 int nread = 0;
7365
7366 if (path->slots[wc->level] < wc->reada_slot) {
7367 wc->reada_count = wc->reada_count * 2 / 3;
7368 wc->reada_count = max(wc->reada_count, 2);
7369 } else {
7370 wc->reada_count = wc->reada_count * 3 / 2;
7371 wc->reada_count = min_t(int, wc->reada_count,
7372 BTRFS_NODEPTRS_PER_BLOCK(root));
7373 }
7374
7375 eb = path->nodes[wc->level];
7376 nritems = btrfs_header_nritems(eb);
7377 blocksize = root->nodesize;
7378
7379 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7380 if (nread >= wc->reada_count)
7381 break;
7382
7383 cond_resched();
7384 bytenr = btrfs_node_blockptr(eb, slot);
7385 generation = btrfs_node_ptr_generation(eb, slot);
7386
7387 if (slot == path->slots[wc->level])
7388 goto reada;
7389
7390 if (wc->stage == UPDATE_BACKREF &&
7391 generation <= root->root_key.offset)
7392 continue;
7393
7394 /* We don't lock the tree block, it's OK to be racy here */
7395 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7396 wc->level - 1, 1, &refs,
7397 &flags);
7398 /* We don't care about errors in readahead. */
7399 if (ret < 0)
7400 continue;
7401 BUG_ON(refs == 0);
7402
7403 if (wc->stage == DROP_REFERENCE) {
7404 if (refs == 1)
7405 goto reada;
7406
7407 if (wc->level == 1 &&
7408 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7409 continue;
7410 if (!wc->update_ref ||
7411 generation <= root->root_key.offset)
7412 continue;
7413 btrfs_node_key_to_cpu(eb, &key, slot);
7414 ret = btrfs_comp_cpu_keys(&key,
7415 &wc->update_progress);
7416 if (ret < 0)
7417 continue;
7418 } else {
7419 if (wc->level == 1 &&
7420 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7421 continue;
7422 }
7423 reada:
7424 readahead_tree_block(root, bytenr);
7425 nread++;
7426 }
7427 wc->reada_slot = slot;
7428 }
7429
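/*
 * Record a BTRFS_QGROUP_OPER_SUB_SUBTREE operation for every on-disk data
 * extent referenced by the given leaf, skipping inline extents and holes,
 * so qgroup accounting stays correct when the shared subtree is dropped.
 */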
7430 static int account_leaf_items(struct btrfs_trans_handle *trans,
7431 struct btrfs_root *root,
7432 struct extent_buffer *eb)
7433 {
7434 int nr = btrfs_header_nritems(eb);
7435 int i, extent_type, ret;
7436 struct btrfs_key key;
7437 struct btrfs_file_extent_item *fi;
7438 u64 bytenr, num_bytes;
7439
7440 for (i = 0; i < nr; i++) {
7441 btrfs_item_key_to_cpu(eb, &key, i);
7442
7443 if (key.type != BTRFS_EXTENT_DATA_KEY)
7444 continue;
7445
7446 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7447 /* filter out non qgroup-accountable extents */
7448 extent_type = btrfs_file_extent_type(eb, fi);
7449
7450 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7451 continue;
7452
7453 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7454 if (!bytenr)
7455 continue;
7456
7457 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7458
7459 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7460 root->objectid,
7461 bytenr, num_bytes,
7462 BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7463 if (ret)
7464 return ret;
7465 }
7466 return 0;
7467 }
7468
7469 /*
7470 * Walk up the tree from the bottom, freeing leaves and any interior
7471 * nodes which have had all slots visited. If a node (leaf or
7472  * interior) is freed, the node above it will have its slot
7473 * incremented. The root node will never be freed.
7474 *
7475 * At the end of this function, we should have a path which has all
7476 * slots incremented to the next position for a search. If we need to
7477 * read a new node it will be NULL and the node above it will have the
7478 * correct slot selected for a later read.
7479 *
7480 * If we increment the root nodes slot counter past the number of
7481 * elements, 1 is returned to signal completion of the search.
7482 */
7483 static int adjust_slots_upwards(struct btrfs_root *root,
7484 struct btrfs_path *path, int root_level)
7485 {
7486 int level = 0;
7487 int nr, slot;
7488 struct extent_buffer *eb;
7489
7490 if (root_level == 0)
7491 return 1;
7492
7493 while (level <= root_level) {
7494 eb = path->nodes[level];
7495 nr = btrfs_header_nritems(eb);
7496 path->slots[level]++;
7497 slot = path->slots[level];
7498 if (slot >= nr || level == 0) {
7499 /*
7500 * Don't free the root - we will detect this
7501 * condition after our loop and return a
7502 * positive value for caller to stop walking the tree.
7503 */
7504 if (level != root_level) {
7505 btrfs_tree_unlock_rw(eb, path->locks[level]);
7506 path->locks[level] = 0;
7507
7508 free_extent_buffer(eb);
7509 path->nodes[level] = NULL;
7510 path->slots[level] = 0;
7511 }
7512 } else {
7513 /*
7514 * We have a valid slot to walk back down
7515 * from. Stop here so caller can process these
7516 * new nodes.
7517 */
7518 break;
7519 }
7520
7521 level++;
7522 }
7523
7524 eb = path->nodes[root_level];
7525 if (path->slots[root_level] >= btrfs_header_nritems(eb))
7526 return 1;
7527
7528 return 0;
7529 }
7530
7531 /*
7532 * root_eb is the subtree root and is locked before this function is called.
7533 */
7534 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7535 struct btrfs_root *root,
7536 struct extent_buffer *root_eb,
7537 u64 root_gen,
7538 int root_level)
7539 {
7540 int ret = 0;
7541 int level;
7542 struct extent_buffer *eb = root_eb;
7543 struct btrfs_path *path = NULL;
7544
7545 BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7546 BUG_ON(root_eb == NULL);
7547
7548 if (!root->fs_info->quota_enabled)
7549 return 0;
7550
7551 if (!extent_buffer_uptodate(root_eb)) {
7552 ret = btrfs_read_buffer(root_eb, root_gen);
7553 if (ret)
7554 goto out;
7555 }
7556
7557 if (root_level == 0) {
7558 ret = account_leaf_items(trans, root, root_eb);
7559 goto out;
7560 }
7561
7562 path = btrfs_alloc_path();
7563 if (!path)
7564 return -ENOMEM;
7565
7566 /*
7567 * Walk down the tree. Missing extent blocks are filled in as
7568 * we go. Metadata is accounted every time we read a new
7569 * extent block.
7570 *
7571 * When we reach a leaf, we account for file extent items in it,
7572 * walk back up the tree (adjusting slot pointers as we go)
7573 * and restart the search process.
7574 */
7575 extent_buffer_get(root_eb); /* For path */
7576 path->nodes[root_level] = root_eb;
7577 path->slots[root_level] = 0;
7578 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7579 walk_down:
7580 level = root_level;
7581 while (level >= 0) {
7582 if (path->nodes[level] == NULL) {
7583 int parent_slot;
7584 u64 child_gen;
7585 u64 child_bytenr;
7586
7587 /* We need to get child blockptr/gen from
7588 * parent before we can read it. */
7589 eb = path->nodes[level + 1];
7590 parent_slot = path->slots[level + 1];
7591 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7592 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7593
7594 eb = read_tree_block(root, child_bytenr, child_gen);
7595 if (!eb || !extent_buffer_uptodate(eb)) {
7596 ret = -EIO;
7597 goto out;
7598 }
7599
7600 path->nodes[level] = eb;
7601 path->slots[level] = 0;
7602
7603 btrfs_tree_read_lock(eb);
7604 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7605 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7606
7607 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7608 root->objectid,
7609 child_bytenr,
7610 root->nodesize,
7611 BTRFS_QGROUP_OPER_SUB_SUBTREE,
7612 0);
7613 if (ret)
7614 goto out;
7615
7616 }
7617
7618 if (level == 0) {
7619 ret = account_leaf_items(trans, root, path->nodes[level]);
7620 if (ret)
7621 goto out;
7622
7623 /* Nonzero return here means we completed our search */
7624 ret = adjust_slots_upwards(root, path, root_level);
7625 if (ret)
7626 break;
7627
7628 /* Restart search with new slots */
7629 goto walk_down;
7630 }
7631
7632 level--;
7633 }
7634
7635 ret = 0;
7636 out:
7637 btrfs_free_path(path);
7638
7639 return ret;
7640 }
7641
7642 /*
7643 * helper to process tree block while walking down the tree.
7644 *
7645 * when wc->stage == UPDATE_BACKREF, this function updates
7646 * back refs for pointers in the block.
7647 *
7648 * NOTE: return value 1 means we should stop walking down.
7649 */
7650 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7651 struct btrfs_root *root,
7652 struct btrfs_path *path,
7653 struct walk_control *wc, int lookup_info)
7654 {
7655 int level = wc->level;
7656 struct extent_buffer *eb = path->nodes[level];
7657 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7658 int ret;
7659
7660 if (wc->stage == UPDATE_BACKREF &&
7661 btrfs_header_owner(eb) != root->root_key.objectid)
7662 return 1;
7663
7664 /*
7665 * when reference count of tree block is 1, it won't increase
7666 * again. once full backref flag is set, we never clear it.
7667 */
7668 if (lookup_info &&
7669 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7670 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7671 BUG_ON(!path->locks[level]);
7672 ret = btrfs_lookup_extent_info(trans, root,
7673 eb->start, level, 1,
7674 &wc->refs[level],
7675 &wc->flags[level]);
7676 BUG_ON(ret == -ENOMEM);
7677 if (ret)
7678 return ret;
7679 BUG_ON(wc->refs[level] == 0);
7680 }
7681
7682 if (wc->stage == DROP_REFERENCE) {
7683 if (wc->refs[level] > 1)
7684 return 1;
7685
7686 if (path->locks[level] && !wc->keep_locks) {
7687 btrfs_tree_unlock_rw(eb, path->locks[level]);
7688 path->locks[level] = 0;
7689 }
7690 return 0;
7691 }
7692
7693 /* wc->stage == UPDATE_BACKREF */
7694 if (!(wc->flags[level] & flag)) {
7695 BUG_ON(!path->locks[level]);
7696 ret = btrfs_inc_ref(trans, root, eb, 1);
7697 BUG_ON(ret); /* -ENOMEM */
7698 ret = btrfs_dec_ref(trans, root, eb, 0);
7699 BUG_ON(ret); /* -ENOMEM */
7700 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7701 eb->len, flag,
7702 btrfs_header_level(eb), 0);
7703 BUG_ON(ret); /* -ENOMEM */
7704 wc->flags[level] |= flag;
7705 }
7706
7707 /*
7708 * the block is shared by multiple trees, so it's not good to
7709 * keep the tree lock
7710 */
7711 if (path->locks[level] && level > 0) {
7712 btrfs_tree_unlock_rw(eb, path->locks[level]);
7713 path->locks[level] = 0;
7714 }
7715 return 0;
7716 }
7717
7718 /*
7719 * helper to process tree block pointer.
7720 *
7721 * when wc->stage == DROP_REFERENCE, this function checks
7722 * reference count of the block pointed to. if the block
7723 * is shared and we need update back refs for the subtree
7724 * rooted at the block, this function changes wc->stage to
7725 * UPDATE_BACKREF. if the block is shared and there is no
7726 * need to update back, this function drops the reference
7727 * to the block.
7728 *
7729 * NOTE: return value 1 means we should stop walking down.
7730 */
7731 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7732 struct btrfs_root *root,
7733 struct btrfs_path *path,
7734 struct walk_control *wc, int *lookup_info)
7735 {
7736 u64 bytenr;
7737 u64 generation;
7738 u64 parent;
7739 u32 blocksize;
7740 struct btrfs_key key;
7741 struct extent_buffer *next;
7742 int level = wc->level;
7743 int reada = 0;
7744 int ret = 0;
7745 bool need_account = false;
7746
7747 generation = btrfs_node_ptr_generation(path->nodes[level],
7748 path->slots[level]);
7749 /*
7750 * if the lower level block was created before the snapshot
7751 * was created, we know there is no need to update back refs
7752 * for the subtree
7753 */
7754 if (wc->stage == UPDATE_BACKREF &&
7755 generation <= root->root_key.offset) {
7756 *lookup_info = 1;
7757 return 1;
7758 }
7759
7760 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7761 blocksize = root->nodesize;
7762
7763 next = btrfs_find_tree_block(root, bytenr);
7764 if (!next) {
7765 next = btrfs_find_create_tree_block(root, bytenr);
7766 if (!next)
7767 return -ENOMEM;
7768 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7769 level - 1);
7770 reada = 1;
7771 }
7772 btrfs_tree_lock(next);
7773 btrfs_set_lock_blocking(next);
7774
7775 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7776 &wc->refs[level - 1],
7777 &wc->flags[level - 1]);
7778 if (ret < 0) {
7779 btrfs_tree_unlock(next);
7780 return ret;
7781 }
7782
7783 if (unlikely(wc->refs[level - 1] == 0)) {
7784 btrfs_err(root->fs_info, "Missing references.");
7785 BUG();
7786 }
7787 *lookup_info = 0;
7788
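	/*
	 * Decide whether we need to descend into this child at all.  In
	 * DROP_REFERENCE stage a shared child (refs > 1) either gets skipped
	 * (only our reference is dropped, at the "skip" label) or switches
	 * the walk into UPDATE_BACKREF stage for the subtree below it.
	 */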
7789 if (wc->stage == DROP_REFERENCE) {
7790 if (wc->refs[level - 1] > 1) {
7791 need_account = true;
7792 if (level == 1 &&
7793 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7794 goto skip;
7795
7796 if (!wc->update_ref ||
7797 generation <= root->root_key.offset)
7798 goto skip;
7799
7800 btrfs_node_key_to_cpu(path->nodes[level], &key,
7801 path->slots[level]);
7802 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7803 if (ret < 0)
7804 goto skip;
7805
7806 wc->stage = UPDATE_BACKREF;
7807 wc->shared_level = level - 1;
7808 }
7809 } else {
7810 if (level == 1 &&
7811 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7812 goto skip;
7813 }
7814
7815 if (!btrfs_buffer_uptodate(next, generation, 0)) {
7816 btrfs_tree_unlock(next);
7817 free_extent_buffer(next);
7818 next = NULL;
7819 *lookup_info = 1;
7820 }
7821
7822 if (!next) {
7823 if (reada && level == 1)
7824 reada_walk_down(trans, root, wc, path);
7825 next = read_tree_block(root, bytenr, generation);
7826 if (!next || !extent_buffer_uptodate(next)) {
7827 free_extent_buffer(next);
7828 return -EIO;
7829 }
7830 btrfs_tree_lock(next);
7831 btrfs_set_lock_blocking(next);
7832 }
7833
7834 level--;
7835 BUG_ON(level != btrfs_header_level(next));
7836 path->nodes[level] = next;
7837 path->slots[level] = 0;
7838 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7839 wc->level = level;
7840 if (wc->level == 1)
7841 wc->reada_slot = 0;
7842 return 0;
7843 skip:
7844 wc->refs[level - 1] = 0;
7845 wc->flags[level - 1] = 0;
7846 if (wc->stage == DROP_REFERENCE) {
7847 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7848 parent = path->nodes[level]->start;
7849 } else {
7850 BUG_ON(root->root_key.objectid !=
7851 btrfs_header_owner(path->nodes[level]));
7852 parent = 0;
7853 }
7854
7855 if (need_account) {
7856 ret = account_shared_subtree(trans, root, next,
7857 generation, level - 1);
7858 if (ret) {
7859 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7860 "%d accounting shared subtree. Quota "
7861 "is out of sync, rescan required.\n",
7862 root->fs_info->sb->s_id, ret);
7863 }
7864 }
7865 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7866 root->root_key.objectid, level - 1, 0, 0);
7867 BUG_ON(ret); /* -ENOMEM */
7868 }
7869 btrfs_tree_unlock(next);
7870 free_extent_buffer(next);
7871 *lookup_info = 1;
7872 return 1;
7873 }
7874
7875 /*
7876 * helper to process tree block while walking up the tree.
7877 *
7878 * when wc->stage == DROP_REFERENCE, this function drops
7879 * reference count on the block.
7880 *
7881 * when wc->stage == UPDATE_BACKREF, this function changes
7882 * wc->stage back to DROP_REFERENCE if we changed wc->stage
7883 * to UPDATE_BACKREF previously while processing the block.
7884 *
7885 * NOTE: return value 1 means we should stop walking up.
7886 */
7887 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7888 struct btrfs_root *root,
7889 struct btrfs_path *path,
7890 struct walk_control *wc)
7891 {
7892 int ret;
7893 int level = wc->level;
7894 struct extent_buffer *eb = path->nodes[level];
7895 u64 parent = 0;
7896
7897 if (wc->stage == UPDATE_BACKREF) {
7898 BUG_ON(wc->shared_level < level);
7899 if (level < wc->shared_level)
7900 goto out;
7901
7902 ret = find_next_key(path, level + 1, &wc->update_progress);
7903 if (ret > 0)
7904 wc->update_ref = 0;
7905
7906 wc->stage = DROP_REFERENCE;
7907 wc->shared_level = -1;
7908 path->slots[level] = 0;
7909
7910 /*
7911 * check reference count again if the block isn't locked.
7912 * we should start walking down the tree again if reference
7913 * count is one.
7914 */
7915 if (!path->locks[level]) {
7916 BUG_ON(level == 0);
7917 btrfs_tree_lock(eb);
7918 btrfs_set_lock_blocking(eb);
7919 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7920
7921 ret = btrfs_lookup_extent_info(trans, root,
7922 eb->start, level, 1,
7923 &wc->refs[level],
7924 &wc->flags[level]);
7925 if (ret < 0) {
7926 btrfs_tree_unlock_rw(eb, path->locks[level]);
7927 path->locks[level] = 0;
7928 return ret;
7929 }
7930 BUG_ON(wc->refs[level] == 0);
7931 if (wc->refs[level] == 1) {
7932 btrfs_tree_unlock_rw(eb, path->locks[level]);
7933 path->locks[level] = 0;
7934 return 1;
7935 }
7936 }
7937 }
7938
7939 /* wc->stage == DROP_REFERENCE */
7940 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7941
7942 if (wc->refs[level] == 1) {
7943 if (level == 0) {
7944 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7945 ret = btrfs_dec_ref(trans, root, eb, 1);
7946 else
7947 ret = btrfs_dec_ref(trans, root, eb, 0);
7948 BUG_ON(ret); /* -ENOMEM */
7949 ret = account_leaf_items(trans, root, eb);
7950 if (ret) {
7951 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7952 "%d accounting leaf items. Quota "
7953 "is out of sync, rescan required.\n",
7954 root->fs_info->sb->s_id, ret);
7955 }
7956 }
7957 /* make block locked assertion in clean_tree_block happy */
7958 if (!path->locks[level] &&
7959 btrfs_header_generation(eb) == trans->transid) {
7960 btrfs_tree_lock(eb);
7961 btrfs_set_lock_blocking(eb);
7962 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7963 }
7964 clean_tree_block(trans, root, eb);
7965 }
7966
7967 if (eb == root->node) {
7968 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7969 parent = eb->start;
7970 else
7971 BUG_ON(root->root_key.objectid !=
7972 btrfs_header_owner(eb));
7973 } else {
7974 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7975 parent = path->nodes[level + 1]->start;
7976 else
7977 BUG_ON(root->root_key.objectid !=
7978 btrfs_header_owner(path->nodes[level + 1]));
7979 }
7980
7981 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7982 out:
7983 wc->refs[level] = 0;
7984 wc->flags[level] = 0;
7985 return 0;
7986 }
7987
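/*
 * Walk down from wc->level as far as we can, calling walk_down_proc()
 * on each block and do_walk_down() on each child pointer.  Stops at a
 * leaf or at a block we must not descend into; returns 0 on success or
 * a negative errno from do_walk_down().
 */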
7988 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7989 struct btrfs_root *root,
7990 struct btrfs_path *path,
7991 struct walk_control *wc)
7992 {
7993 int level = wc->level;
7994 int lookup_info = 1;
7995 int ret;
7996
7997 while (level >= 0) {
7998 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7999 if (ret > 0)
8000 break;
8001
8002 if (level == 0)
8003 break;
8004
8005 if (path->slots[level] >=
8006 btrfs_header_nritems(path->nodes[level]))
8007 break;
8008
8009 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8010 if (ret > 0) {
8011 path->slots[level]++;
8012 continue;
8013 } else if (ret < 0)
8014 return ret;
8015 level = wc->level;
8016 }
8017 return 0;
8018 }
8019
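/*
 * Walk back up from wc->level towards max_level, letting walk_up_proc()
 * drop or clean each fully processed block.  Returns 0 when there is a
 * new slot to walk down from, 1 when everything below max_level has
 * been processed.
 */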
8020 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8021 struct btrfs_root *root,
8022 struct btrfs_path *path,
8023 struct walk_control *wc, int max_level)
8024 {
8025 int level = wc->level;
8026 int ret;
8027
8028 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8029 while (level < max_level && path->nodes[level]) {
8030 wc->level = level;
8031 if (path->slots[level] + 1 <
8032 btrfs_header_nritems(path->nodes[level])) {
8033 path->slots[level]++;
8034 return 0;
8035 } else {
8036 ret = walk_up_proc(trans, root, path, wc);
8037 if (ret > 0)
8038 return 0;
8039
8040 if (path->locks[level]) {
8041 btrfs_tree_unlock_rw(path->nodes[level],
8042 path->locks[level]);
8043 path->locks[level] = 0;
8044 }
8045 free_extent_buffer(path->nodes[level]);
8046 path->nodes[level] = NULL;
8047 level++;
8048 }
8049 }
8050 return 1;
8051 }
8052
8053 /*
8054 * drop a subvolume tree.
8055 *
8056 * this function traverses the tree freeing any blocks that are only
8057 * referenced by the tree.
8058 *
8059 * when a shared tree block is found, this function decreases its
8060 * reference count by one. if update_ref is true, this function
8061 * also makes sure backrefs for the shared block and all lower level
8062 * blocks are properly updated.
8063 *
8064 * If called with for_reloc == 0, may exit early with -EAGAIN
8065 */
8066 int btrfs_drop_snapshot(struct btrfs_root *root,
8067 struct btrfs_block_rsv *block_rsv, int update_ref,
8068 int for_reloc)
8069 {
8070 struct btrfs_path *path;
8071 struct btrfs_trans_handle *trans;
8072 struct btrfs_root *tree_root = root->fs_info->tree_root;
8073 struct btrfs_root_item *root_item = &root->root_item;
8074 struct walk_control *wc;
8075 struct btrfs_key key;
8076 int err = 0;
8077 int ret;
8078 int level;
8079 bool root_dropped = false;
8080
8081 btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8082
8083 path = btrfs_alloc_path();
8084 if (!path) {
8085 err = -ENOMEM;
8086 goto out;
8087 }
8088
8089 wc = kzalloc(sizeof(*wc), GFP_NOFS);
8090 if (!wc) {
8091 btrfs_free_path(path);
8092 err = -ENOMEM;
8093 goto out;
8094 }
8095
8096 trans = btrfs_start_transaction(tree_root, 0);
8097 if (IS_ERR(trans)) {
8098 err = PTR_ERR(trans);
8099 goto out_free;
8100 }
8101
8102 if (block_rsv)
8103 trans->block_rsv = block_rsv;
8104
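	/*
	 * A zero drop_progress objectid means this is a fresh drop and we
	 * start from the locked root node; otherwise we are resuming an
	 * interrupted drop and must search back down to the recorded
	 * drop_level and key.
	 */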
8105 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8106 level = btrfs_header_level(root->node);
8107 path->nodes[level] = btrfs_lock_root_node(root);
8108 btrfs_set_lock_blocking(path->nodes[level]);
8109 path->slots[level] = 0;
8110 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8111 memset(&wc->update_progress, 0,
8112 sizeof(wc->update_progress));
8113 } else {
8114 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8115 memcpy(&wc->update_progress, &key,
8116 sizeof(wc->update_progress));
8117
8118 level = root_item->drop_level;
8119 BUG_ON(level == 0);
8120 path->lowest_level = level;
8121 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8122 path->lowest_level = 0;
8123 if (ret < 0) {
8124 err = ret;
8125 goto out_end_trans;
8126 }
8127 WARN_ON(ret > 0);
8128
8129 /*
8130 * unlock our path, this is safe because only this
8131 * function is allowed to delete this snapshot
8132 */
8133 btrfs_unlock_up_safe(path, 0);
8134
8135 level = btrfs_header_level(root->node);
8136 while (1) {
8137 btrfs_tree_lock(path->nodes[level]);
8138 btrfs_set_lock_blocking(path->nodes[level]);
8139 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8140
8141 ret = btrfs_lookup_extent_info(trans, root,
8142 path->nodes[level]->start,
8143 level, 1, &wc->refs[level],
8144 &wc->flags[level]);
8145 if (ret < 0) {
8146 err = ret;
8147 goto out_end_trans;
8148 }
8149 BUG_ON(wc->refs[level] == 0);
8150
8151 if (level == root_item->drop_level)
8152 break;
8153
8154 btrfs_tree_unlock(path->nodes[level]);
8155 path->locks[level] = 0;
8156 WARN_ON(wc->refs[level] != 1);
8157 level--;
8158 }
8159 }
8160
8161 wc->level = level;
8162 wc->shared_level = -1;
8163 wc->stage = DROP_REFERENCE;
8164 wc->update_ref = update_ref;
8165 wc->keep_locks = 0;
8166 wc->for_reloc = for_reloc;
8167 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8168
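	/*
	 * Main drop loop: alternate between walking down (freeing blocks and
	 * dropping refs) and walking back up, periodically saving
	 * drop_progress/drop_level into the root item and restarting the
	 * transaction so a crashed or interrupted drop can resume later.
	 */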
8169 while (1) {
8170
8171 ret = walk_down_tree(trans, root, path, wc);
8172 if (ret < 0) {
8173 err = ret;
8174 break;
8175 }
8176
8177 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8178 if (ret < 0) {
8179 err = ret;
8180 break;
8181 }
8182
8183 if (ret > 0) {
8184 BUG_ON(wc->stage != DROP_REFERENCE);
8185 break;
8186 }
8187
8188 if (wc->stage == DROP_REFERENCE) {
8189 level = wc->level;
8190 btrfs_node_key(path->nodes[level],
8191 &root_item->drop_progress,
8192 path->slots[level]);
8193 root_item->drop_level = level;
8194 }
8195
8196 BUG_ON(wc->level == 0);
8197 if (btrfs_should_end_transaction(trans, tree_root) ||
8198 (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8199 ret = btrfs_update_root(trans, tree_root,
8200 &root->root_key,
8201 root_item);
8202 if (ret) {
8203 btrfs_abort_transaction(trans, tree_root, ret);
8204 err = ret;
8205 goto out_end_trans;
8206 }
8207
8208 /*
8209 * Qgroup update accounting is run from
8210 * delayed ref handling. This usually works
8211 * out because delayed refs are normally the
8212 * only way qgroup updates are added. However,
8213 * we may have added updates during our tree
8214 * walk so run qgroups here to make sure we
8215 * don't lose any updates.
8216 */
8217 ret = btrfs_delayed_qgroup_accounting(trans,
8218 root->fs_info);
8219 if (ret)
8220 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8221 "running qgroup updates "
8222 "during snapshot delete. "
8223 "Quota is out of sync, "
8224 "rescan required.\n", ret);
8225
8226 btrfs_end_transaction_throttle(trans, tree_root);
8227 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8228 pr_debug("BTRFS: drop snapshot early exit\n");
8229 err = -EAGAIN;
8230 goto out_free;
8231 }
8232
8233 trans = btrfs_start_transaction(tree_root, 0);
8234 if (IS_ERR(trans)) {
8235 err = PTR_ERR(trans);
8236 goto out_free;
8237 }
8238 if (block_rsv)
8239 trans->block_rsv = block_rsv;
8240 }
8241 }
8242 btrfs_release_path(path);
8243 if (err)
8244 goto out_end_trans;
8245
8246 ret = btrfs_del_root(trans, tree_root, &root->root_key);
8247 if (ret) {
8248 btrfs_abort_transaction(trans, tree_root, ret);
8249 goto out_end_trans;
8250 }
8251
8252 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8253 ret = btrfs_find_root(tree_root, &root->root_key, path,
8254 NULL, NULL);
8255 if (ret < 0) {
8256 btrfs_abort_transaction(trans, tree_root, ret);
8257 err = ret;
8258 goto out_end_trans;
8259 } else if (ret > 0) {
8260 /* if we fail to delete the orphan item this time
8261 * around, it'll get picked up the next time.
8262 *
8263 * The most common failure here is just -ENOENT.
8264 */
8265 btrfs_del_orphan_item(trans, tree_root,
8266 root->root_key.objectid);
8267 }
8268 }
8269
8270 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8271 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8272 } else {
8273 free_extent_buffer(root->node);
8274 free_extent_buffer(root->commit_root);
8275 btrfs_put_fs_root(root);
8276 }
8277 root_dropped = true;
8278 out_end_trans:
8279 ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8280 if (ret)
8281 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8282 "running qgroup updates "
8283 "during snapshot delete. "
8284 "Quota is out of sync, "
8285 "rescan required.\n", ret);
8286
8287 btrfs_end_transaction_throttle(trans, tree_root);
8288 out_free:
8289 kfree(wc);
8290 btrfs_free_path(path);
8291 out:
8292 /*
8293 * So if we need to stop dropping the snapshot for whatever reason we
8294 * need to make sure to add it back to the dead root list so that we
8295 * keep trying to do the work later. This also cleans up the root if we
8296 * don't have it in the radix (like when we recover after a power fail
8297 * or unmount) so we don't leak memory.
8298 */
8299 if (!for_reloc && root_dropped == false)
8300 btrfs_add_dead_root(root);
8301 if (err && err != -EAGAIN)
8302 btrfs_std_error(root->fs_info, err);
8303 return err;
8304 }
8305
8306 /*
8307 * drop subtree rooted at tree block 'node'.
8308 *
8309 * NOTE: this function will unlock and release tree block 'node'
8310 * only used by relocation code
8311 */
8312 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8313 struct btrfs_root *root,
8314 struct extent_buffer *node,
8315 struct extent_buffer *parent)
8316 {
8317 struct btrfs_path *path;
8318 struct walk_control *wc;
8319 int level;
8320 int parent_level;
8321 int ret = 0;
8322 int wret;
8323
8324 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8325
8326 path = btrfs_alloc_path();
8327 if (!path)
8328 return -ENOMEM;
8329
8330 wc = kzalloc(sizeof(*wc), GFP_NOFS);
8331 if (!wc) {
8332 btrfs_free_path(path);
8333 return -ENOMEM;
8334 }
8335
8336 btrfs_assert_tree_locked(parent);
8337 parent_level = btrfs_header_level(parent);
8338 extent_buffer_get(parent);
8339 path->nodes[parent_level] = parent;
8340 path->slots[parent_level] = btrfs_header_nritems(parent);
8341
8342 btrfs_assert_tree_locked(node);
8343 level = btrfs_header_level(node);
8344 path->nodes[level] = node;
8345 path->slots[level] = 0;
8346 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8347
8348 wc->refs[parent_level] = 1;
8349 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8350 wc->level = level;
8351 wc->shared_level = -1;
8352 wc->stage = DROP_REFERENCE;
8353 wc->update_ref = 0;
8354 wc->keep_locks = 1;
8355 wc->for_reloc = 1;
8356 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8357
8358 while (1) {
8359 wret = walk_down_tree(trans, root, path, wc);
8360 if (wret < 0) {
8361 ret = wret;
8362 break;
8363 }
8364
8365 wret = walk_up_tree(trans, root, path, wc, parent_level);
8366 if (wret < 0)
8367 ret = wret;
8368 if (wret != 0)
8369 break;
8370 }
8371
8372 kfree(wc);
8373 btrfs_free_path(path);
8374 return ret;
8375 }
8376
8377 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8378 {
8379 u64 num_devices;
8380 u64 stripped;
8381
8382 /*
8383 * if restripe for this chunk_type is on, pick the target profile and
8384 * return; otherwise do the usual balance
8385 */
8386 stripped = get_restripe_target(root->fs_info, flags);
8387 if (stripped)
8388 return extended_to_chunk(stripped);
8389
8390 num_devices = root->fs_info->fs_devices->rw_devices;
8391
8392 stripped = BTRFS_BLOCK_GROUP_RAID0 |
8393 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8394 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8395
8396 if (num_devices == 1) {
8397 stripped |= BTRFS_BLOCK_GROUP_DUP;
8398 stripped = flags & ~stripped;
8399
8400 /* turn raid0 into single device chunks */
8401 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8402 return stripped;
8403
8404 /* turn mirroring into duplication */
8405 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8406 BTRFS_BLOCK_GROUP_RAID10))
8407 return stripped | BTRFS_BLOCK_GROUP_DUP;
8408 } else {
8409 /* they already had raid on here, just return */
8410 if (flags & stripped)
8411 return flags;
8412
8413 stripped |= BTRFS_BLOCK_GROUP_DUP;
8414 stripped = flags & ~stripped;
8415
8416 /* switch duplicated blocks with raid1 */
8417 if (flags & BTRFS_BLOCK_GROUP_DUP)
8418 return stripped | BTRFS_BLOCK_GROUP_RAID1;
8419
8420 /* this is drive concat, leave it alone */
8421 }
8422
8423 return flags;
8424 }
8425
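/*
 * Try to mark a block group read-only.  This only succeeds if the rest
 * of the space_info can still hold the group's unused bytes (plus a
 * small cushion for metadata/system chunk allocation unless force is
 * set), so flipping it RO can't push us into premature ENOSPC.
 */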
8426 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8427 {
8428 struct btrfs_space_info *sinfo = cache->space_info;
8429 u64 num_bytes;
8430 u64 min_allocable_bytes;
8431 int ret = -ENOSPC;
8432
8433
8434 /*
8435 * We need some metadata space and system metadata space for
8436 * allocating chunks in some corner cases, unless we are forced to
8437 * set the block group read-only.
8438 */
8439 if ((sinfo->flags &
8440 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8441 !force)
8442 min_allocable_bytes = 1 * 1024 * 1024;
8443 else
8444 min_allocable_bytes = 0;
8445
8446 spin_lock(&sinfo->lock);
8447 spin_lock(&cache->lock);
8448
8449 if (cache->ro) {
8450 ret = 0;
8451 goto out;
8452 }
8453
8454 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8455 cache->bytes_super - btrfs_block_group_used(&cache->item);
8456
8457 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8458 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8459 min_allocable_bytes <= sinfo->total_bytes) {
8460 sinfo->bytes_readonly += num_bytes;
8461 cache->ro = 1;
8462 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8463 ret = 0;
8464 }
8465 out:
8466 spin_unlock(&cache->lock);
8467 spin_unlock(&sinfo->lock);
8468 return ret;
8469 }
8470
8471 int btrfs_set_block_group_ro(struct btrfs_root *root,
8472 struct btrfs_block_group_cache *cache)
8473
8474 {
8475 struct btrfs_trans_handle *trans;
8476 u64 alloc_flags;
8477 int ret;
8478
8479 BUG_ON(cache->ro);
8480
8481 trans = btrfs_join_transaction(root);
8482 if (IS_ERR(trans))
8483 return PTR_ERR(trans);
8484
8485 alloc_flags = update_block_group_flags(root, cache->flags);
8486 if (alloc_flags != cache->flags) {
8487 ret = do_chunk_alloc(trans, root, alloc_flags,
8488 CHUNK_ALLOC_FORCE);
8489 if (ret < 0)
8490 goto out;
8491 }
8492
8493 ret = set_block_group_ro(cache, 0);
8494 if (!ret)
8495 goto out;
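	/*
	 * The first attempt failed for lack of space: force-allocate another
	 * chunk with the current profile to create headroom, then retry.
	 */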
8496 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8497 ret = do_chunk_alloc(trans, root, alloc_flags,
8498 CHUNK_ALLOC_FORCE);
8499 if (ret < 0)
8500 goto out;
8501 ret = set_block_group_ro(cache, 0);
8502 out:
8503 btrfs_end_transaction(trans, root);
8504 return ret;
8505 }
8506
8507 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8508 struct btrfs_root *root, u64 type)
8509 {
8510 u64 alloc_flags = get_alloc_profile(root, type);
8511 return do_chunk_alloc(trans, root, alloc_flags,
8512 CHUNK_ALLOC_FORCE);
8513 }
8514
8515 /*
8516 * helper to account the unused space of all the readonly block groups in the
8517 * space_info. takes mirrors into account.
8518 */
8519 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8520 {
8521 struct btrfs_block_group_cache *block_group;
8522 u64 free_bytes = 0;
8523 int factor;
8524
8525 /* It's df, we don't care if it's racy */
8526 if (list_empty(&sinfo->ro_bgs))
8527 return 0;
8528
8529 spin_lock(&sinfo->lock);
8530 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8531 spin_lock(&block_group->lock);
8532
8533 if (!block_group->ro) {
8534 spin_unlock(&block_group->lock);
8535 continue;
8536 }
8537
8538 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8539 BTRFS_BLOCK_GROUP_RAID10 |
8540 BTRFS_BLOCK_GROUP_DUP))
8541 factor = 2;
8542 else
8543 factor = 1;
8544
8545 free_bytes += (block_group->key.offset -
8546 btrfs_block_group_used(&block_group->item)) *
8547 factor;
8548
8549 spin_unlock(&block_group->lock);
8550 }
8551 spin_unlock(&sinfo->lock);
8552
8553 return free_bytes;
8554 }
8555
8556 void btrfs_set_block_group_rw(struct btrfs_root *root,
8557 struct btrfs_block_group_cache *cache)
8558 {
8559 struct btrfs_space_info *sinfo = cache->space_info;
8560 u64 num_bytes;
8561
8562 BUG_ON(!cache->ro);
8563
8564 spin_lock(&sinfo->lock);
8565 spin_lock(&cache->lock);
8566 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8567 cache->bytes_super - btrfs_block_group_used(&cache->item);
8568 sinfo->bytes_readonly -= num_bytes;
8569 cache->ro = 0;
8570 list_del_init(&cache->ro_list);
8571 spin_unlock(&cache->lock);
8572 spin_unlock(&sinfo->lock);
8573 }
8574
8575 /*
8576 * checks to see if it's even possible to relocate this block group.
8577 *
8578 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8579 * ok to go ahead and try.
8580 */
8581 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8582 {
8583 struct btrfs_block_group_cache *block_group;
8584 struct btrfs_space_info *space_info;
8585 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8586 struct btrfs_device *device;
8587 struct btrfs_trans_handle *trans;
8588 u64 min_free;
8589 u64 dev_min = 1;
8590 u64 dev_nr = 0;
8591 u64 target;
8592 int index;
8593 int full = 0;
8594 int ret = 0;
8595
8596 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8597
8598 /* odd, couldn't find the block group, leave it alone */
8599 if (!block_group)
8600 return -1;
8601
8602 min_free = btrfs_block_group_used(&block_group->item);
8603
8604 /* no bytes used, we're good */
8605 if (!min_free)
8606 goto out;
8607
8608 space_info = block_group->space_info;
8609 spin_lock(&space_info->lock);
8610
8611 full = space_info->full;
8612
8613 /*
8614 * if this is the last block group we have in this space, we can't
8615 * relocate it unless we're able to allocate a new chunk below.
8616 *
8617 * Otherwise, we need to make sure we have room in the space to handle
8618 * all of the extents from this block group. If we can, we're good
8619 */
8620 if ((space_info->total_bytes != block_group->key.offset) &&
8621 (space_info->bytes_used + space_info->bytes_reserved +
8622 space_info->bytes_pinned + space_info->bytes_readonly +
8623 min_free < space_info->total_bytes)) {
8624 spin_unlock(&space_info->lock);
8625 goto out;
8626 }
8627 spin_unlock(&space_info->lock);
8628
8629 /*
8630 * ok we don't have enough space, but maybe we have free space on our
8631 * devices to allocate new chunks for relocation, so loop through our
8632 * alloc devices and guess if we have enough space. if this block
8633 * group is going to be restriped, run checks against the target
8634 * profile instead of the current one.
8635 */
8636 ret = -1;
8637
8638 /*
8639 * index:
8640 * 0: raid10
8641 * 1: raid1
8642 * 2: dup
8643 * 3: raid0
8644 * 4: single
8645 */
8646 target = get_restripe_target(root->fs_info, block_group->flags);
8647 if (target) {
8648 index = __get_raid_index(extended_to_chunk(target));
8649 } else {
8650 /*
8651 * this is just a balance, so if we were marked as full
8652 * we know there is no space for a new chunk
8653 */
8654 if (full)
8655 goto out;
8656
8657 index = get_block_group_index(block_group);
8658 }
8659
8660 if (index == BTRFS_RAID_RAID10) {
8661 dev_min = 4;
8662 /* Divide by 2 */
8663 min_free >>= 1;
8664 } else if (index == BTRFS_RAID_RAID1) {
8665 dev_min = 2;
8666 } else if (index == BTRFS_RAID_DUP) {
8667 /* Multiply by 2 */
8668 min_free <<= 1;
8669 } else if (index == BTRFS_RAID_RAID0) {
8670 dev_min = fs_devices->rw_devices;
8671 do_div(min_free, dev_min);
8672 }
8673
8674 /* We need to do this so that we can look at pending chunks */
8675 trans = btrfs_join_transaction(root);
8676 if (IS_ERR(trans)) {
8677 ret = PTR_ERR(trans);
8678 goto out;
8679 }
8680
8681 mutex_lock(&root->fs_info->chunk_mutex);
8682 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8683 u64 dev_offset;
8684
8685 /*
8686 * check to make sure we can actually find a chunk with enough
8687 * space to fit our block group in.
8688 */
8689 if (device->total_bytes > device->bytes_used + min_free &&
8690 !device->is_tgtdev_for_dev_replace) {
8691 ret = find_free_dev_extent(trans, device, min_free,
8692 &dev_offset, NULL);
8693 if (!ret)
8694 dev_nr++;
8695
8696 if (dev_nr >= dev_min)
8697 break;
8698
8699 ret = -1;
8700 }
8701 }
8702 mutex_unlock(&root->fs_info->chunk_mutex);
8703 btrfs_end_transaction(trans, root);
8704 out:
8705 btrfs_put_block_group(block_group);
8706 return ret;
8707 }
8708
8709 static int find_first_block_group(struct btrfs_root *root,
8710 struct btrfs_path *path, struct btrfs_key *key)
8711 {
8712 int ret = 0;
8713 struct btrfs_key found_key;
8714 struct extent_buffer *leaf;
8715 int slot;
8716
8717 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8718 if (ret < 0)
8719 goto out;
8720
8721 while (1) {
8722 slot = path->slots[0];
8723 leaf = path->nodes[0];
8724 if (slot >= btrfs_header_nritems(leaf)) {
8725 ret = btrfs_next_leaf(root, path);
8726 if (ret == 0)
8727 continue;
8728 if (ret < 0)
8729 goto out;
8730 break;
8731 }
8732 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8733
8734 if (found_key.objectid >= key->objectid &&
8735 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8736 ret = 0;
8737 goto out;
8738 }
8739 path->slots[0]++;
8740 }
8741 out:
8742 return ret;
8743 }
8744
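/*
 * Drop the free space cache inode reference (iref) held by every block
 * group, so those inodes can finally be evicted; typically run as part
 * of unmount.
 */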
8745 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8746 {
8747 struct btrfs_block_group_cache *block_group;
8748 u64 last = 0;
8749
8750 while (1) {
8751 struct inode *inode;
8752
8753 block_group = btrfs_lookup_first_block_group(info, last);
8754 while (block_group) {
8755 spin_lock(&block_group->lock);
8756 if (block_group->iref)
8757 break;
8758 spin_unlock(&block_group->lock);
8759 block_group = next_block_group(info->tree_root,
8760 block_group);
8761 }
8762 if (!block_group) {
8763 if (last == 0)
8764 break;
8765 last = 0;
8766 continue;
8767 }
8768
8769 inode = block_group->inode;
8770 block_group->iref = 0;
8771 block_group->inode = NULL;
8772 spin_unlock(&block_group->lock);
8773 iput(inode);
8774 last = block_group->key.objectid + block_group->key.offset;
8775 btrfs_put_block_group(block_group);
8776 }
8777 }
8778
8779 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8780 {
8781 struct btrfs_block_group_cache *block_group;
8782 struct btrfs_space_info *space_info;
8783 struct btrfs_caching_control *caching_ctl;
8784 struct rb_node *n;
8785
8786 down_write(&info->commit_root_sem);
8787 while (!list_empty(&info->caching_block_groups)) {
8788 caching_ctl = list_entry(info->caching_block_groups.next,
8789 struct btrfs_caching_control, list);
8790 list_del(&caching_ctl->list);
8791 put_caching_control(caching_ctl);
8792 }
8793 up_write(&info->commit_root_sem);
8794
8795 spin_lock(&info->unused_bgs_lock);
8796 while (!list_empty(&info->unused_bgs)) {
8797 block_group = list_first_entry(&info->unused_bgs,
8798 struct btrfs_block_group_cache,
8799 bg_list);
8800 list_del_init(&block_group->bg_list);
8801 btrfs_put_block_group(block_group);
8802 }
8803 spin_unlock(&info->unused_bgs_lock);
8804
8805 spin_lock(&info->block_group_cache_lock);
8806 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8807 block_group = rb_entry(n, struct btrfs_block_group_cache,
8808 cache_node);
8809 rb_erase(&block_group->cache_node,
8810 &info->block_group_cache_tree);
8811 RB_CLEAR_NODE(&block_group->cache_node);
8812 spin_unlock(&info->block_group_cache_lock);
8813
8814 down_write(&block_group->space_info->groups_sem);
8815 list_del(&block_group->list);
8816 up_write(&block_group->space_info->groups_sem);
8817
8818 if (block_group->cached == BTRFS_CACHE_STARTED)
8819 wait_block_group_cache_done(block_group);
8820
8821 /*
8822 * We haven't cached this block group, which means we could
8823 * possibly have excluded extents on this block group.
8824 */
8825 if (block_group->cached == BTRFS_CACHE_NO ||
8826 block_group->cached == BTRFS_CACHE_ERROR)
8827 free_excluded_extents(info->extent_root, block_group);
8828
8829 btrfs_remove_free_space_cache(block_group);
8830 btrfs_put_block_group(block_group);
8831
8832 spin_lock(&info->block_group_cache_lock);
8833 }
8834 spin_unlock(&info->block_group_cache_lock);
8835
8836 /* now that all the block groups are freed, go through and
8837 * free all the space_info structs. This is only called during
8838 * the final stages of unmount, and so we know nobody is
8839 * using them. We call synchronize_rcu() once before we start,
8840 * just to be on the safe side.
8841 */
8842 synchronize_rcu();
8843
8844 release_global_block_rsv(info);
8845
8846 while (!list_empty(&info->space_info)) {
8847 int i;
8848
8849 space_info = list_entry(info->space_info.next,
8850 struct btrfs_space_info,
8851 list);
8852 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8853 if (WARN_ON(space_info->bytes_pinned > 0 ||
8854 space_info->bytes_reserved > 0 ||
8855 space_info->bytes_may_use > 0)) {
8856 dump_space_info(space_info, 0, 0);
8857 }
8858 }
8859 list_del(&space_info->list);
8860 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8861 struct kobject *kobj;
8862 kobj = space_info->block_group_kobjs[i];
8863 space_info->block_group_kobjs[i] = NULL;
8864 if (kobj) {
8865 kobject_del(kobj);
8866 kobject_put(kobj);
8867 }
8868 }
8869 kobject_del(&space_info->kobj);
8870 kobject_put(&space_info->kobj);
8871 }
8872 return 0;
8873 }
8874
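/*
 * Add the block group to its space_info's list for this RAID index; the
 * first group of a given RAID type also creates the corresponding sysfs
 * directory under the space_info kobject.
 */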
8875 static void __link_block_group(struct btrfs_space_info *space_info,
8876 struct btrfs_block_group_cache *cache)
8877 {
8878 int index = get_block_group_index(cache);
8879 bool first = false;
8880
8881 down_write(&space_info->groups_sem);
8882 if (list_empty(&space_info->block_groups[index]))
8883 first = true;
8884 list_add_tail(&cache->list, &space_info->block_groups[index]);
8885 up_write(&space_info->groups_sem);
8886
8887 if (first) {
8888 struct raid_kobject *rkobj;
8889 int ret;
8890
8891 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8892 if (!rkobj)
8893 goto out_err;
8894 rkobj->raid_type = index;
8895 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8896 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8897 "%s", get_raid_name(index));
8898 if (ret) {
8899 kobject_put(&rkobj->kobj);
8900 goto out_err;
8901 }
8902 space_info->block_group_kobjs[index] = &rkobj->kobj;
8903 }
8904
8905 return;
8906 out_err:
8907 pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8908 }
8909
8910 static struct btrfs_block_group_cache *
8911 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8912 {
8913 struct btrfs_block_group_cache *cache;
8914
8915 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8916 if (!cache)
8917 return NULL;
8918
8919 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8920 GFP_NOFS);
8921 if (!cache->free_space_ctl) {
8922 kfree(cache);
8923 return NULL;
8924 }
8925
8926 cache->key.objectid = start;
8927 cache->key.offset = size;
8928 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8929
8930 cache->sectorsize = root->sectorsize;
8931 cache->fs_info = root->fs_info;
8932 cache->full_stripe_len = btrfs_full_stripe_len(root,
8933 &root->fs_info->mapping_tree,
8934 start);
8935 atomic_set(&cache->count, 1);
8936 spin_lock_init(&cache->lock);
8937 init_rwsem(&cache->data_rwsem);
8938 INIT_LIST_HEAD(&cache->list);
8939 INIT_LIST_HEAD(&cache->cluster_list);
8940 INIT_LIST_HEAD(&cache->bg_list);
8941 INIT_LIST_HEAD(&cache->ro_list);
8942 INIT_LIST_HEAD(&cache->dirty_list);
8943 btrfs_init_free_space_ctl(cache);
8944 atomic_set(&cache->trimming, 0);
8945
8946 return cache;
8947 }
8948
8949 int btrfs_read_block_groups(struct btrfs_root *root)
8950 {
8951 struct btrfs_path *path;
8952 int ret;
8953 struct btrfs_block_group_cache *cache;
8954 struct btrfs_fs_info *info = root->fs_info;
8955 struct btrfs_space_info *space_info;
8956 struct btrfs_key key;
8957 struct btrfs_key found_key;
8958 struct extent_buffer *leaf;
8959 int need_clear = 0;
8960 u64 cache_gen;
8961
8962 root = info->extent_root;
8963 key.objectid = 0;
8964 key.offset = 0;
8965 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8966 path = btrfs_alloc_path();
8967 if (!path)
8968 return -ENOMEM;
8969 path->reada = 1;
8970
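	/*
	 * If the free space cache on disk is stale (cache generation doesn't
	 * match the superblock) or the user mounted with clear_cache, mark
	 * each group so its cache gets rebuilt.
	 */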
8971 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8972 if (btrfs_test_opt(root, SPACE_CACHE) &&
8973 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8974 need_clear = 1;
8975 if (btrfs_test_opt(root, CLEAR_CACHE))
8976 need_clear = 1;
8977
8978 while (1) {
8979 ret = find_first_block_group(root, path, &key);
8980 if (ret > 0)
8981 break;
8982 if (ret != 0)
8983 goto error;
8984
8985 leaf = path->nodes[0];
8986 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8987
8988 cache = btrfs_create_block_group_cache(root, found_key.objectid,
8989 found_key.offset);
8990 if (!cache) {
8991 ret = -ENOMEM;
8992 goto error;
8993 }
8994
8995 if (need_clear) {
8996 /*
8997 * When we mount with old space cache, we need to
8998 * set BTRFS_DC_CLEAR and set dirty flag.
8999 *
9000 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9001 * truncate the old free space cache inode and
9002 * setup a new one.
9003 * b) Setting 'dirty flag' makes sure that we flush
9004 * the new space cache info onto disk.
9005 */
9006 if (btrfs_test_opt(root, SPACE_CACHE))
9007 cache->disk_cache_state = BTRFS_DC_CLEAR;
9008 }
9009
9010 read_extent_buffer(leaf, &cache->item,
9011 btrfs_item_ptr_offset(leaf, path->slots[0]),
9012 sizeof(cache->item));
9013 cache->flags = btrfs_block_group_flags(&cache->item);
9014
9015 key.objectid = found_key.objectid + found_key.offset;
9016 btrfs_release_path(path);
9017
9018 /*
9019 * We need to exclude the super stripes now so that the space
9020 * info has super bytes accounted for, otherwise we'll think
9021 * we have more space than we actually do.
9022 */
9023 ret = exclude_super_stripes(root, cache);
9024 if (ret) {
9025 /*
9026 * We may have excluded something, so call this just in
9027 * case.
9028 */
9029 free_excluded_extents(root, cache);
9030 btrfs_put_block_group(cache);
9031 goto error;
9032 }
9033
9034 /*
9035 * check for two cases, either we are full, and therefore
9036 * don't need to bother with the caching work since we won't
9037 * find any space, or we are empty, and we can just add all
9038 * the space in and be done with it. This saves us a lot of
9039 * time, particularly in the full case.
9040 */
9041 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9042 cache->last_byte_to_unpin = (u64)-1;
9043 cache->cached = BTRFS_CACHE_FINISHED;
9044 free_excluded_extents(root, cache);
9045 } else if (btrfs_block_group_used(&cache->item) == 0) {
9046 cache->last_byte_to_unpin = (u64)-1;
9047 cache->cached = BTRFS_CACHE_FINISHED;
9048 add_new_free_space(cache, root->fs_info,
9049 found_key.objectid,
9050 found_key.objectid +
9051 found_key.offset);
9052 free_excluded_extents(root, cache);
9053 }
9054
9055 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9056 if (ret) {
9057 btrfs_remove_free_space_cache(cache);
9058 btrfs_put_block_group(cache);
9059 goto error;
9060 }
9061
9062 ret = update_space_info(info, cache->flags, found_key.offset,
9063 btrfs_block_group_used(&cache->item),
9064 &space_info);
9065 if (ret) {
9066 btrfs_remove_free_space_cache(cache);
9067 spin_lock(&info->block_group_cache_lock);
9068 rb_erase(&cache->cache_node,
9069 &info->block_group_cache_tree);
9070 RB_CLEAR_NODE(&cache->cache_node);
9071 spin_unlock(&info->block_group_cache_lock);
9072 btrfs_put_block_group(cache);
9073 goto error;
9074 }
9075
9076 cache->space_info = space_info;
9077 spin_lock(&cache->space_info->lock);
9078 cache->space_info->bytes_readonly += cache->bytes_super;
9079 spin_unlock(&cache->space_info->lock);
9080
9081 __link_block_group(space_info, cache);
9082
9083 set_avail_alloc_bits(root->fs_info, cache->flags);
9084 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9085 set_block_group_ro(cache, 1);
9086 } else if (btrfs_block_group_used(&cache->item) == 0) {
9087 spin_lock(&info->unused_bgs_lock);
9088 /* Should always be true but just in case. */
9089 if (list_empty(&cache->bg_list)) {
9090 btrfs_get_block_group(cache);
9091 list_add_tail(&cache->bg_list,
9092 &info->unused_bgs);
9093 }
9094 spin_unlock(&info->unused_bgs_lock);
9095 }
9096 }
9097
9098 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9099 if (!(get_alloc_profile(root, space_info->flags) &
9100 (BTRFS_BLOCK_GROUP_RAID10 |
9101 BTRFS_BLOCK_GROUP_RAID1 |
9102 BTRFS_BLOCK_GROUP_RAID5 |
9103 BTRFS_BLOCK_GROUP_RAID6 |
9104 BTRFS_BLOCK_GROUP_DUP)))
9105 continue;
9106 /*
9107 * avoid allocating from un-mirrored block groups if there are
9108 * mirrored block groups.
9109 */
9110 list_for_each_entry(cache,
9111 &space_info->block_groups[BTRFS_RAID_RAID0],
9112 list)
9113 set_block_group_ro(cache, 1);
9114 list_for_each_entry(cache,
9115 &space_info->block_groups[BTRFS_RAID_SINGLE],
9116 list)
9117 set_block_group_ro(cache, 1);
9118 }
9119
9120 init_global_block_rsv(info);
9121 ret = 0;
9122 error:
9123 btrfs_free_path(path);
9124 return ret;
9125 }
9126
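/*
 * Insert the block group items for all block groups created during this
 * transaction (queued on trans->new_bgs by btrfs_make_block_group())
 * and finish their chunk allocation.
 */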
9127 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9128 struct btrfs_root *root)
9129 {
9130 struct btrfs_block_group_cache *block_group, *tmp;
9131 struct btrfs_root *extent_root = root->fs_info->extent_root;
9132 struct btrfs_block_group_item item;
9133 struct btrfs_key key;
9134 int ret = 0;
9135
9136 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9137 if (ret)
9138 goto next;
9139
9140 spin_lock(&block_group->lock);
9141 memcpy(&item, &block_group->item, sizeof(item));
9142 memcpy(&key, &block_group->key, sizeof(key));
9143 spin_unlock(&block_group->lock);
9144
9145 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9146 sizeof(item));
9147 if (ret)
9148 btrfs_abort_transaction(trans, extent_root, ret);
9149 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9150 key.objectid, key.offset);
9151 if (ret)
9152 btrfs_abort_transaction(trans, extent_root, ret);
9153 next:
9154 list_del_init(&block_group->bg_list);
9155 }
9156 }
9157
9158 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9159 struct btrfs_root *root, u64 bytes_used,
9160 u64 type, u64 chunk_objectid, u64 chunk_offset,
9161 u64 size)
9162 {
9163 int ret;
9164 struct btrfs_root *extent_root;
9165 struct btrfs_block_group_cache *cache;
9166
9167 extent_root = root->fs_info->extent_root;
9168
9169 btrfs_set_log_full_commit(root->fs_info, trans);
9170
9171 cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9172 if (!cache)
9173 return -ENOMEM;
9174
9175 btrfs_set_block_group_used(&cache->item, bytes_used);
9176 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9177 btrfs_set_block_group_flags(&cache->item, type);
9178
9179 cache->flags = type;
9180 cache->last_byte_to_unpin = (u64)-1;
9181 cache->cached = BTRFS_CACHE_FINISHED;
9182 ret = exclude_super_stripes(root, cache);
9183 if (ret) {
9184 /*
9185 * We may have excluded something, so call this just in
9186 * case.
9187 */
9188 free_excluded_extents(root, cache);
9189 btrfs_put_block_group(cache);
9190 return ret;
9191 }
9192
9193 add_new_free_space(cache, root->fs_info, chunk_offset,
9194 chunk_offset + size);
9195
9196 free_excluded_extents(root, cache);
9197
9198 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9199 if (ret) {
9200 btrfs_remove_free_space_cache(cache);
9201 btrfs_put_block_group(cache);
9202 return ret;
9203 }
9204
9205 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9206 &cache->space_info);
9207 if (ret) {
9208 btrfs_remove_free_space_cache(cache);
9209 spin_lock(&root->fs_info->block_group_cache_lock);
9210 rb_erase(&cache->cache_node,
9211 &root->fs_info->block_group_cache_tree);
9212 RB_CLEAR_NODE(&cache->cache_node);
9213 spin_unlock(&root->fs_info->block_group_cache_lock);
9214 btrfs_put_block_group(cache);
9215 return ret;
9216 }
9217 update_global_block_rsv(root->fs_info);
9218
9219 spin_lock(&cache->space_info->lock);
9220 cache->space_info->bytes_readonly += cache->bytes_super;
9221 spin_unlock(&cache->space_info->lock);
9222
9223 __link_block_group(cache->space_info, cache);
9224
9225 list_add_tail(&cache->bg_list, &trans->new_bgs);
9226
9227 set_avail_alloc_bits(extent_root->fs_info, type);
9228
9229 return 0;
9230 }
9231
9232 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9233 {
9234 u64 extra_flags = chunk_to_extended(flags) &
9235 BTRFS_EXTENDED_PROFILE_MASK;
9236
9237 write_seqlock(&fs_info->profiles_lock);
9238 if (flags & BTRFS_BLOCK_GROUP_DATA)
9239 fs_info->avail_data_alloc_bits &= ~extra_flags;
9240 if (flags & BTRFS_BLOCK_GROUP_METADATA)
9241 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9242 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9243 fs_info->avail_system_alloc_bits &= ~extra_flags;
9244 write_sequnlock(&fs_info->profiles_lock);
9245 }
9246
9247 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9248 struct btrfs_root *root, u64 group_start,
9249 struct extent_map *em)
9250 {
9251 struct btrfs_path *path;
9252 struct btrfs_block_group_cache *block_group;
9253 struct btrfs_free_cluster *cluster;
9254 struct btrfs_root *tree_root = root->fs_info->tree_root;
9255 struct btrfs_key key;
9256 struct inode *inode;
9257 struct kobject *kobj = NULL;
9258 int ret;
9259 int index;
9260 int factor;
9261 struct btrfs_caching_control *caching_ctl = NULL;
9262 bool remove_em;
9263
9264 root = root->fs_info->extent_root;
9265
9266 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9267 BUG_ON(!block_group);
9268 BUG_ON(!block_group->ro);
9269
9270 /*
9271 * Free the reserved super bytes from this block group before
9272 * removing it.
9273 */
9274 free_excluded_extents(root, block_group);
9275
9276 memcpy(&key, &block_group->key, sizeof(key));
9277 index = get_block_group_index(block_group);
9278 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9279 BTRFS_BLOCK_GROUP_RAID1 |
9280 BTRFS_BLOCK_GROUP_RAID10))
9281 factor = 2;
9282 else
9283 factor = 1;
9284
9285 /* make sure this block group isn't part of an allocation cluster */
9286 cluster = &root->fs_info->data_alloc_cluster;
9287 spin_lock(&cluster->refill_lock);
9288 btrfs_return_cluster_to_free_space(block_group, cluster);
9289 spin_unlock(&cluster->refill_lock);
9290
9291 /*
9292 * make sure this block group isn't part of a metadata
9293 * allocation cluster
9294 */
9295 cluster = &root->fs_info->meta_alloc_cluster;
9296 spin_lock(&cluster->refill_lock);
9297 btrfs_return_cluster_to_free_space(block_group, cluster);
9298 spin_unlock(&cluster->refill_lock);
9299
9300 path = btrfs_alloc_path();
9301 if (!path) {
9302 ret = -ENOMEM;
9303 goto out;
9304 }
9305
9306 inode = lookup_free_space_inode(tree_root, block_group, path);
9307 if (!IS_ERR(inode)) {
9308 ret = btrfs_orphan_add(trans, inode);
9309 if (ret) {
9310 btrfs_add_delayed_iput(inode);
9311 goto out;
9312 }
9313 clear_nlink(inode);
9314 /* One for the block group's ref */
9315 spin_lock(&block_group->lock);
9316 if (block_group->iref) {
9317 block_group->iref = 0;
9318 block_group->inode = NULL;
9319 spin_unlock(&block_group->lock);
9320 iput(inode);
9321 } else {
9322 spin_unlock(&block_group->lock);
9323 }
9324 /* One for our lookup ref */
9325 btrfs_add_delayed_iput(inode);
9326 }
9327
9328 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9329 key.offset = block_group->key.objectid;
9330 key.type = 0;
9331
9332 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9333 if (ret < 0)
9334 goto out;
9335 if (ret > 0)
9336 btrfs_release_path(path);
9337 if (ret == 0) {
9338 ret = btrfs_del_item(trans, tree_root, path);
9339 if (ret)
9340 goto out;
9341 btrfs_release_path(path);
9342 }
9343
9344 spin_lock(&root->fs_info->block_group_cache_lock);
9345 rb_erase(&block_group->cache_node,
9346 &root->fs_info->block_group_cache_tree);
9347 RB_CLEAR_NODE(&block_group->cache_node);
9348
9349 if (root->fs_info->first_logical_byte == block_group->key.objectid)
9350 root->fs_info->first_logical_byte = (u64)-1;
9351 spin_unlock(&root->fs_info->block_group_cache_lock);
9352
9353 down_write(&block_group->space_info->groups_sem);
9354 /*
9355 * we must use list_del_init so people can check to see if they
9356 * are still on the list after taking the semaphore
9357 */
9358 list_del_init(&block_group->list);
9359 list_del_init(&block_group->ro_list);
9360 if (list_empty(&block_group->space_info->block_groups[index])) {
9361 kobj = block_group->space_info->block_group_kobjs[index];
9362 block_group->space_info->block_group_kobjs[index] = NULL;
9363 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9364 }
9365 up_write(&block_group->space_info->groups_sem);
9366 if (kobj) {
9367 kobject_del(kobj);
9368 kobject_put(kobj);
9369 }
9370
9371 if (block_group->has_caching_ctl)
9372 caching_ctl = get_caching_control(block_group);
9373 if (block_group->cached == BTRFS_CACHE_STARTED)
9374 wait_block_group_cache_done(block_group);
9375 if (block_group->has_caching_ctl) {
9376 down_write(&root->fs_info->commit_root_sem);
9377 if (!caching_ctl) {
9378 struct btrfs_caching_control *ctl;
9379
9380 list_for_each_entry(ctl,
9381 &root->fs_info->caching_block_groups, list)
9382 if (ctl->block_group == block_group) {
9383 caching_ctl = ctl;
9384 atomic_inc(&caching_ctl->count);
9385 break;
9386 }
9387 }
9388 if (caching_ctl)
9389 list_del_init(&caching_ctl->list);
9390 up_write(&root->fs_info->commit_root_sem);
9391 if (caching_ctl) {
9392 /* Once for the caching bgs list and once for us. */
9393 put_caching_control(caching_ctl);
9394 put_caching_control(caching_ctl);
9395 }
9396 }
9397
9398 spin_lock(&trans->transaction->dirty_bgs_lock);
9399 if (!list_empty(&block_group->dirty_list)) {
9400 list_del_init(&block_group->dirty_list);
9401 btrfs_put_block_group(block_group);
9402 }
9403 spin_unlock(&trans->transaction->dirty_bgs_lock);
9404
9405 btrfs_remove_free_space_cache(block_group);
9406
9407 spin_lock(&block_group->space_info->lock);
9408 block_group->space_info->total_bytes -= block_group->key.offset;
9409 block_group->space_info->bytes_readonly -= block_group->key.offset;
9410 block_group->space_info->disk_total -= block_group->key.offset * factor;
9411 spin_unlock(&block_group->space_info->lock);
9412
9413 memcpy(&key, &block_group->key, sizeof(key));
9414
9415 lock_chunks(root);
9416 if (!list_empty(&em->list)) {
9417 /* We're in the transaction->pending_chunks list. */
9418 free_extent_map(em);
9419 }
9420 spin_lock(&block_group->lock);
9421 block_group->removed = 1;
9422 /*
9423 * At this point trimming can't start on this block group, because we
9424 * removed the block group from the tree fs_info->block_group_cache_tree
9425 * so no one can find it anymore, and even if someone already got this
9426 * block group before we removed it from the rbtree, they have already
9427 * incremented block_group->trimming - if they didn't, they won't find
9428 * any free space entries because we already removed them all when we
9429 * called btrfs_remove_free_space_cache().
9430 *
9431 * Also, we must not remove the extent map from fs_info->mapping_tree yet,
9432 * so that the same logical address range and physical device space ranges
9433 * cannot be reused for a new block group. This is because our fs trim
9434 * operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is completely
9435 * transactionless: while it is trimming a range, the currently running
9436 * transaction might finish and a new one start, allowing new block groups
9437 * to be created that could reuse the same physical device locations,
9438 * unless we take this special care.
9439 */
9440 remove_em = (atomic_read(&block_group->trimming) == 0);
9441 /*
9442 * Make sure a trimmer task always sees the em in the pinned_chunks list
9443 * if it sees block_group->removed == 1 (needs to lock block_group->lock
9444 * before checking block_group->removed).
9445 */
9446 if (!remove_em) {
9447 /*
9448 * Our em might be in trans->transaction->pending_chunks which
9449 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9450 * and so is the fs_info->pinned_chunks list.
9451 *
9452 * So at this point we must be holding the chunk_mutex to avoid
9453 * any races with chunk allocation (more specifically at
9454 * volumes.c:contains_pending_extent()), to ensure it always
9455 * sees the em, either in the pending_chunks list or in the
9456 * pinned_chunks list.
9457 */
9458 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9459 }
9460 spin_unlock(&block_group->lock);
9461
9462 if (remove_em) {
9463 struct extent_map_tree *em_tree;
9464
9465 em_tree = &root->fs_info->mapping_tree.map_tree;
9466 write_lock(&em_tree->lock);
9467 /*
9468 * The em might be in the pending_chunks list, so make sure the
9469 * chunk mutex is locked, since remove_extent_mapping() will
9470 * delete us from that list.
9471 */
9472 remove_extent_mapping(em_tree, em);
9473 write_unlock(&em_tree->lock);
9474 /* once for the tree */
9475 free_extent_map(em);
9476 }
9477
9478 unlock_chunks(root);
9479
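/*
 * Drop the last two references: one held by the block group cache
 * rbtree the group was removed from above, and (assuming the usual
 * pattern) one taken by the lookup at the start of this removal.
 */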
9480 btrfs_put_block_group(block_group);
9481 btrfs_put_block_group(block_group);
9482
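/* Finally, delete the block group item itself from the extent tree. */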
9483 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9484 if (ret > 0)
9485 ret = -EIO;
9486 if (ret < 0)
9487 goto out;
9488
9489 ret = btrfs_del_item(trans, root, path);
9490 out:
9491 btrfs_free_path(path);
9492 return ret;
9493 }
9494
9495 /*
9496 * Process the unused_bgs list and remove any that don't have any allocated
9497 * space inside of them.
9498 */
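/*
 * Context note (illustrative, not taken from this file): block groups are
 * typically queued on fs_info->unused_bgs once their used byte count drops
 * to zero, and the cleaner kthread then calls this function to dispose of
 * them, joining a transaction per block group as needed below.
 */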
9499 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9500 {
9501 struct btrfs_block_group_cache *block_group;
9502 struct btrfs_space_info *space_info;
9503 struct btrfs_root *root = fs_info->extent_root;
9504 struct btrfs_trans_handle *trans;
9505 int ret = 0;
9506
9507 if (!fs_info->open)
9508 return;
9509
9510 spin_lock(&fs_info->unused_bgs_lock);
9511 while (!list_empty(&fs_info->unused_bgs)) {
9512 u64 start, end;
9513
9514 block_group = list_first_entry(&fs_info->unused_bgs,
9515 struct btrfs_block_group_cache,
9516 bg_list);
9517 space_info = block_group->space_info;
9518 list_del_init(&block_group->bg_list);
9519 if (ret || btrfs_mixed_space_info(space_info)) {
9520 btrfs_put_block_group(block_group);
9521 continue;
9522 }
9523 spin_unlock(&fs_info->unused_bgs_lock);
9524
9525 /* Don't want to race with allocators so take the groups_sem */
9526 down_write(&space_info->groups_sem);
9527 spin_lock(&block_group->lock);
9528 if (block_group->reserved ||
9529 btrfs_block_group_used(&block_group->item) ||
9530 block_group->ro) {
9531 /*
9532 * We want to bail if we made new allocations or have
9533 * outstanding allocations in this block group. We do
9534 * the ro check in case balance is currently acting on
9535 * this block group.
9536 */
9537 spin_unlock(&block_group->lock);
9538 up_write(&space_info->groups_sem);
9539 goto next;
9540 }
9541 spin_unlock(&block_group->lock);
9542
9543 /* We don't want to force the issue; only flip to read-only if it's ok. */
9544 ret = set_block_group_ro(block_group, 0);
9545 up_write(&space_info->groups_sem);
9546 if (ret < 0) {
9547 ret = 0;
9548 goto next;
9549 }
9550
9551 /*
9552 * We want to do this before anything else so that we can recover
9553 * properly if we fail to join the transaction.
9554 */
9555 trans = btrfs_join_transaction(root);
9556 if (IS_ERR(trans)) {
9557 btrfs_set_block_group_rw(root, block_group);
9558 ret = PTR_ERR(trans);
9559 goto next;
9560 }
9561
9562 /*
9563 * We could have pending pinned extents for this block group;
9564 * just delete them, we don't care about them anymore.
9565 */
9566 start = block_group->key.objectid;
9567 end = start + block_group->key.offset - 1;
9568 /*
9569 * Hold the unused_bg_unpin_mutex lock to avoid racing with
9570 * btrfs_finish_extent_commit(). If we are at transaction N,
9571 * another task might be running finish_extent_commit() for the
9572 * previous transaction N - 1, and have seen a range belonging
9573 * to the block group in freed_extents[] before we were able to
9574 * clear the whole block group range from freed_extents[]. This
9575 * means that task could look up the block group after we
9576 * unpinned it from freed_extents[] and removed it, leading to
9577 * a BUG_ON() at btrfs_unpin_extent_range().
9578 */
9579 mutex_lock(&fs_info->unused_bg_unpin_mutex);
9580 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9581 EXTENT_DIRTY, GFP_NOFS);
9582 if (ret) {
9583 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9584 btrfs_set_block_group_rw(root, block_group);
9585 goto end_trans;
9586 }
9587 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9588 EXTENT_DIRTY, GFP_NOFS);
9589 if (ret) {
9590 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9591 btrfs_set_block_group_rw(root, block_group);
9592 goto end_trans;
9593 }
9594 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9595
9596 /* Reset pinned so btrfs_put_block_group doesn't complain */
9597 block_group->pinned = 0;
9598
9599 /*
9600 * btrfs_remove_chunk() will abort the transaction if things go
9601 * horribly wrong.
9602 */
9603 ret = btrfs_remove_chunk(trans, root,
9604 block_group->key.objectid);
9605 end_trans:
9606 btrfs_end_transaction(trans, root);
9607 next:
9608 btrfs_put_block_group(block_group);
9609 spin_lock(&fs_info->unused_bgs_lock);
9610 }
9611 spin_unlock(&fs_info->unused_bgs_lock);
9612 }
9613
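/*
 * Pre-create the space_info structures for the standard block group types:
 * SYSTEM first, then either a single mixed METADATA|DATA space_info or
 * separate METADATA and DATA ones, so later allocations always find an
 * existing space_info to account against.
 */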
9614 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9615 {
9616 struct btrfs_space_info *space_info;
9617 struct btrfs_super_block *disk_super;
9618 u64 features;
9619 u64 flags;
9620 int mixed = 0;
9621 int ret;
9622
9623 disk_super = fs_info->super_copy;
9624 if (!btrfs_super_root(disk_super))
9625 return 1;
9626
9627 features = btrfs_super_incompat_flags(disk_super);
9628 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9629 mixed = 1;
9630
9631 flags = BTRFS_BLOCK_GROUP_SYSTEM;
9632 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9633 if (ret)
9634 goto out;
9635
9636 if (mixed) {
9637 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9638 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9639 } else {
9640 flags = BTRFS_BLOCK_GROUP_METADATA;
9641 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9642 if (ret)
9643 goto out;
9644
9645 flags = BTRFS_BLOCK_GROUP_DATA;
9646 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9647 }
9648 out:
9649 return ret;
9650 }
9651
9652 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9653 {
9654 return unpin_extent_range(root, start, end, false);
9655 }
9656
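/*
 * FITRIM entry point: walk every block group overlapping the requested
 * range, make sure its free space cache is loaded, and discard its free
 * extents; range->len is updated to the number of bytes actually trimmed.
 *
 * Illustrative caller sketch (assumed shape, not copied from the ioctl
 * code; only root->fs_info is used below, so any root of the filesystem
 * works):
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (u64)-1,
 *		.minlen = discard_granularity,	// assumed, from the bdev
 *	};
 *	ret = btrfs_trim_fs(fs_info->tree_root, &range);
 */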
9657 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9658 {
9659 struct btrfs_fs_info *fs_info = root->fs_info;
9660 struct btrfs_block_group_cache *cache = NULL;
9661 u64 group_trimmed;
9662 u64 start;
9663 u64 end;
9664 u64 trimmed = 0;
9665 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9666 int ret = 0;
9667
9668 /*
9669 * Try to trim all FS space; our first block group may start at a non-zero offset.
9670 */
9671 if (range->len == total_bytes)
9672 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9673 else
9674 cache = btrfs_lookup_block_group(fs_info, range->start);
9675
9676 while (cache) {
9677 if (cache->key.objectid >= (range->start + range->len)) {
9678 btrfs_put_block_group(cache);
9679 break;
9680 }
9681
9682 start = max(range->start, cache->key.objectid);
9683 end = min(range->start + range->len,
9684 cache->key.objectid + cache->key.offset);
9685
9686 if (end - start >= range->minlen) {
9687 if (!block_group_cache_done(cache)) {
9688 ret = cache_block_group(cache, 0);
9689 if (ret) {
9690 btrfs_put_block_group(cache);
9691 break;
9692 }
9693 ret = wait_block_group_cache_done(cache);
9694 if (ret) {
9695 btrfs_put_block_group(cache);
9696 break;
9697 }
9698 }
9699 ret = btrfs_trim_block_group(cache,
9700 &group_trimmed,
9701 start,
9702 end,
9703 range->minlen);
9704
9705 trimmed += group_trimmed;
9706 if (ret) {
9707 btrfs_put_block_group(cache);
9708 break;
9709 }
9710 }
9711
9712 cache = next_block_group(fs_info->tree_root, cache);
9713 }
9714
9715 range->len = trimmed;
9716 return ret;
9717 }
9718
9719 /*
9720 * btrfs_{start,end}_write_no_snapshoting() are similar to
9721 * mnt_{want,drop}_write(); they are used to prevent some tasks from writing
9722 * data into the page cache through nocow before the subvolume is snapshotted
9723 * and then flushing it to disk only after the snapshot creation, or to prevent
9724 * operations while snapshotting is ongoing that would cause the snapshot to be
9725 * inconsistent (writes followed by expanding truncates, for example).
9726 */
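/*
 * Illustrative usage sketch (assumed pattern, not a caller in this file):
 *
 *	if (!btrfs_start_write_no_snapshoting(root))
 *		return -EBUSY;		// snapshot pending, fall back or retry
 *	// ... perform the nocow write ...
 *	btrfs_end_write_no_snapshoting(root);
 *
 * The snapshot side is expected to increment root->will_be_snapshoted and
 * then wait for root->subv_writers->counter to drain before creating the
 * snapshot.
 */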
9727 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
9728 {
9729 percpu_counter_dec(&root->subv_writers->counter);
9730 /*
9731 * Make sure counter is updated before we wake up
9732 * waiters.
9733 */
9734 smp_mb();
9735 if (waitqueue_active(&root->subv_writers->wait))
9736 wake_up(&root->subv_writers->wait);
9737 }
9738
9739 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
9740 {
9741 if (atomic_read(&root->will_be_snapshoted))
9742 return 0;
9743
9744 percpu_counter_inc(&root->subv_writers->counter);
9745 /*
9746 * Make sure counter is updated before we check for snapshot creation.
9747 */
9748 smp_mb();
9749 if (atomic_read(&root->will_be_snapshoted)) {
9750 btrfs_end_write_no_snapshoting(root);
9751 return 0;
9752 }
9753 return 1;
9754 }