/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include "print-tree.h"
#include "transaction.h"
#include "ref-cache.h"
#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner);
void maybe_lock_mutex(struct btrfs_root *root)
{
	if (root != root->fs_info->extent_root &&
	    root != root->fs_info->chunk_root &&
	    root != root->fs_info->dev_root) {
		mutex_lock(&root->fs_info->alloc_mutex);
	}
}
void maybe_unlock_mutex(struct btrfs_root *root)
{
	if (root != root->fs_info->extent_root &&
	    root != root->fs_info->chunk_root &&
	    root != root->fs_info->dev_root) {
		mutex_unlock(&root->fs_info->alloc_mutex);
	}
}
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct extent_io_tree *free_space_cache;
	int slot;
	u64 last = 0;
	u64 hole_size;
	u64 first_free;
	int found = 0;

	if (!block_group)
		return 0;

	root = root->fs_info->extent_root;
	free_space_cache = &root->fs_info->free_space_cache;

	if (block_group->cached)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	first_free = block_group->key.objectid;
	key.objectid = block_group->key.objectid;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;
	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		goto err;
	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid + key.offset > first_free)
			first_free = key.objectid + key.offset;
	}
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0)
				continue;
			else
				break;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid) {
			goto next;
		}
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset) {
			break;
		}

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			if (!found) {
				last = first_free;
				found = 1;
			}
			if (key.objectid > last) {
				hole_size = key.objectid - last;
				set_extent_dirty(free_space_cache, last,
						 last + hole_size - 1,
						 GFP_NOFS);
			}
			last = key.objectid + key.offset;
		}
next:
		path->slots[0]++;
	}

	if (!found)
		last = first_free;
	if (block_group->key.objectid +
	    block_group->key.offset > last) {
		hole_size = block_group->key.objectid +
			block_group->key.offset - last;
		set_extent_dirty(free_space_cache, last,
				 last + hole_size - 1, GFP_NOFS);
	}
	block_group->cached = 1;
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}
struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 ptr;
	u64 start;
	u64 end;
	int ret;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return NULL;

	ret = get_state_private(block_group_cache, start, &ptr);
	if (ret)
		return NULL;

	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	return block_group;
}
struct btrfs_block_group_cache *
btrfs_lookup_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 ptr;
	u64 start;
	u64 end;
	int ret;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return NULL;

	ret = get_state_private(block_group_cache, start, &ptr);
	if (ret)
		return NULL;

	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	if (block_group->key.objectid <= bytenr && bytenr <
	    block_group->key.objectid + block_group->key.offset)
		return block_group;
	return NULL;
}
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
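
/*
 * Example (illustration only, not in the original source): a block group
 * created with BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1
 * satisfies a query for BTRFS_BLOCK_GROUP_METADATA alone, but not one for
 * BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID10, because every
 * requested bit must be set in cache->flags.
 */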
static int noinline find_search_start(struct btrfs_root *root,
				      struct btrfs_block_group_cache **cache_ret,
				      u64 *start_ret, u64 num, int data)
{
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	struct extent_io_tree *free_space_cache;
	struct extent_state *state;
	u64 last;
	u64 start = 0;
	u64 cache_miss = 0;
	u64 total_fs_bytes;
	u64 search_start = *start_ret;
	int wrapped = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	free_space_cache = &root->fs_info->free_space_cache;

	if (!cache)
		goto out;

again:
	ret = cache_block_group(root, cache);
	if (ret)
		goto out;

	last = max(search_start, cache->key.objectid);
	if (!block_group_bits(cache, data) || cache->ro)
		goto new_group;

	spin_lock_irq(&free_space_cache->lock);
	state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
	while(1) {
		if (!state) {
			if (!cache_miss)
				cache_miss = last;
			spin_unlock_irq(&free_space_cache->lock);
			goto new_group;
		}

		start = max(last, state->start);
		last = state->end + 1;
		if (last - start < num) {
			do {
				state = extent_state_next(state);
			} while(state && !(state->state & EXTENT_DIRTY));
			continue;
		}
		spin_unlock_irq(&free_space_cache->lock);
		if (cache->ro)
			goto new_group;
		if (start + num > cache->key.objectid + cache->key.offset)
			goto new_group;
		if (!block_group_bits(cache, data)) {
			printk("block group bits don't match %Lu %d\n",
			       cache->flags, data);
		}
		*start_ret = start;
		return 0;
	}
out:
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
	if (!cache) {
		printk("Unable to find block group for %Lu\n", search_start);
		WARN_ON(1);
	}
	return -ENOSPC;

new_group:
	last = cache->key.objectid + cache->key.offset;
wrapped:
	cache = btrfs_lookup_first_block_group(root->fs_info, last);
	if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
		if (!wrapped) {
			wrapped = 1;
			last = search_start;
			goto wrapped;
		}
		goto out;
	}
	if (cache_miss && !cache->cached) {
		cache_block_group(root, cache);
		last = cache_miss;
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
	}
	cache_miss = 0;
	cache = btrfs_find_block_group(root, cache, last, data, 0);
	if (!cache)
		goto no_cache;
	*cache_ret = cache;
	goto again;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
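
/*
 * Worked example (not in the original source, and assuming the integer
 * body sketched above): div_factor(8192, 8) yields 8192 * 8 / 10 = 6553,
 * so callers such as __btrfs_find_block_group() treat a block group as a
 * candidate only while used + pinned stays under roughly 80% (factor 8)
 * or 90% (factor 9) of its size.
 */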
static int block_group_state_bits(u64 flags)
{
	int bits = 0;
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		bits |= BLOCK_GROUP_DATA;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		bits |= BLOCK_GROUP_METADATA;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		bits |= BLOCK_GROUP_SYSTEM;
	return bits;
}
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner)
{
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 used;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 free_check;
	u64 ptr;
	int bit;
	int ret;
	int full_search = 0;
	int factor = 10;

	block_group_cache = &info->block_group_cache;

	if (data & BTRFS_BLOCK_GROUP_METADATA)
		factor = 9;

	bit = block_group_state_bits(data);

	if (search_start) {
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_first_block_group(info, search_start);
		if (shint && block_group_bits(shint, data) && !shint->ro) {
			spin_lock(&shint->lock);
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
				spin_unlock(&shint->lock);
				return shint;
			}
			spin_unlock(&shint->lock);
		}
	}
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		spin_lock(&hint->lock);
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
			spin_unlock(&hint->lock);
			return hint;
		}
		spin_unlock(&hint->lock);
		last = hint->key.objectid + hint->key.offset;
	} else {
		if (hint)
			last = max(hint->key.objectid, search_start);
		else
			last = search_start;
	}
again:
	while(1) {
		cache = NULL;
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, bit);
		if (ret)
			break;

		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret) {
			last = end + 1;
			continue;
		}

		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (!cache->ro && block_group_bits(cache, data)) {
			free_check = div_factor(cache->key.offset, factor);
			if (used + cache->pinned < free_check) {
				found_group = cache;
				spin_unlock(&cache->lock);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		cond_resched();
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return found_group;
}
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
						 struct btrfs_block_group_cache
						 *hint, u64 search_start,
						 int data, int owner)
{
	struct btrfs_block_group_cache *ret;
	ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
	return ret;
}
static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
			   u64 owner, u64 owner_offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(ref_generation);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
		lenum = cpu_to_le64(owner);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
		lenum = cpu_to_le64(owner_offset);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	}
	return ((u64)high_crc << 32) | (u64)low_crc;
}
static int match_extent_ref(struct extent_buffer *leaf,
			    struct btrfs_extent_ref *disk_ref,
			    struct btrfs_extent_ref *cpu_ref)
{
	int ret;
	int len;

	if (cpu_ref->objectid)
		len = sizeof(*cpu_ref);
	else
		len = 2 * sizeof(u64);
	ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
				   len);
	return ret == 0;
}
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 bytenr,
					  u64 root_objectid,
					  u64 ref_generation, u64 owner,
					  u64 owner_offset, int del)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_extent_ref ref;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *disk_ref;
	int ret;
	int ret2;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path,
					del ? -1 : 0, del);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		if (ret != 0) {
			u32 nritems = btrfs_header_nritems(leaf);
			if (path->slots[0] >= nritems) {
				ret2 = btrfs_next_leaf(root, path);
				if (ret2)
					goto out;
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
			if (found_key.objectid != bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY)
				goto out;
			key.offset = found_key.offset;
			if (del) {
				btrfs_release_path(root, path);
				continue;
			}
		}
		disk_ref = btrfs_item_ptr(path->nodes[0],
					  path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
			ret = 0;
			goto out;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		key.offset = found_key.offset + 1;
		btrfs_release_path(root, path);
	}
out:
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume (in theory, not implemented yet)
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks the same as the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a file extent is removed either during snapshot deletion or file
 * truncation, the corresponding back reference is found by searching for:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * Storing sufficient information for a full reverse mapping of a btree
 * block would require storing the lowest key of the block in the backref,
 * and it would require updating that lowest key either before write out or
 * every time it changed.  Instead, the objectid of the lowest key is stored
 * along with the level of the tree block.  This provides a hint
 * about where in the btree the block can be found.  Searches through the
 * btree only need to look for a pointer to that block, so they stop one
 * level higher than the level recorded in the backref.
 *
 * Some btrees do not do reference counting on their extents.  These
 * include the extent tree and the tree of tree roots.  Backrefs for these
 * trees always have a generation of zero.
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
 *
 * When a tree block is cow'd in a reference counted root, new back
 * references are added for all the blocks it points to.  These are of the
 * form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
 *
 * Because the lowest_key_objectid and the level are just hints, they are
 * not used when backrefs are deleted.  When a backref is deleted:
 *
 * if backref was for a tree root:
 *     root_objectid = root->root_key.objectid
 * else
 *     root_objectid = btrfs_header_owner(parent)
 *
 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
 *
 * Back Reference Key hashing:
 *
 * Back references have four fields, each 64 bits long.  Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is set to BTRFS_EXTENT_REF_KEY.
 */
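
/*
 * Illustrative sketch only (not part of the original file): how a
 * file-extent backref key would be assembled under the scheme described
 * above, using the hash_extent_ref() helper defined earlier.  The function
 * name is hypothetical.
 */
static inline void example_file_backref_key(struct btrfs_key *key,
					    u64 bytenr, u64 root_objectid,
					    u64 transid, u64 inode_objectid,
					    u64 file_offset)
{
	key->objectid = bytenr;			/* first byte of the extent */
	key->type = BTRFS_EXTENT_REF_KEY;	/* backref item type */
	/* all four backref fields fold into the 64 bit key offset */
	key->offset = hash_extent_ref(root_objectid, transid,
				      inode_objectid, file_offset);
}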
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, u64 bytenr,
				u64 root_objectid, u64 ref_generation,
				u64 owner, u64 owner_offset)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_extent_ref ref;
	struct btrfs_extent_ref *disk_ref;
	int ret;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
	while (ret == -EEXIST) {
		disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref))
			goto out;
		key.offset++;
		btrfs_release_path(root, path);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      sizeof(ref));
	}
	if (ret)
		goto out;
	disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_extent_ref);
	write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
			    sizeof(ref));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 root_objectid, u64 ref_generation,
				  u64 owner, u64 owner_offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	u32 refs;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret != 0);

	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(l, item);
	btrfs_set_extent_refs(l, item, refs + 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);

	ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
					  path, bytenr, root_objectid,
					  ref_generation, owner, owner_offset);
	BUG_ON(ret);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);

	btrfs_free_path(path);
	return 0;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner, u64 owner_offset)
{
	int ret;

	mutex_lock(&root->fs_info->alloc_mutex);
	ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				     root_objectid, ref_generation,
				     owner, owner_offset);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
	return 0;
}
static int lookup_extent_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u32 *refs)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	key.objectid = bytenr;
	key.offset = num_bytes;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret < 0)
		goto out;
	if (ret != 0) {
		btrfs_print_leaf(root, path->nodes[0]);
		printk("failed to find block number %Lu\n", bytenr);
		BUG();
	}
	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	*refs = btrfs_extent_refs(l, item);
out:
	btrfs_free_path(path);
	return 0;
}
u32 btrfs_count_snapshots_in_path(struct btrfs_root *root,
				  struct btrfs_path *count_path,
				  u64 expected_owner,
				  u64 first_extent)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	u64 bytenr;
	u64 found_objectid;
	u64 found_owner;
	u64 root_objectid = root->root_key.objectid;
	u32 total_count = 0;
	u32 extent_refs;
	u32 cur_count;
	u32 nritems;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	struct btrfs_extent_ref *ref_item;
	int level = -1;

	/* FIXME, needs locking */
	mutex_lock(&root->fs_info->alloc_mutex);
	path = btrfs_alloc_path();
again:
	if (level == -1)
		bytenr = first_extent;
	else
		bytenr = count_path->nodes[level]->start;

	cur_count = 0;
	key.objectid = bytenr;
	key.offset = 0;

	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);

	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY) {
		goto out;
	}

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	extent_refs = btrfs_extent_refs(l, item);
	while (1) {
		l = path->nodes[0];
		nritems = btrfs_header_nritems(l);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret == 0)
				continue;
			break;
		}
		btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			path->slots[0]++;
			continue;
		}

		cur_count++;
		ref_item = btrfs_item_ptr(l, path->slots[0],
					  struct btrfs_extent_ref);
		found_objectid = btrfs_ref_root(l, ref_item);

		if (found_objectid != root_objectid) {
			total_count = 2;
			goto out;
		}
		if (level == -1) {
			found_owner = btrfs_ref_objectid(l, ref_item);
			if (found_owner != expected_owner) {
				total_count = 2;
				goto out;
			}
			/*
			 * nasty.  we don't count a reference held by
			 * the running transaction.  This allows nodatacow
			 * to avoid cow most of the time
			 */
			if (found_owner >= BTRFS_FIRST_FREE_OBJECTID &&
			    btrfs_ref_generation(l, ref_item) ==
			    root->fs_info->generation) {
				extent_refs--;
			}
		}
		total_count = 1;
		path->slots[0]++;
	}
	/*
	 * if there is more than one reference against a data extent,
	 * we have to assume the other ref is another snapshot
	 */
	if (level == -1 && extent_refs > 1) {
		total_count = 2;
		goto out;
	}
	if (cur_count == 0) {
		total_count = 0;
		goto out;
	}
	if (level >= 0 && root->node == count_path->nodes[level])
		goto out;
	level++;
	btrfs_release_path(root, path);
	goto again;

out:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return total_count;
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int cache_ref)
{
	u64 bytenr;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret;
	int faili;
	int err;
	int nr_file_extents = 0;

	if (!root->ref_cows)
		return 0;

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			if (buf != root->commit_root)
				nr_file_extents++;

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf, fi),
				    root->root_key.objectid, trans->transid,
				    key.objectid, key.offset);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			btrfs_node_key_to_cpu(buf, &key, i);

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, bytenr,
					   btrfs_level_size(root, level - 1),
					   root->root_key.objectid,
					   trans->transid,
					   level - 1, key.objectid);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		}
	}
	/* cache original leaf block's references */
	if (level == 0 && cache_ref && buf != root->commit_root) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(nr_file_extents);
		if (!ref) {
			WARN_ON(1);
			goto out;
		}

		btrfs_item_key_to_cpu(buf, &ref->key, 0);

		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_file_extents;
		info = ref->extents;

		for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		BUG_ON(!root->ref_tree);
		ret = btrfs_add_leaf_ref(root, ref);
		WARN_ON(ret);
		btrfs_free_leaf_ref(ref);
	}
out:
	return 0;
fail:
	WARN_ON(1);
#if 0
	for (i = 0; i < faili; i++) {
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;
			err = btrfs_free_extent(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf,
								     fi), 0);
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			err = btrfs_free_extent(trans, root, bytenr,
					btrfs_level_size(root, level - 1), 0);
		}
	}
#endif
	return ret;
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *cache;
	int ret;
	int err = 0;
	int werr = 0;
	struct btrfs_path *path;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 ptr;

	block_group_cache = &root->fs_info->block_group_cache;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, BLOCK_GROUP_DIRTY);
		if (ret)
			break;

		last = end + 1;
		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret)
			break;
		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		err = write_one_cache_group(trans, root,
					    path, cache);
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 * write will work
		 */
		if (err) {
			werr = err;
			continue;
		}
		clear_extent_bits(block_group_cache, start, end,
				  BLOCK_GROUP_DIRTY, GFP_NOFS);
	}
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return werr;
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct list_head *cur;
	struct btrfs_space_info *found;
	list_for_each(cur, head) {
		found = list_entry(cur, struct btrfs_space_info, list);
		if (found->flags == flags)
			return found;
	}
	return NULL;
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		WARN_ON(found->total_bytes < found->bytes_used);
		*space_info = found;
		return 0;
	}
	found = kmalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	list_add(&found->list, &info->space_info);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->num_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
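
/*
 * Example (illustration only): on a single-device filesystem, a request
 * for BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1 is reduced to
 * plain BTRFS_BLOCK_GROUP_METADATA, since RAID1 and RAID0 need more than
 * one device and RAID10 needs at least four.
 */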
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	u64 thresh;
	u64 start;
	u64 num_bytes;
	int ret;

	flags = reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full)
		goto out;

	thresh = div_factor(space_info->total_bytes, 6);
	if (!force &&
	    (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
	    thresh)
		goto out;

	mutex_lock(&extent_root->fs_info->chunk_mutex);
	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
	if (ret == -ENOSPC) {
		printk("space info full %Lu\n", flags);
		space_info->full = 1;
		goto out_unlock;
	}
	BUG_ON(ret);

	ret = btrfs_make_block_group(trans, extent_root, 0, flags,
		     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
	BUG_ON(ret);
out_unlock:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
	return 0;
}
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	u64 start;
	u64 end;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	while(total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
		start = cache->key.objectid;
		end = start + cache->key.offset - 1;
		set_extent_bits(&info->block_group_cache, start, end,
				BLOCK_GROUP_DIRTY, GFP_NOFS);

		spin_lock(&cache->lock);
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			if (mark_free) {
				set_extent_dirty(&info->free_space_cache,
						 bytenr, bytenr + num_bytes - 1,
						 GFP_NOFS);
			}
		}
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	u64 start;
	u64 end;
	int ret;

	ret = find_first_extent_bit(&root->fs_info->block_group_cache,
				    search_start, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return 0;
	return start;
}
static int update_pinned_extents(struct btrfs_root *root,
				 u64 bytenr, u64 num, int pin)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (pin) {
		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1, GFP_NOFS);
	} else {
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1, GFP_NOFS);
	}
	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		if (!cache) {
			u64 first = first_logical_byte(root, bytenr);
			WARN_ON(first < bytenr);
			len = min(first - bytenr, num);
		} else {
			len = min(num, cache->key.offset -
				  (bytenr - cache->key.objectid));
		}
		if (pin) {
			if (cache) {
				spin_lock(&cache->lock);
				cache->pinned += len;
				cache->space_info->bytes_pinned += len;
				spin_unlock(&cache->lock);
			}
			fs_info->total_pinned += len;
		} else {
			if (cache) {
				spin_lock(&cache->lock);
				cache->pinned -= len;
				cache->space_info->bytes_pinned -= len;
				spin_unlock(&cache->lock);
			}
			fs_info->total_pinned -= len;
		}
		bytenr += len;
		num -= len;
	}
	return 0;
}
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
	u64 last = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
	int ret;

	while(1) {
		ret = find_first_extent_bit(pinned_extents, last,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		set_extent_dirty(copy, start, end, GFP_NOFS);
		last = end + 1;
	}
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;
	int ret;
	struct extent_io_tree *free_space_cache;
	free_space_cache = &root->fs_info->free_space_cache;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
		if (need_resched()) {
			mutex_unlock(&root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&root->fs_info->alloc_mutex);
		}
	}
	mutex_unlock(&root->fs_info->alloc_mutex);
	return 0;
}
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root)
{
	u64 start;
	u64 end;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct extent_buffer *eb;
	struct btrfs_path *path;
	struct btrfs_key ins;
	struct btrfs_disk_key first;
	struct btrfs_extent_item extent_item;
	int ret;
	int level;
	int err = 0;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	btrfs_set_stack_extent_refs(&extent_item, 1);
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	path = btrfs_alloc_path();

	while(1) {
		ret = find_first_extent_bit(&info->extent_ins, 0, &start,
					    &end, EXTENT_LOCKED);
		if (ret)
			break;

		ins.objectid = start;
		ins.offset = end + 1 - start;
		err = btrfs_insert_item(trans, extent_root, &ins,
					&extent_item, sizeof(extent_item));
		clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
				  GFP_NOFS);

		eb = btrfs_find_tree_block(extent_root, ins.objectid,
					   ins.offset);

		if (!btrfs_buffer_uptodate(eb, trans->transid)) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			btrfs_read_buffer(eb, trans->transid);
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);
		if (level == 0)
			btrfs_item_key(eb, &first, 0);
		else
			btrfs_node_key(eb, &first, 0);
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		/*
		 * the first key is just a hint, so the race we've created
		 * against reading it is fine
		 */
		err = btrfs_insert_extent_backref(trans, extent_root, path,
					  start, extent_root->root_key.objectid,
					  0, level,
					  btrfs_disk_key_objectid(&first));
		BUG_ON(err);
		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	btrfs_free_path(path);
	return 0;
}
static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
			  int pending)
{
	int err = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (!pending) {
		struct extent_buffer *buf;
		buf = btrfs_find_tree_block(root, bytenr, num_bytes);
		if (buf) {
			if (btrfs_buffer_uptodate(buf, 0) &&
			    btrfs_try_tree_lock(buf)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				u64 header_transid =
					btrfs_header_generation(buf);
				if (header_transid == transid &&
				    !btrfs_header_flag(buf,
					       BTRFS_HEADER_FLAG_WRITTEN)) {
					clean_tree_block(NULL, root, buf);
					btrfs_tree_unlock(buf);
					free_extent_buffer(buf);
					return 1;
				}
				btrfs_tree_unlock(buf);
			}
			free_extent_buffer(buf);
		}
		update_pinned_extents(root, bytenr, num_bytes, 1);
	} else {
		set_extent_bits(&root->fs_info->pending_del,
				bytenr, bytenr + num_bytes - 1,
				EXTENT_LOCKED, GFP_NOFS);
	}
	BUG_ON(err < 0);
	return 0;
}
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 bytenr, u64 num_bytes,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner_objectid, u64 owner_offset, int pin,
			 int mark_free)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	int ret;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	struct btrfs_extent_item *ei;
	u32 refs;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, root_objectid,
				    ref_generation,
				    owner_objectid, owner_offset, 1);
	if (ret == 0) {
		struct btrfs_key found_key;
		extent_slot = path->slots[0];
		while(extent_slot > 0) {
			extent_slot--;
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      extent_slot);
			if (found_key.objectid != bytenr)
				break;
			if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
			    found_key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
		}
		if (!found_extent)
			ret = btrfs_del_item(trans, extent_root, path);
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk("Unable to find ref byte nr %Lu root %Lu "
		       " gen %Lu owner %Lu offset %Lu\n", bytenr,
		       root_objectid, ref_generation, owner_objectid,
		       owner_offset);
	}
	if (!found_extent) {
		btrfs_release_path(extent_root, path);
		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
		if (ret < 0)
			return ret;
		BUG_ON(ret);
		extent_slot = path->slots[0];
	}

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs -= 1;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
		/* if the back ref and the extent are next to each other
		 * they get deleted below in one shot
		 */
		path->slots[0] = extent_slot;
		num_to_del = 2;
	} else if (found_extent) {
		/* otherwise delete the extent back ref */
		ret = btrfs_del_item(trans, extent_root, path);
		BUG_ON(ret);
		/* if refs are 0, we need to setup the path for deletion */
		if (refs == 0) {
			btrfs_release_path(extent_root, path);
			ret = btrfs_search_slot(trans, extent_root, &key, path,
						-1, 1);
			BUG_ON(ret);
		}
	}

	if (refs == 0) {
		u64 super_used;
		u64 root_used;

		if (pin) {
			ret = pin_down_bytes(root, bytenr, num_bytes, 0);
			if (ret > 0)
				mark_free = 1;
			BUG_ON(ret < 0);
		}

		/* block accounting for super block */
		spin_lock_irq(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - num_bytes);
		spin_unlock_irq(&info->delalloc_lock);

		/* block accounting for root item */
		root_used = btrfs_root_used(&root->root_item);
		btrfs_set_root_used(&root->root_item,
				    root_used - num_bytes);
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret)
			return ret;

		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
					 mark_free);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	return ret;
}
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int err = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pending_del;
	struct extent_io_tree *pinned_extents;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	pending_del = &extent_root->fs_info->pending_del;
	pinned_extents = &extent_root->fs_info->pinned_extents;

	while(1) {
		ret = find_first_extent_bit(pending_del, 0, &start, &end,
					    EXTENT_LOCKED);
		if (ret)
			break;
		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
				  GFP_NOFS);
		if (!test_range_bit(&extent_root->fs_info->extent_ins,
				    start, end, EXTENT_LOCKED, 0)) {
			update_pinned_extents(extent_root, start,
					      end + 1 - start, 1);
			ret = __free_extent(trans, extent_root,
					    start, end + 1 - start,
					    extent_root->root_key.objectid,
					    0, 0, 0, 0, 0);
		} else {
			clear_extent_bits(&extent_root->fs_info->extent_ins,
					  start, end, EXTENT_LOCKED, GFP_NOFS);
		}
		if (ret)
			err = ret;

		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	return err;
}
/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 root_objectid,
			       u64 ref_generation, u64 owner_objectid,
			       u64 owner_offset, int pin)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	int pending_ret;
	int ret;

	WARN_ON(num_bytes < root->sectorsize);
	if (!root->ref_cows)
		ref_generation = 0;

	if (root == extent_root) {
		pin_down_bytes(root, bytenr, num_bytes, 1);
		return 0;
	}
	ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
			    ref_generation, owner_objectid, owner_offset,
			    pin, pin == 0);

	finish_current_insert(trans, root->fs_info->extent_root);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
	return ret ? ret : pending_ret;
}
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, u64 bytenr,
		      u64 num_bytes, u64 root_objectid,
		      u64 ref_generation, u64 owner_objectid,
		      u64 owner_offset, int pin)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
				  root_objectid, ref_generation,
				  owner_objectid, owner_offset, pin);
	maybe_unlock_mutex(root);
	return ret;
}
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
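
/*
 * Worked example (not in the original source): with root->stripesize of
 * 65536, stripe_align(root, 65537) returns 131072, while a value already
 * on a 64K boundary comes back unchanged.
 */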
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
	int ret;
	u64 orig_search_start;
	struct btrfs_root * root = orig_root->fs_info->extent_root;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total_needed = num_bytes;
	u64 *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group;
	int full_scan = 0;
	int wrapped = 0;
	int chunk_alloc_done = 0;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->last_alloc;
		empty_cluster = 256 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
		last_ptr = &root->fs_info->last_data_alloc;

	if (last_ptr) {
		if (*last_ptr)
			hint_byte = *last_ptr;
		else
			empty_size += empty_cluster;
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	orig_search_start = search_start;

	if (search_end == (u64)-1)
		search_end = btrfs_super_total_bytes(&info->super_copy);

	if (hint_byte) {
		block_group = btrfs_lookup_first_block_group(info, hint_byte);
		if (!block_group)
			hint_byte = search_start;
		block_group = btrfs_find_block_group(root, block_group,
						     hint_byte, data, 1);
		if (last_ptr && *last_ptr == 0 && block_group)
			hint_byte = block_group->key.objectid;
	} else {
		block_group = btrfs_find_block_group(root,
						     trans->block_group,
						     search_start, data, 1);
	}
	search_start = max(search_start, hint_byte);

	total_needed += empty_size;

check_failed:
	if (!block_group) {
		block_group = btrfs_lookup_first_block_group(info,
							     search_start);
		if (!block_group)
			block_group = btrfs_lookup_first_block_group(info,
						       orig_search_start);
	}
	if (full_scan && !chunk_alloc_done) {
		if (allowed_chunk_alloc) {
			do_chunk_alloc(trans, root,
				     num_bytes + 2 * 1024 * 1024, data, 1);
			allowed_chunk_alloc = 0;
		} else if (block_group && block_group_bits(block_group, data)) {
			block_group->space_info->force_alloc = 1;
		}
		chunk_alloc_done = 1;
	}
	ret = find_search_start(root, &block_group, &search_start,
				total_needed, data);
	if (ret == -ENOSPC && last_ptr && *last_ptr) {
		*last_ptr = 0;
		block_group = btrfs_lookup_first_block_group(info,
							     orig_search_start);
		search_start = orig_search_start;
		ret = find_search_start(root, &block_group, &search_start,
					total_needed, data);
	}
	if (ret == -ENOSPC)
		goto enospc;
	if (ret)
		goto error;

	if (last_ptr && *last_ptr && search_start != *last_ptr) {
		*last_ptr = 0;
		if (!empty_size) {
			empty_size += empty_cluster;
			total_needed += empty_size;
		}
		block_group = btrfs_lookup_first_block_group(info,
							     orig_search_start);
		search_start = orig_search_start;
		ret = find_search_start(root, &block_group,
					&search_start, total_needed, data);
		if (ret == -ENOSPC)
			goto enospc;
		if (ret)
			goto error;
	}

	search_start = stripe_align(root, search_start);
	ins->objectid = search_start;
	ins->offset = num_bytes;

	if (ins->objectid + num_bytes >= search_end)
		goto enospc;

	if (ins->objectid + num_bytes >
	    block_group->key.objectid + block_group->key.offset) {
		search_start = block_group->key.objectid +
			block_group->key.offset;
		goto new_group;
	}

	if (test_range_bit(&info->extent_ins, ins->objectid,
			   ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (test_range_bit(&info->pinned_extents, ins->objectid,
			   ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
	    ins->objectid < exclude_start + exclude_nr)) {
		search_start = exclude_start + exclude_nr;
		goto new_group;
	}

	if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
		block_group = btrfs_lookup_block_group(info, ins->objectid);
		if (block_group)
			trans->block_group = block_group;
	}
	ins->offset = num_bytes;
	if (last_ptr) {
		*last_ptr = ins->objectid + ins->offset;
		if (*last_ptr ==
		    btrfs_super_total_bytes(&root->fs_info->super_copy)) {
			*last_ptr = 0;
		}
	}
	return 0;

new_group:
	if (search_start + num_bytes >= search_end) {
enospc:
		search_start = orig_search_start;
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
	}
	block_group = btrfs_lookup_first_block_group(info, search_start);
	cond_resched();
	block_group = btrfs_find_block_group(root, block_group,
					     search_start, data, 0);
	goto check_failed;

error:
	return ret;
}
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 num_bytes, u64 min_alloc_size,
				  u64 empty_size, u64 hint_byte,
				  u64 search_end, struct btrfs_key *ins,
				  u64 data)
{
	int ret;
	u64 search_start = 0;
	u64 alloc_profile;
	struct btrfs_fs_info *info = root->fs_info;

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
			        info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
			        info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
			        info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}
again:
	data = reduce_alloc_profile(root, data);
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows) {
		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     2 * 1024 * 1024,
					     BTRFS_BLOCK_GROUP_METADATA |
					     (info->metadata_alloc_profile &
					      info->avail_metadata_alloc_bits),
					     0);
			BUG_ON(ret);
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);
		BUG_ON(ret);
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret) {
		printk("allocation failed flags %Lu\n", data);
		BUG();
	}
	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);
	return ret;
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
				     empty_size, hint_byte, search_end, ins,
				     data);
	maybe_unlock_mutex(root);
	return ret;
}
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, u64 owner_offset,
					 struct btrfs_key *ins)
{
	int ret;
	int pending_ret;
	u64 super_used;
	u64 root_used;
	u64 num_bytes = ins->offset;
	u32 sizes[2];
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_ref *ref;
	struct btrfs_path *path;
	struct btrfs_key keys[2];

	/* block accounting for super block */
	spin_lock_irq(&info->delalloc_lock);
	super_used = btrfs_super_bytes_used(&info->super_copy);
	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
	spin_unlock_irq(&info->delalloc_lock);

	/* block accounting for root item */
	root_used = btrfs_root_used(&root->root_item);
	btrfs_set_root_used(&root->root_item, root_used + num_bytes);

	if (root == extent_root) {
		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		goto update_block;
	}

	memcpy(&keys[0], ins, sizeof(*ins));
	keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
					 owner, owner_offset);
	keys[1].objectid = ins->objectid;
	keys[1].type = BTRFS_EXTENT_REF_KEY;
	sizes[0] = sizeof(*extent_item);
	sizes[1] = sizeof(*ref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
				       sizes, 2);
	BUG_ON(ret);

	extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_extent_ref);

	btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
	btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
	btrfs_set_ref_objectid(path->nodes[0], ref, owner);
	btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);

	if (ret)
		goto out;
	if (pending_ret) {
		ret = pending_ret;
		goto out;
	}

update_block:
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
	if (ret) {
		printk("update block group failed for %Lu %Lu\n",
		       ins->objectid, ins->offset);
		BUG();
	}
out:
	return ret;
}
int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 root_objectid, u64 ref_generation,
				u64 owner, u64 owner_offset,
				struct btrfs_key *ins)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
					    ref_generation, owner,
					    owner_offset, ins);
	maybe_unlock_mutex(root);
	return ret;
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 min_alloc_size,
		       u64 root_objectid, u64 ref_generation,
		       u64 owner, u64 owner_offset,
		       u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, u64 data)
{
	int ret;

	maybe_lock_mutex(root);

	ret = __btrfs_reserve_extent(trans, root, num_bytes,
				     min_alloc_size, empty_size, hint_byte,
				     search_end, ins, data);
	BUG_ON(ret);
	ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
					    ref_generation, owner,
					    owner_offset, ins);
	BUG_ON(ret);

	maybe_unlock_mutex(root);
	return ret;
}
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u32 blocksize,
					     u64 root_objectid,
					     u64 ref_generation,
					     u64 first_objectid,
					     int level,
					     u64 hint,
					     u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct extent_buffer *buf;

	ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
				 root_objectid, ref_generation,
				 level, first_objectid, empty_size, hint,
				 (u64)-1, &ins, 0);
	if (ret) {
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}
	buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
	if (!buf) {
		btrfs_free_extent(trans, root, ins.objectid, blocksize,
				  root->root_key.objectid, ref_generation,
				  0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);
	btrfs_set_buffer_uptodate(buf);

	if (PageDirty(buf->first_page)) {
		printk("page %lu dirty\n", buf->first_page->index);
		WARN_ON(1);
	}

	set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
			 buf->start + buf->len - 1, GFP_NOFS);
	trans->blocks_used++;
	return buf;
}
static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct extent_buffer *leaf)
{
	u64 leaf_owner;
	u64 leaf_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int nritems;
	int ret;

	BUG_ON(!btrfs_is_leaf(leaf));
	nritems = btrfs_header_nritems(leaf);
	leaf_owner = btrfs_header_owner(leaf);
	leaf_generation = btrfs_header_generation(leaf);

	mutex_unlock(&root->fs_info->alloc_mutex);

	for (i = 0; i < nritems; i++) {
		u64 disk_bytenr;
		cond_resched();

		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		/*
		 * FIXME make sure to insert a trans record that
		 * repeats the snapshot del on crash
		 */
		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (disk_bytenr == 0)
			continue;

		mutex_lock(&root->fs_info->alloc_mutex);
		ret = __btrfs_free_extent(trans, root, disk_bytenr,
				btrfs_file_extent_disk_num_bytes(leaf, fi),
				leaf_owner, leaf_generation,
				key.objectid, key.offset, 0);
		mutex_unlock(&root->fs_info->alloc_mutex);
		BUG_ON(ret);
	}

	mutex_lock(&root->fs_info->alloc_mutex);
	return 0;
}
static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_leaf_ref *ref)
{
	int i;
	int ret;
	struct btrfs_extent_info *info = ref->extents;

	mutex_unlock(&root->fs_info->alloc_mutex);
	for (i = 0; i < ref->nritems; i++) {
		mutex_lock(&root->fs_info->alloc_mutex);
		ret = __btrfs_free_extent(trans, root,
					  info->bytenr, info->num_bytes,
					  ref->owner, ref->generation,
					  info->objectid, info->offset, 0);
		mutex_unlock(&root->fs_info->alloc_mutex);
		BUG_ON(ret);
		info++;
	}
	mutex_lock(&root->fs_info->alloc_mutex);

	return 0;
}
static void noinline reada_walk_down(struct btrfs_root *root,
				     struct extent_buffer *node,
				     int slot)
{
	u64 bytenr;
	u64 last = 0;
	u32 nritems;
	u32 refs;
	u32 blocksize;
	int ret;
	int i;
	int level;
	int skipped = 0;

	nritems = btrfs_header_nritems(node);
	level = btrfs_header_level(node);

	for (i = slot; i < nritems && skipped < 32; i++) {
		bytenr = btrfs_node_blockptr(node, i);
		if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
			     (last > bytenr && last - bytenr > 32 * 1024))) {
			skipped++;
			continue;
		}
		blocksize = btrfs_level_size(root, level - 1);
		if (i != slot) {
			ret = lookup_extent_ref(NULL, root, bytenr,
						blocksize, &refs);
			BUG_ON(ret);
			if (refs != 1) {
				skipped++;
				continue;
			}
		}
		ret = readahead_tree_block(root, bytenr, blocksize,
					   btrfs_node_ptr_generation(node, i));
		last = bytenr + blocksize;
		cond_resched();
		if (ret)
			break;
	}
}

/*
 * we want to avoid as much random IO as we can with the alloc mutex
 * held, so drop the lock and do the lookup, then do it again with the
 * lock held.
 */
int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
			      u32 *refs)
{
	mutex_unlock(&root->fs_info->alloc_mutex);
	lookup_extent_ref(NULL, root, start, len, refs);
	cond_resched();
	mutex_lock(&root->fs_info->alloc_mutex);
	return lookup_extent_ref(NULL, root, start, len, refs);
}
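
/*
 * The first, unlocked lookup above is purely a cache warmer: it pulls
 * the extent tree blocks for the range into memory while nobody is
 * waiting on alloc_mutex.  Only the second lookup, done with the mutex
 * held, returns an answer the caller may trust, since the refcount can
 * change while the lock is dropped.  Typical use from the snapshot
 * deletion walk below:
 *
 *	u32 refs;
 *	ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
 *	if (refs != 1) {
 *		// shared with another snapshot: drop one ref,
 *		// don't descend into the subtree
 *	}
 */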

/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	struct btrfs_leaf_ref *ref;
	u32 blocksize;
	int ret;
	u32 refs;

	mutex_lock(&root->fs_info->alloc_mutex);

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
					path->nodes[*level]->len, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;
		if (*level == 0) {
			ret = drop_leaf_ref_no_cache(trans, root, cur);
			BUG_ON(ret);
			break;
		}
		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
		BUG_ON(ret);
		if (refs != 1) {
			parent = path->nodes[*level];
			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			path->slots[*level]++;
			ret = __btrfs_free_extent(trans, root, bytenr,
						  blocksize, root_owner,
						  root_gen, 0, 0, 1);
			BUG_ON(ret);
			continue;
		}

		if (*level == 1) {
			struct btrfs_key key;
			btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
			ref = btrfs_lookup_leaf_ref(root, &key);
			if (ref) {
				ret = drop_leaf_ref(trans, root, ref);
				BUG_ON(ret);
				btrfs_remove_leaf_ref(root, ref);
				btrfs_free_leaf_ref(ref);
				*level = 0;
				break;
			}
		}

		next = btrfs_find_tree_block(root, bytenr, blocksize);
		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
			free_extent_buffer(next);
			mutex_unlock(&root->fs_info->alloc_mutex);

			if (path->slots[*level] == 0)
				reada_walk_down(root, cur, path->slots[*level]);
			next = read_tree_block(root, bytenr, blocksize,
					       ptr_gen);
			mutex_lock(&root->fs_info->alloc_mutex);

			/* we've dropped the lock, double check */
			ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
						&refs);
			BUG_ON(ret);
			if (refs != 1) {
				parent = path->nodes[*level];
				root_owner = btrfs_header_owner(parent);
				root_gen = btrfs_header_generation(parent);

				path->slots[*level]++;
				free_extent_buffer(next);
				ret = __btrfs_free_extent(trans, root, bytenr,
							  blocksize,
							  root_owner,
							  root_gen, 0, 0, 1);
				BUG_ON(ret);
				continue;
			}
		}
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	if (path->nodes[*level] == root->node) {
		parent = path->nodes[*level];
		bytenr = path->nodes[*level]->start;
	} else {
		parent = path->nodes[*level + 1];
		bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
	}

	blocksize = btrfs_level_size(root, *level);
	root_owner = btrfs_header_owner(parent);
	root_gen = btrfs_header_generation(parent);

	ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
				  root_owner, root_gen, 0, 0, 1);
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return 0;
}
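
/*
 * The invariant maintained above (hedged summary, not new mechanism):
 * path->nodes[n] is the buffer currently being walked at level n and
 * path->slots[n] is the next child pointer to visit.  A child whose
 * refcount is greater than one is shared with another snapshot, so the
 * walk never descends into it; it only drops one reference:
 *
 *	if (refs != 1) {
 *		path->slots[*level]++;		// skip the subtree
 *		__btrfs_free_extent(...);	// drop our ref only
 *		continue;
 *	}
 */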

/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	struct btrfs_root_item *root_item = &root->root_item;
	int i;
	int slot;
	int ret;

	for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
			struct extent_buffer *node;
			struct btrfs_disk_key disk_key;
			node = path->nodes[i];
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			btrfs_node_key(node, &disk_key, path->slots[i]);
			memcpy(&root_item->drop_progress,
			       &disk_key, sizeof(disk_key));
			root_item->drop_level = i;
			return 0;
		} else {
			if (path->nodes[*level] == root->node) {
				root_owner = root->root_key.objectid;
				root_gen =
				   btrfs_header_generation(path->nodes[*level]);
			} else {
				struct extent_buffer *node;
				node = path->nodes[*level + 1];
				root_owner = btrfs_header_owner(node);
				root_gen = btrfs_header_generation(node);
			}
			ret = btrfs_free_extent(trans, root,
						path->nodes[*level]->start,
						path->nodes[*level]->len,
						root_owner, root_gen, 0, 0, 1);
			BUG_ON(ret);
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;
	struct btrfs_root_item *root_item = &root->root_item;

	WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(root->node);
	orig_level = level;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		path->nodes[level] = root->node;
		extent_buffer_get(root->node);
		path->slots[level] = 0;
	} else {
		struct btrfs_key key;
		struct btrfs_disk_key found_key;
		struct extent_buffer *node;

		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		path->lowest_level = level;
		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		node = path->nodes[level];
		btrfs_node_key(node, &found_key, path->slots[level]);
		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
			       sizeof(found_key)));
		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
			if (path->nodes[i] && path->locks[i]) {
				path->locks[i] = 0;
				btrfs_tree_unlock(path->nodes[i]);
			}
		}
	}
	while(1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		if (trans->transaction->in_commit) {
			ret = -EAGAIN;
			break;
		}
	}
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
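
/*
 * Deletion is resumable: walk_up_tree() records the next key in
 * root_item->drop_progress, and the loop above bails out with -EAGAIN
 * when a commit is in progress.  A caller sketch (illustrative only;
 * the real driver is btrfs_clean_old_snapshots() in transaction.c,
 * which also takes drop_mutex as the WARN_ON above requires):
 *
 *	while (1) {
 *		trans = btrfs_start_transaction(tree_root, 1);
 *		ret = btrfs_drop_snapshot(trans, snap_root);
 *		btrfs_end_transaction(trans, tree_root);
 *		if (ret != -EAGAIN)
 *			break;	// done, or a real error
 *	}
 */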

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	u64 start;
	u64 end;
	u64 ptr;
	int ret;

	mutex_lock(&info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(&info->block_group_cache, 0,
					    &start, &end, (unsigned int)-1);
		if (ret)
			break;
		ret = get_state_private(&info->block_group_cache, start, &ptr);
		if (!ret)
			kfree((void *)(unsigned long)ptr);
		clear_extent_bits(&info->block_group_cache, start,
				  end, (unsigned int)-1, GFP_NOFS);
	}
	while(1) {
		ret = find_first_extent_bit(&info->free_space_cache, 0,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		clear_extent_dirty(&info->free_space_cache, start,
				   end, GFP_NOFS);
	}
	mutex_unlock(&info->alloc_mutex);
	return 0;
}

static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
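
/*
 * calc_ra() just clips a readahead window to the last page of the
 * range.  For example, with start = 10, last = 1000 and nr = 256 pages
 * it returns min(1000, 10 + 256 - 1) = 265, one full window; near the
 * end of the range the same call returns 'last' instead.
 */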

static int noinline relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
	u64 page_end;
	unsigned long last_index;
	unsigned long i;
	struct page *page;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;
	unsigned long total_read = 0;
	unsigned long ra_pages;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_trans_handle *trans;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	mutex_lock(&inode->i_mutex);
	i = start >> PAGE_CACHE_SHIFT;
	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;

	ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;

	file_ra_state_init(ra, inode->i_mapping);

	for (; i <= last_index; i++) {
		if (total_read % ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
				       calc_ra(i, last_index, ra_pages));
		}
		total_read++;
again:
		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
			goto truncate_racing;
		page = grab_cache_page(inode->i_mapping, i);
		if (!page)
			goto out_unlock;
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				goto out_unlock;
			}
		}
		wait_on_page_writeback(page);

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		set_extent_delalloc(io_tree, page_start,
				    page_end, GFP_NOFS);
		set_page_dirty(page);

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
	}

out_unlock:
	/* we have to start the IO in order to get the ordered extents
	 * instantiated.  This allows the relocation code to wait
	 * for all the ordered extents to hit the disk.
	 *
	 * Otherwise, it would constantly loop over the same extents
	 * because the old ones don't get deleted until the IO is
	 * started
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
			       WB_SYNC_NONE);
	kfree(ra);
	trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
	if (trans) {
		btrfs_end_transaction(trans, BTRFS_I(inode)->root);
		mark_inode_dirty(inode);
	}
	mutex_unlock(&inode->i_mutex);
	return 0;

truncate_racing:
	vmtruncate(inode, inode->i_size);
	balance_dirty_pages_ratelimited_nr(inode->i_mapping,
					   total_read);
	goto out_unlock;
}
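
/*
 * Relocation of file data works by proxy: the pages are read in,
 * tagged delalloc and redirtied, so ordinary writeback allocates new
 * extents for them outside the block group being emptied.  The
 * btrfs_fdatawrite_range() call kicks that IO off before returning so
 * the caller's wait on ordered extents can make forward progress.
 */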

/*
 * The back references tell us which tree holds a ref on a block,
 * but it is possible for the tree root field in the reference to
 * reflect the original root before a snapshot was made.  In this
 * case we should search through all the children of a given root
 * to find potential holders of references on a block.
 *
 * Instead, we do something a little less fancy and just search
 * all the roots for a given key/block combination.
 */
static int find_root_for_ref(struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *key0,
			     int level,
			     int file_key,
			     struct btrfs_root **found_root,
			     u64 bytenr)
{
	struct btrfs_key root_location;
	struct btrfs_root *cur_root = *found_root;
	struct btrfs_file_extent_item *file_extent;
	u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
	u64 found_bytenr;
	int ret;

	root_location.offset = (u64)-1;
	root_location.type = BTRFS_ROOT_ITEM_KEY;
	path->lowest_level = level;
	while(1) {
		ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
		found_bytenr = 0;
		if (ret == 0 && file_key) {
			struct extent_buffer *leaf = path->nodes[0];
			file_extent = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(leaf, file_extent) ==
			    BTRFS_FILE_EXTENT_REG) {
				found_bytenr =
					btrfs_file_extent_disk_bytenr(leaf,
							       file_extent);
			}
		} else if (!file_key) {
			if (path->nodes[level])
				found_bytenr = path->nodes[level]->start;
		}

		btrfs_release_path(cur_root, path);

		if (found_bytenr == bytenr) {
			*found_root = cur_root;
			ret = 0;
			goto out;
		}
		ret = btrfs_search_root(root->fs_info->tree_root,
					root_search_start, &root_search_start);
		if (ret)
			break;

		root_location.objectid = root_search_start;
		cur_root = btrfs_read_fs_root_no_name(root->fs_info,
						      &root_location);
		if (!cur_root) {
			ret = 1;
			break;
		}
	}
out:
	path->lowest_level = 0;
	return ret;
}
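
/*
 * btrfs_search_root() (root-tree.c) hands back fs root objectids one
 * at a time starting from BTRFS_FS_TREE_OBJECTID, so the loop above is
 * a brute force scan of every subvolume root until one resolves key0
 * to the expected bytenr.  Shape of the iteration (condensed from the
 * code above, names generic):
 *
 *	next = BTRFS_FS_TREE_OBJECTID;
 *	while (!btrfs_search_root(tree_root, next, &next)) {
 *		// try the root with objectid 'next'
 *	}
 */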

/*
 * note, this releases the path
 */
static int noinline relocate_one_reference(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  struct btrfs_key *extent_key,
				  u64 *last_file_objectid,
				  u64 *last_file_offset,
				  u64 *last_file_root,
				  u64 last_extent)
{
	struct inode *inode;
	struct btrfs_root *found_root;
	struct btrfs_key root_location;
	struct btrfs_key found_key;
	struct btrfs_extent_ref *ref;
	u64 ref_root;
	u64 ref_gen;
	u64 ref_objectid;
	u64 ref_offset;
	int ret;
	int level;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
			     struct btrfs_extent_ref);
	ref_root = btrfs_ref_root(path->nodes[0], ref);
	ref_gen = btrfs_ref_generation(path->nodes[0], ref);
	ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
	ref_offset = btrfs_ref_offset(path->nodes[0], ref);
	btrfs_release_path(extent_root, path);

	root_location.objectid = ref_root;
	if (ref_gen == 0)
		root_location.offset = 0;
	else
		root_location.offset = (u64)-1;
	root_location.type = BTRFS_ROOT_ITEM_KEY;

	found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
						&root_location);
	BUG_ON(!found_root);
	mutex_unlock(&extent_root->fs_info->alloc_mutex);

	if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		found_key.objectid = ref_objectid;
		found_key.type = BTRFS_EXTENT_DATA_KEY;
		found_key.offset = ref_offset;
		level = 0;

		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 1, &found_root,
					extent_key->objectid);
		if (ret)
			goto out;

		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		inode = btrfs_iget_locked(extent_root->fs_info->sb,
					  ref_objectid, found_root);
		if (inode->i_state & I_NEW) {
			/* the inode and parent dir are two different roots */
			BTRFS_I(inode)->root = found_root;
			BTRFS_I(inode)->location.objectid = ref_objectid;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;
			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}
		/* this can happen if the reference is not against
		 * the latest version of the tree root
		 */
		if (is_bad_inode(inode)) {
			iput(inode);
			goto out;
		}

		*last_file_objectid = inode->i_ino;
		*last_file_root = found_root->root_key.objectid;
		*last_file_offset = ref_offset;

		relocate_inode_pages(inode, ref_offset, extent_key->offset);
		iput(inode);
	} else {
		struct btrfs_trans_handle *trans;
		struct extent_buffer *eb;
		int needs_lock = 0;

		eb = read_tree_block(found_root, extent_key->objectid,
				     extent_key->offset, 0);
		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);

		if (level == 0)
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_node_key_to_cpu(eb, &found_key, 0);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 0, &found_root,
					extent_key->objectid);
		if (ret)
			goto out;

		/*
		 * right here almost anything could happen to our key,
		 * but that's ok.  The cow below will either relocate it
		 * or someone else will have relocated it.  Either way,
		 * it is in a different spot than it was before and
		 * we're happy.
		 */
		trans = btrfs_start_transaction(found_root, 1);

		if (found_root == extent_root->fs_info->extent_root ||
		    found_root == extent_root->fs_info->chunk_root ||
		    found_root == extent_root->fs_info->dev_root) {
			needs_lock = 1;
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, found_root, &found_key, path,
					0, 1);
		path->lowest_level = 0;
		btrfs_release_path(found_root, path);

		if (found_root == found_root->fs_info->extent_root)
			btrfs_extent_post_op(trans, found_root);
		if (needs_lock)
			mutex_unlock(&extent_root->fs_info->alloc_mutex);

		btrfs_end_transaction(trans, found_root);
	}
out:
	mutex_lock(&extent_root->fs_info->alloc_mutex);
	return 0;
}

static int noinline del_extent_zero(struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(extent_root, 1);
	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret > 0) {
		ret = -EIO;
		goto out;
	}
	if (ret < 0)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_end_transaction(trans, extent_root);
	return ret;
}

static int noinline relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u64 last_file_objectid = 0;
	u64 last_file_root = 0;
	u64 last_file_offset = (u64)-1;
	u64 last_extent = 0;
	u32 nritems;
	u32 item_size;
	int ret = 0;

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(extent_root, path, extent_key);
		goto out;
	}
	key.objectid = extent_key->objectid;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = 0;

	while(1) {
		ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] == nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret > 0) {
				ret = 0;
				goto out;
			}
			if (ret < 0)
				goto out;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != extent_key->objectid)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY)
			break;

		key.offset = found_key.offset + 1;
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);

		ret = relocate_one_reference(extent_root, path, extent_key,
					     &last_file_objectid,
					     &last_file_offset,
					     &last_file_root, last_extent);
		if (ret)
			goto out;
		last_extent = extent_key->objectid;
	}
	ret = 0;
out:
	btrfs_release_path(extent_root, path);
	return ret;
}

static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->num_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
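
/*
 * Worked examples of the mapping above:
 *   1 device:   RAID0 -> single,  RAID1 or RAID10 -> DUP
 *   >1 devices: DUP   -> RAID1,   single          -> RAID0,
 *               any RAID bit already set -> flags unchanged
 */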

int __alloc_chunk_for_shrink(struct btrfs_root *root,
			     struct btrfs_block_group_cache *shrink_block_group,
			     int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		spin_unlock(&shrink_block_group->lock);
		mutex_unlock(&root->fs_info->alloc_mutex);

		trans = btrfs_start_transaction(root, 1);
		mutex_lock(&root->fs_info->alloc_mutex);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		mutex_unlock(&root->fs_info->alloc_mutex);
		btrfs_end_transaction(trans, root);
		mutex_lock(&root->fs_info->alloc_mutex);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}
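
/*
 * Before any extents are moved out of the shrinking group, the helper
 * above makes sure there is somewhere for them to land, converting the
 * profile with update_block_group_flags() when the device count has
 * changed.  Note the unlock/lock dance around btrfs_start_transaction():
 * starting a transaction can block on a commit, which may itself need
 * alloc_mutex, so the mutex is dropped first to avoid a deadlock.
 */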

int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_path *path;
	u64 cur_byte;
	u64 total_found;
	u64 shrink_last_byte;
	struct btrfs_block_group_cache *shrink_block_group;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int progress;

	mutex_lock(&root->fs_info->alloc_mutex);
	shrink_block_group = btrfs_lookup_block_group(root->fs_info,
						      shrink_start);
	BUG_ON(!shrink_block_group);

	shrink_last_byte = shrink_block_group->key.objectid +
		shrink_block_group->key.offset;

	shrink_block_group->space_info->total_bytes -=
		shrink_block_group->key.offset;
	path = btrfs_alloc_path();
	root = root->fs_info->extent_root;
	path->reada = 2;

	printk("btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)shrink_start,
	       (unsigned long long)shrink_block_group->flags);

	__alloc_chunk_for_shrink(root, shrink_block_group, 1);

again:

	shrink_block_group->ro = 1;

	total_found = 0;
	progress = 0;
	key.objectid = shrink_start;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid + found_key.offset > shrink_start &&
		    found_key.objectid < shrink_last_byte) {
			cur_byte = found_key.objectid;
			key.objectid = cur_byte;
		}
	}
	btrfs_release_path(root, path);

	while(1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid >= shrink_last_byte)
			break;

		if (progress && need_resched()) {
			memcpy(&key, &found_key, sizeof(key));
			cond_resched();
			btrfs_release_path(root, path);
			btrfs_search_slot(NULL, root, &key, path, 0, 0);
			progress = 0;
			goto next;
		}
		progress = 1;

		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
		    found_key.objectid + found_key.offset <= cur_byte) {
			memcpy(&key, &found_key, sizeof(key));
			key.offset++;
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = found_key.objectid + found_key.offset;
		key.objectid = cur_byte;
		btrfs_release_path(root, path);
		ret = relocate_one_extent(root, path, &found_key);
		__alloc_chunk_for_shrink(root, shrink_block_group, 0);
	}

	btrfs_release_path(root, path);

	if (total_found > 0) {
		printk("btrfs relocate found %llu last extent was %llu\n",
		       (unsigned long long)total_found,
		       (unsigned long long)found_key.objectid);
		mutex_unlock(&root->fs_info->alloc_mutex);
		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);

		btrfs_clean_old_snapshots(tree_root);

		btrfs_wait_ordered_extents(tree_root);

		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);
		mutex_lock(&root->fs_info->alloc_mutex);
		goto again;
	}

	/*
	 * we've freed all the extents, now remove the block
	 * group item from the tree
	 */
	mutex_unlock(&root->fs_info->alloc_mutex);

	trans = btrfs_start_transaction(root, 1);

	mutex_lock(&root->fs_info->alloc_mutex);
	memcpy(&key, &shrink_block_group->key, sizeof(key));

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	clear_extent_bits(&info->block_group_cache, key.objectid,
			  key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);

	clear_extent_bits(&info->free_space_cache,
			  key.objectid, key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);

	memset(shrink_block_group, 0, sizeof(*shrink_block_group));
	kfree(shrink_block_group);

	btrfs_del_item(trans, root, path);
	btrfs_release_path(root, path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	btrfs_commit_transaction(trans, root);

	mutex_lock(&root->fs_info->alloc_mutex);

	/* the code to unpin extents might set a few bits in the free
	 * space cache for this range again
	 */
	clear_extent_bits(&info->free_space_cache,
			  key.objectid, key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);
out:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
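
/*
 * The overall shrink loop above: mark the group read only, relocate
 * every extent still inside it, then commit, clean old snapshots and
 * wait for ordered IO so stale references really disappear; only when
 * a full pass finds nothing (total_found == 0) is the block group item
 * deleted.  The back-to-back transaction commits are what force pinned
 * extents from the relocation pass to be unpinned and dropped.
 */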

int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	int ret;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;
	while(1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
	ret = -ENOENT;
out:
	return ret;
}

int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	int bit;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct extent_io_tree *block_group_cache;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	block_group_cache = &info->block_group_cache;
	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0) {
			ret = 0;
			goto error;
		}
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			break;
		}

		spin_lock_init(&cache->lock);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		bit = 0;
		if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
			bit = BLOCK_GROUP_DATA;
		} else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
			bit = BLOCK_GROUP_SYSTEM;
		} else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
			bit = BLOCK_GROUP_METADATA;
		}
		set_avail_alloc_bits(info, cache->flags);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;

		/* use EXTENT_LOCKED to prevent merging */
		set_extent_bits(block_group_cache, found_key.objectid,
				found_key.objectid + found_key.offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		set_state_private(block_group_cache, found_key.objectid,
				  (unsigned long)cache);
		set_extent_bits(block_group_cache, found_key.objectid,
				found_key.objectid + found_key.offset - 1,
				bit | EXTENT_LOCKED, GFP_NOFS);
		if (key.objectid >=
		    btrfs_super_total_bytes(&info->super_copy))
			break;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
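
/*
 * Block groups are indexed in an extent_io_tree rather than a dedicated
 * structure: the byte range of each group is tagged with a type bit
 * plus EXTENT_LOCKED (to prevent merging), and the struct pointer rides
 * along as the range's private data.  A lookup sketch (condensed; the
 * real helper is btrfs_lookup_block_group() earlier in this file):
 *
 *	u64 start, end, ptr;
 *	if (!find_first_extent_bit(&info->block_group_cache, bytenr,
 *				   &start, &end, BLOCK_GROUP_DATA |
 *				   BLOCK_GROUP_METADATA |
 *				   BLOCK_GROUP_SYSTEM) &&
 *	    !get_state_private(&info->block_group_cache, start, &ptr))
 *		cache = (struct btrfs_block_group_cache *)
 *				(unsigned long)ptr;
 */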

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	int bit = 0;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	extent_root = root->fs_info->extent_root;
	block_group_cache = &root->fs_info->block_group_cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	BUG_ON(!cache);
	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	spin_lock_init(&cache->lock);
	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	bit = block_group_state_bits(type);
	set_extent_bits(block_group_cache, chunk_offset,
			chunk_offset + size - 1,
			EXTENT_LOCKED, GFP_NOFS);
	set_state_private(block_group_cache, chunk_offset,
			  (unsigned long)cache);
	set_extent_bits(block_group_cache, chunk_offset,
			chunk_offset + size - 1,
			bit | EXTENT_LOCKED, GFP_NOFS);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	finish_current_insert(trans, extent_root);
	ret = del_pending_extents(trans, extent_root);
	BUG_ON(ret);
	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}