/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner);
void maybe_lock_mutex(struct btrfs_root *root)
{
	if (root != root->fs_info->extent_root &&
	    root != root->fs_info->chunk_root &&
	    root != root->fs_info->dev_root) {
		mutex_lock(&root->fs_info->alloc_mutex);
	}
}
void maybe_unlock_mutex(struct btrfs_root *root)
{
	if (root != root->fs_info->extent_root &&
	    root != root->fs_info->chunk_root &&
	    root != root->fs_info->dev_root) {
		mutex_unlock(&root->fs_info->alloc_mutex);
	}
}
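/*
 * Sketch of the intended usage (mirrors btrfs_free_extent() later in
 * this file): the three roots excluded above run with alloc_mutex
 * already held, so for them these helpers are deliberate no-ops, while
 * every other root brackets the locked work:
 *
 *	maybe_lock_mutex(root);
 *	ret = __btrfs_free_extent(trans, root, ...);
 *	maybe_unlock_mutex(root);
 */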
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct extent_io_tree *free_space_cache;
	int slot;
	u64 last = 0;
	u64 first_free;
	u64 hole_size;
	int found = 0;

	if (!block_group)
		return 0;

	root = root->fs_info->extent_root;
	free_space_cache = &root->fs_info->free_space_cache;

	if (block_group->cached)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	first_free = block_group->key.objectid;
	key.objectid = block_group->key.objectid;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;
	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		goto err;
	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid + key.offset > first_free)
			first_free = key.objectid + key.offset;
	}
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0)
				continue;
			else
				break;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid) {
			goto next;
		}
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset) {
			break;
		}

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			if (!found) {
				last = first_free;
				found = 1;
			}
			if (key.objectid > last) {
				hole_size = key.objectid - last;
				set_extent_dirty(free_space_cache, last,
						 last + hole_size - 1,
						 GFP_NOFS);
			}
			last = key.objectid + key.offset;
		}
next:
		path->slots[0]++;
	}

	if (!found)
		last = first_free;
	if (block_group->key.objectid +
	    block_group->key.offset > last) {
		hole_size = block_group->key.objectid +
			    block_group->key.offset - last;
		set_extent_dirty(free_space_cache, last,
				 last + hole_size - 1, GFP_NOFS);
	}
	block_group->cached = 1;
err:
	btrfs_free_path(path);
	return 0;
}
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
						       btrfs_fs_info *info,
						       u64 bytenr)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 ptr;
	u64 start;
	u64 end;
	int ret;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return NULL;

	ret = get_state_private(block_group_cache, start, &ptr);
	if (ret)
		return NULL;

	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	return block_group;
}
struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
							 btrfs_fs_info *info,
							 u64 bytenr)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 ptr;
	u64 start;
	u64 end;
	int ret;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return NULL;

	ret = get_state_private(block_group_cache, start, &ptr);
	if (ret)
		return NULL;

	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	if (block_group->key.objectid <= bytenr && bytenr <
	    block_group->key.objectid + block_group->key.offset)
		return block_group;
	return NULL;
}
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
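/*
 * Quick illustration (hypothetical values): a block group created with
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 satisfies
 * block_group_bits(cache, BTRFS_BLOCK_GROUP_DATA), while a
 * metadata-only group does not, so the allocator skips it when
 * searching for data space.
 */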
static int noinline find_search_start(struct btrfs_root *root,
			      struct btrfs_block_group_cache **cache_ret,
			      u64 *start_ret, u64 num, int data)
{
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	struct extent_io_tree *free_space_cache;
	struct extent_state *state;
	u64 last;
	u64 start = 0;
	u64 cache_miss = 0;
	u64 total_fs_bytes;
	u64 search_start = *start_ret;
	int wrapped = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	free_space_cache = &root->fs_info->free_space_cache;

	if (!cache)
		goto out;

again:
	ret = cache_block_group(root, cache);
	if (ret)
		goto out;

	last = max(search_start, cache->key.objectid);
	if (!block_group_bits(cache, data) || cache->ro)
		goto new_group;

	spin_lock_irq(&free_space_cache->lock);
	state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
	while(1) {
		if (!state) {
			if (!cache_miss)
				cache_miss = last;
			spin_unlock_irq(&free_space_cache->lock);
			goto new_group;
		}

		start = max(last, state->start);
		last = state->end + 1;
		if (last - start < num) {
			do {
				state = extent_state_next(state);
			} while(state && !(state->state & EXTENT_DIRTY));
			continue;
		}
		spin_unlock_irq(&free_space_cache->lock);
		if (cache->ro)
			goto new_group;
		if (start + num > cache->key.objectid + cache->key.offset)
			goto new_group;
		if (!block_group_bits(cache, data)) {
			printk("block group bits don't match %Lu %d\n", cache->flags, data);
		}
		*start_ret = start;
		return 0;
	}
out:
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
	if (!cache) {
		printk("Unable to find block group for %Lu\n", search_start);
		WARN_ON(1);
	}
	return -ENOSPC;

new_group:
	last = cache->key.objectid + cache->key.offset;
wrapped:
	cache = btrfs_lookup_first_block_group(root->fs_info, last);
	if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
		if (!wrapped) {
			wrapped = 1;
			last = search_start;
			goto wrapped;
		}
		goto out;
	}
	if (cache_miss && !cache->cached) {
		cache_block_group(root, cache);
		last = cache_miss;
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
	}
	cache = __btrfs_find_block_group(root, cache, last, data, 0);
	if (!cache)
		goto no_cache;
	*cache_ret = cache;
	cache_miss = 0;
	goto again;
}
/* factor is in tenths: returns num * factor / 10 */
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
static int block_group_state_bits(u64 flags)
{
	int bits = 0;
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		bits |= BLOCK_GROUP_DATA;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		bits |= BLOCK_GROUP_METADATA;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		bits |= BLOCK_GROUP_SYSTEM;
	return bits;
}
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner)
{
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 used;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 free_check;
	u64 ptr;
	int bit;
	int ret;
	int full_search = 0;
	int factor = 10;

	block_group_cache = &info->block_group_cache;

	if (data & BTRFS_BLOCK_GROUP_METADATA)
		factor = 9;

	bit = block_group_state_bits(data);

	if (search_start) {
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_first_block_group(info, search_start);
		if (shint && block_group_bits(shint, data) && !shint->ro) {
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
				return shint;
			}
		}
	}
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
			return hint;
		}
		last = hint->key.objectid + hint->key.offset;
	} else {
		if (hint)
			last = max(hint->key.objectid, search_start);
		else
			last = search_start;
	}
again:
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, bit);
		if (ret)
			break;

		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret)
			break;

		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (!cache->ro && block_group_bits(cache, data)) {
			free_check = div_factor(cache->key.offset, factor);
			if (used + cache->pinned < free_check) {
				found_group = cache;
				goto found;
			}
		}
		cond_resched();
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return found_group;
}
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
						 struct btrfs_block_group_cache
						 *hint, u64 search_start,
						 int data, int owner)
{
	struct btrfs_block_group_cache *ret;
	mutex_lock(&root->fs_info->alloc_mutex);
	ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
			   u64 owner, u64 owner_offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	u64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(ref_generation);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
		lenum = cpu_to_le64(owner);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
		lenum = cpu_to_le64(owner_offset);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	}

	return ((u64)high_crc << 32) | (u64)low_crc;
}
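/*
 * In other words: the high 32 bits of the hash depend only on the root
 * objectid, while the low 32 bits fold in the generation and, for
 * regular files (objectids starting at BTRFS_FIRST_FREE_OBJECTID), the
 * owner inode and file offset.  Refs from the same root therefore land
 * numerically close together in the extent tree.
 */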
static int match_extent_ref(struct extent_buffer *leaf,
			    struct btrfs_extent_ref *disk_ref,
			    struct btrfs_extent_ref *cpu_ref)
{
	int ret;
	int len;

	if (cpu_ref->objectid)
		len = sizeof(*cpu_ref);
	else
		len = 2 * sizeof(u64);
	ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
				   len);
	return ret == 0;
}
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 bytenr,
					  u64 root_objectid,
					  u64 ref_generation, u64 owner,
					  u64 owner_offset, int del)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_extent_ref ref;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *disk_ref;
	int ret;
	int ret2;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	while(1) {
		ret = btrfs_search_slot(trans, root, &key, path,
					del ? -1 : 0, del);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		if (ret != 0) {
			u32 nritems = btrfs_header_nritems(leaf);
			if (path->slots[0] >= nritems) {
				ret2 = btrfs_next_leaf(root, path);
				if (ret2)
					goto out;
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
			if (found_key.objectid != bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY)
				goto out;
			key.offset = found_key.offset;
			if (del) {
				btrfs_release_path(root, path);
				continue;
			}
		}
		disk_ref = btrfs_item_ptr(path->nodes[0],
					  path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
			ret = 0;
			goto out;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		key.offset = found_key.offset + 1;
		btrfs_release_path(root, path);
	}
out:
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume (in theory, not implemented yet)
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks the same as the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a file extent is removed either during snapshot deletion or file
 * truncation, the corresponding back reference is found
 * by searching for:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * Storing sufficient information for a full reverse mapping of a btree
 * block would require storing the lowest key of the block in the backref,
 * and it would require updating that lowest key either before write out or
 * every time it changed.  Instead, the objectid of the lowest key is stored
 * along with the level of the tree block.  This provides a hint
 * about where in the btree the block can be found.  Searches through the
 * btree only need to look for a pointer to that block, so they stop one
 * level higher than the level recorded in the backref.
 *
 * Some btrees do not do reference counting on their extents.  These
 * include the extent tree and the tree of tree roots.  Backrefs for these
 * trees always have a generation of zero.
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
 *
 * When a tree block is cow'd in a reference counted root,
 * new back references are added for all the blocks it points to.
 * These are of the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
 *
 * Because the lowest_key_objectid and the level are just hints
 * they are not used when backrefs are deleted.  When a backref is deleted:
 *
 * if backref was for a tree root:
 *     root_objectid = root->root_key.objectid
 * else
 *     root_objectid = btrfs_header_owner(parent)
 *
 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
 *
 * Back Reference Key hashing:
 *
 * Back references have four fields, each 64 bits long.  Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is set to BTRFS_EXTENT_REF_KEY
 */
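/*
 * A concrete sketch of the hashing described above (the numbers are
 * invented for illustration): a ref from subvolume root 5, generation
 * 10, inode 257, file offset 0 is stored under
 *
 *	key.objectid = bytenr of the extent;
 *	key.type     = BTRFS_EXTENT_REF_KEY;
 *	key.offset   = hash_extent_ref(5, 10, 257, 0);
 *
 * so lookups must compare the item body with match_extent_ref() and
 * walk forward on hash collisions, exactly as lookup_extent_backref()
 * does above.
 */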
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, u64 bytenr,
				u64 root_objectid, u64 ref_generation,
				u64 owner, u64 owner_offset)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_extent_ref ref;
	struct btrfs_extent_ref *disk_ref;
	int ret;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
	while (ret == -EEXIST) {
		disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref))
			goto out;
		key.offset++;
		btrfs_release_path(root, path);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      sizeof(ref));
	}
	if (ret)
		goto out;
	disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_extent_ref);
	write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
			    sizeof(ref));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 root_objectid, u64 ref_generation,
				  u64 owner, u64 owner_offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	u32 refs;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret != 0);

	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(l, item);
	btrfs_set_extent_refs(l, item, refs + 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);

	ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
					  path, bytenr, root_objectid,
					  ref_generation, owner, owner_offset);
	BUG_ON(ret);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);

	btrfs_free_path(path);
	return 0;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner, u64 owner_offset)
{
	int ret;

	mutex_lock(&root->fs_info->alloc_mutex);
	ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				     root_objectid, ref_generation,
				     owner, owner_offset);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
	return 0;
}
static int lookup_extent_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u32 *refs)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	key.objectid = bytenr;
	key.offset = num_bytes;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret < 0)
		goto out;
	if (ret != 0) {
		btrfs_print_leaf(root, path->nodes[0]);
		printk("failed to find block number %Lu\n", bytenr);
		BUG();
	}
	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	*refs = btrfs_extent_refs(l, item);
out:
	btrfs_free_path(path);
	return 0;
}
u32 btrfs_count_snapshots_in_path(struct btrfs_root *root,
				  struct btrfs_path *count_path,
				  u64 expected_owner,
				  u64 first_extent)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	u64 bytenr;
	u64 found_objectid;
	u64 found_owner;
	u64 root_objectid = root->root_key.objectid;
	u32 total_count = 0;
	u32 extent_refs;
	u32 cur_count;
	u32 nritems;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	struct btrfs_extent_ref *ref_item;
	int level = -1;

	/* FIXME, needs locking */
	mutex_lock(&root->fs_info->alloc_mutex);
	path = btrfs_alloc_path();
again:
	if (level == -1)
		bytenr = first_extent;
	else
		bytenr = count_path->nodes[level]->start;

	cur_count = 0;
	key.objectid = bytenr;
	key.offset = 0;

	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);

	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY) {
		goto out;
	}

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	extent_refs = btrfs_extent_refs(l, item);
	while (1) {
		l = path->nodes[0];
		nritems = btrfs_header_nritems(l);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret == 0)
				continue;
			break;
		}
		btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			path->slots[0]++;
			continue;
		}

		cur_count++;
		ref_item = btrfs_item_ptr(l, path->slots[0],
					  struct btrfs_extent_ref);
		found_objectid = btrfs_ref_root(l, ref_item);

		if (found_objectid != root_objectid) {
			total_count = 2;
			goto out;
		}
		if (level == -1) {
			found_owner = btrfs_ref_objectid(l, ref_item);
			if (found_owner != expected_owner) {
				total_count = 2;
				goto out;
			}
			/*
			 * nasty.  we don't count a reference held by
			 * the running transaction.  This allows nodatacow
			 * to avoid cow most of the time
			 */
			if (found_owner >= BTRFS_FIRST_FREE_OBJECTID &&
			    btrfs_ref_generation(l, ref_item) ==
			    root->fs_info->generation) {
				extent_refs--;
			}
		}
		total_count = 1;
		path->slots[0]++;
	}
	/*
	 * if there is more than one reference against a data extent,
	 * we have to assume the other ref is another snapshot
	 */
	if (level == -1 && extent_refs > 1) {
		total_count = 2;
		goto out;
	}
	if (cur_count == 0) {
		total_count = 0;
		goto out;
	}
	if (level >= 0 && root->node == count_path->nodes[level])
		goto out;
	level++;
	btrfs_release_path(root, path);
	goto again;

out:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return total_count;
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf)
{
	u64 bytenr;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret;
	int faili;

	if (!root->ref_cows)
		return 0;

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);
	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf, fi),
				    root->root_key.objectid, trans->transid,
				    key.objectid, key.offset);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				goto fail;
			}
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			btrfs_node_key_to_cpu(buf, &key, i);

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, bytenr,
					   btrfs_level_size(root, level - 1),
					   root->root_key.objectid,
					   trans->transid,
					   level - 1, key.objectid);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				goto fail;
			}
		}
	}
	return 0;
fail:
	WARN_ON(1);
#if 0
	for (i = 0; i < faili; i++) {
		int err;
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;
			err = btrfs_free_extent(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf,
								     fi), 0);
			BUG_ON(err);
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			err = btrfs_free_extent(trans, root, bytenr,
					btrfs_level_size(root, level - 1), 0);
			BUG_ON(err);
		}
	}
#endif
	return ret;
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *cache;
	int ret;
	int err = 0;
	int werr = 0;
	struct btrfs_path *path;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 ptr;

	block_group_cache = &root->fs_info->block_group_cache;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, BLOCK_GROUP_DIRTY);
		if (ret)
			break;

		last = end + 1;
		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret)
			break;
		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		err = write_one_cache_group(trans, root,
					    path, cache);
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 * write will work
		 */
		if (err) {
			werr = err;
			continue;
		}
		clear_extent_bits(block_group_cache, start, end,
				  BLOCK_GROUP_DIRTY, GFP_NOFS);
	}
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return werr;
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct list_head *cur;
	struct btrfs_space_info *found;
	list_for_each(cur, head) {
		found = list_entry(cur, struct btrfs_space_info, list);
		if (found->flags == flags)
			return found;
	}
	return NULL;
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->full = 0;
		WARN_ON(found->total_bytes < found->bytes_used);
		*space_info = found;
		return 0;
	}
	found = kmalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	list_add(&found->list, &info->space_info);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
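/*
 * Example (hypothetical input): a chunk allocated with
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 records RAID1 in
 * avail_data_alloc_bits, so later data allocations know a RAID1 data
 * profile already exists on this filesystem.
 */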
static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->num_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
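/*
 * Worked example (made-up input): on a single-device filesystem a
 * request for DATA | RAID1 | RAID0 loses RAID1 and RAID0 because
 * num_devices == 1, leaving plain DATA.  With two devices the same
 * request keeps RAID1 but then drops RAID0, since the redundant
 * profiles take precedence over striping.
 */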
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	u64 thresh;
	u64 start;
	u64 num_bytes;
	int ret;

	flags = reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full)
		goto out;

	thresh = div_factor(space_info->total_bytes, 6);
	if (!force &&
	    (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
	    thresh)
		goto out;

	mutex_lock(&extent_root->fs_info->chunk_mutex);
	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
	if (ret == -ENOSPC) {
		printk("space info full %Lu\n", flags);
		space_info->full = 1;
		goto out_unlock;
	}
	BUG_ON(ret);

	ret = btrfs_make_block_group(trans, extent_root, 0, flags,
		     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
	BUG_ON(ret);
out_unlock:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
	return 0;
}
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	u64 start;
	u64 end;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	while(total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache) {
			return -1;
		}
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
		start = cache->key.objectid;
		end = start + cache->key.offset - 1;
		set_extent_bits(&info->block_group_cache, start, end,
				BLOCK_GROUP_DIRTY, GFP_NOFS);

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			if (mark_free) {
				set_extent_dirty(&info->free_space_cache,
						 bytenr, bytenr + num_bytes - 1,
						 GFP_NOFS);
			}
		}
		btrfs_set_block_group_used(&cache->item, old_val);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	u64 start;
	u64 end;
	int ret;
	ret = find_first_extent_bit(&root->fs_info->block_group_cache,
				    search_start, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return 1ULL << 32;
	return start;
}
static int update_pinned_extents(struct btrfs_root *root,
				 u64 bytenr, u64 num, int pin)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (pin) {
		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1, GFP_NOFS);
	} else {
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1, GFP_NOFS);
	}
	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		if (!cache) {
			u64 first = first_logical_byte(root, bytenr);
			WARN_ON(first < bytenr);
			len = min(first - bytenr, num);
		} else {
			len = min(num, cache->key.offset -
				  (bytenr - cache->key.objectid));
		}
		if (pin) {
			if (cache) {
				cache->pinned += len;
				cache->space_info->bytes_pinned += len;
			}
			fs_info->total_pinned += len;
		} else {
			if (cache) {
				cache->pinned -= len;
				cache->space_info->bytes_pinned -= len;
			}
			fs_info->total_pinned -= len;
		}
		bytenr += len;
		num -= len;
	}
	return 0;
}
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
	u64 last = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
	int ret;

	while(1) {
		ret = find_first_extent_bit(pinned_extents, last,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		set_extent_dirty(copy, start, end, GFP_NOFS);
		last = end + 1;
	}
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;
	int ret;
	struct extent_io_tree *free_space_cache;
	free_space_cache = &root->fs_info->free_space_cache;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
	}
	mutex_unlock(&root->fs_info->alloc_mutex);
	return 0;
}
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root)
{
	u64 start;
	u64 end;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct extent_buffer *eb;
	struct btrfs_path *path;
	struct btrfs_key ins;
	struct btrfs_disk_key first;
	struct btrfs_extent_item extent_item;
	int ret;
	int level;
	int err = 0;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	btrfs_set_stack_extent_refs(&extent_item, 1);
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	path = btrfs_alloc_path();

	while(1) {
		ret = find_first_extent_bit(&info->extent_ins, 0, &start,
					    &end, EXTENT_LOCKED);
		if (ret)
			break;

		ins.objectid = start;
		ins.offset = end + 1 - start;
		err = btrfs_insert_item(trans, extent_root, &ins,
					&extent_item, sizeof(extent_item));
		clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
				  GFP_NOFS);
		eb = read_tree_block(extent_root, ins.objectid, ins.offset,
				     trans->transid);
		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);
		if (level == 0) {
			btrfs_item_key(eb, &first, 0);
		} else {
			btrfs_node_key(eb, &first, 0);
		}
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		/*
		 * the first key is just a hint, so the race we've created
		 * against reading it is fine
		 */
		err = btrfs_insert_extent_backref(trans, extent_root, path,
					  start, extent_root->root_key.objectid,
					  0, level,
					  btrfs_disk_key_objectid(&first));
		BUG_ON(err);
	}
	btrfs_free_path(path);
	return 0;
}
static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
			  int pending)
{
	int err = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (!pending) {
		struct extent_buffer *buf;
		buf = btrfs_find_tree_block(root, bytenr, num_bytes);
		if (buf) {
			if (!btrfs_try_tree_lock(buf) &&
			    btrfs_buffer_uptodate(buf, 0)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				u64 header_transid =
					btrfs_header_generation(buf);
				if (header_transid == transid &&
				    !btrfs_header_flag(buf,
					       BTRFS_HEADER_FLAG_WRITTEN)) {
					clean_tree_block(NULL, root, buf);
					btrfs_tree_unlock(buf);
					free_extent_buffer(buf);
					return 1;
				}
				btrfs_tree_unlock(buf);
			}
			free_extent_buffer(buf);
		}
		update_pinned_extents(root, bytenr, num_bytes, 1);
	} else {
		set_extent_bits(&root->fs_info->pending_del,
				bytenr, bytenr + num_bytes - 1,
				EXTENT_LOCKED, GFP_NOFS);
	}
	BUG_ON(err < 0);
	return 0;
}
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 bytenr, u64 num_bytes,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner_objectid, u64 owner_offset, int pin,
			 int mark_free)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	int ret;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	struct btrfs_extent_item *ei;
	u32 refs;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, root_objectid,
				    ref_generation,
				    owner_objectid, owner_offset, 1);
	if (ret == 0) {
		struct btrfs_key found_key;
		extent_slot = path->slots[0];
		while(extent_slot > 0) {
			extent_slot--;
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      extent_slot);
			if (found_key.objectid != bytenr)
				break;
			if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
			    found_key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
		}
		if (!found_extent)
			ret = btrfs_del_item(trans, extent_root, path);
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk("Unable to find ref byte nr %Lu root %Lu "
		       " gen %Lu owner %Lu offset %Lu\n", bytenr,
		       root_objectid, ref_generation, owner_objectid,
		       owner_offset);
	}
	if (!found_extent) {
		btrfs_release_path(extent_root, path);
		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
		if (ret < 0)
			return ret;
		BUG_ON(ret);
		extent_slot = path->slots[0];
	}

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs -= 1;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
		/* if the back ref and the extent are next to each other
		 * they get deleted below in one shot
		 */
		path->slots[0] = extent_slot;
		num_to_del = 2;
	} else if (found_extent) {
		/* otherwise delete the extent back ref */
		ret = btrfs_del_item(trans, extent_root, path);
		BUG_ON(ret);
		/* if refs are 0, we need to setup the path for deletion */
		if (refs == 0) {
			btrfs_release_path(extent_root, path);
			ret = btrfs_search_slot(trans, extent_root, &key, path,
						-1, 1);
			BUG_ON(ret);
		}
	}

	if (refs == 0) {
		u64 super_used;
		u64 root_used;

		if (pin) {
			ret = pin_down_bytes(root, bytenr, num_bytes, 0);
			if (ret > 0)
				mark_free = 1;
			BUG_ON(ret < 0);
		}

		/* block accounting for super block */
		spin_lock_irq(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - num_bytes);
		spin_unlock_irq(&info->delalloc_lock);

		/* block accounting for root item */
		root_used = btrfs_root_used(&root->root_item);
		btrfs_set_root_used(&root->root_item,
				    root_used - num_bytes);
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
					 mark_free);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	return ret;
}
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int err = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pending_del;
	struct extent_io_tree *pinned_extents;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	pending_del = &extent_root->fs_info->pending_del;
	pinned_extents = &extent_root->fs_info->pinned_extents;

	while(1) {
		ret = find_first_extent_bit(pending_del, 0, &start, &end,
					    EXTENT_LOCKED);
		if (ret)
			break;
		update_pinned_extents(extent_root, start, end + 1 - start, 1);
		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
				  GFP_NOFS);
		ret = __free_extent(trans, extent_root,
				    start, end + 1 - start,
				    extent_root->root_key.objectid,
				    0, 0, 0, 0, 0);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 root_objectid,
			       u64 ref_generation, u64 owner_objectid,
			       u64 owner_offset, int pin)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	int pending_ret;
	int ret;

	WARN_ON(num_bytes < root->sectorsize);
	if (!root->ref_cows)
		ref_generation = 0;

	if (root == extent_root) {
		pin_down_bytes(root, bytenr, num_bytes, 1);
		return 0;
	}
	ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
			    ref_generation, owner_objectid, owner_offset,
			    pin, pin == 0);
	finish_current_insert(trans, root->fs_info->extent_root);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
	return ret ? ret : pending_ret;
}
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, u64 bytenr,
		      u64 num_bytes, u64 root_objectid,
		      u64 ref_generation, u64 owner_objectid,
		      u64 owner_offset, int pin)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
				  root_objectid, ref_generation,
				  owner_objectid, owner_offset, pin);
	maybe_unlock_mutex(root);
	return ret;
}
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
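/*
 * Round-up arithmetic at work (example numbers): with stripesize 65536
 * the mask is 0xffff, so val 100000 becomes
 * (100000 + 0xffff) & ~0xffff == 131072, the next multiple of the
 * stripe size; an already-aligned val passes through unchanged.
 */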
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
	int ret;
	u64 orig_search_start;
	struct btrfs_root * root = orig_root->fs_info->extent_root;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total_needed = num_bytes;
	u64 *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group;
	int full_scan = 0;
	int wrapped = 0;
	int chunk_alloc_done = 0;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->last_alloc;
		empty_cluster = 256 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->last_data_alloc;
	}

	if (last_ptr) {
		if (*last_ptr)
			hint_byte = *last_ptr;
		else
			empty_size += empty_cluster;
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	orig_search_start = search_start;

	if (search_end == (u64)-1)
		search_end = btrfs_super_total_bytes(&info->super_copy);

	if (hint_byte) {
		block_group = btrfs_lookup_first_block_group(info, hint_byte);
		if (!block_group)
			hint_byte = search_start;
		block_group = __btrfs_find_block_group(root, block_group,
						       hint_byte, data, 1);
		if (last_ptr && *last_ptr == 0 && block_group)
			hint_byte = block_group->key.objectid;
	} else {
		block_group = __btrfs_find_block_group(root,
						       trans->block_group,
						       search_start, data, 1);
	}
	search_start = max(search_start, hint_byte);

	total_needed += empty_size;

check_failed:
	if (!block_group) {
		block_group = btrfs_lookup_first_block_group(info,
							     search_start);
		if (!block_group)
			block_group = btrfs_lookup_first_block_group(info,
						       orig_search_start);
	}
	if (full_scan && !chunk_alloc_done) {
		if (allowed_chunk_alloc) {
			do_chunk_alloc(trans, root,
				     num_bytes + 2 * 1024 * 1024, data, 1);
			allowed_chunk_alloc = 0;
		} else if (block_group && block_group_bits(block_group, data)) {
			block_group->space_info->force_alloc = 1;
		}
		chunk_alloc_done = 1;
	}
	ret = find_search_start(root, &block_group, &search_start,
				total_needed, data);
	if (ret == -ENOSPC && last_ptr && *last_ptr) {
		*last_ptr = 0;
		block_group = btrfs_lookup_first_block_group(info,
							     orig_search_start);
		search_start = orig_search_start;
		ret = find_search_start(root, &block_group, &search_start,
					total_needed, data);
	}
	if (ret == -ENOSPC)
		goto enospc;
	if (ret)
		goto error;

	if (last_ptr && *last_ptr && search_start != *last_ptr) {
		*last_ptr = 0;
		if (!empty_size) {
			empty_size += empty_cluster;
			total_needed += empty_size;
		}
		block_group = btrfs_lookup_first_block_group(info,
							     orig_search_start);
		search_start = orig_search_start;
		ret = find_search_start(root, &block_group,
					&search_start, total_needed, data);
		if (ret == -ENOSPC)
			goto enospc;
		if (ret)
			goto error;
	}

	search_start = stripe_align(root, search_start);
	ins->objectid = search_start;
	ins->offset = num_bytes;

	if (ins->objectid + num_bytes >= search_end)
		goto enospc;

	if (ins->objectid + num_bytes >
	    block_group->key.objectid + block_group->key.offset) {
		search_start = block_group->key.objectid +
			block_group->key.offset;
		goto new_group;
	}

	if (test_range_bit(&info->extent_ins, ins->objectid,
			   ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (test_range_bit(&info->pinned_extents, ins->objectid,
			   ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
	    ins->objectid < exclude_start + exclude_nr)) {
		search_start = exclude_start + exclude_nr;
		goto new_group;
	}

	if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
		block_group = btrfs_lookup_block_group(info, ins->objectid);
		if (block_group)
			trans->block_group = block_group;
	}
	ins->offset = num_bytes;
	if (last_ptr) {
		*last_ptr = ins->objectid + ins->offset;
		if (*last_ptr ==
		    btrfs_super_total_bytes(&root->fs_info->super_copy)) {
			*last_ptr = 0;
		}
	}
	return 0;

new_group:
	if (search_start + num_bytes >= search_end) {
enospc:
		search_start = orig_search_start;
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
	}
	block_group = btrfs_lookup_first_block_group(info, search_start);
	cond_resched();
	block_group = __btrfs_find_block_group(root, block_group,
					       search_start, data, 0);
	goto check_failed;

error:
	return ret;
}
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 num_bytes, u64 min_alloc_size,
				  u64 empty_size, u64 hint_byte,
				  u64 search_end, struct btrfs_key *ins,
				  u64 data)
{
	int ret;
	u64 search_start = 0;
	u64 alloc_profile;
	struct btrfs_fs_info *info = root->fs_info;

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
			        info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
			        info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
			        info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}
again:
	data = reduce_alloc_profile(root, data);
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows) {
		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     2 * 1024 * 1024,
					     BTRFS_BLOCK_GROUP_METADATA |
					     (info->metadata_alloc_profile &
					      info->avail_metadata_alloc_bits),
					     0);
			BUG_ON(ret);
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);
		BUG_ON(ret);
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret) {
		printk("allocation failed flags %Lu\n", data);
		BUG();
	}
	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);
	return 0;
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
				     empty_size, hint_byte, search_end, ins,
				     data);
	maybe_unlock_mutex(root);
	return ret;
}
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, u64 owner_offset,
					 struct btrfs_key *ins)
{
	int ret;
	int pending_ret;
	u64 super_used;
	u64 root_used;
	u64 num_bytes = ins->offset;
	u32 sizes[2];
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_ref *ref;
	struct btrfs_path *path;
	struct btrfs_key keys[2];

	/* block accounting for super block */
	spin_lock_irq(&info->delalloc_lock);
	super_used = btrfs_super_bytes_used(&info->super_copy);
	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
	spin_unlock_irq(&info->delalloc_lock);

	/* block accounting for root item */
	root_used = btrfs_root_used(&root->root_item);
	btrfs_set_root_used(&root->root_item, root_used + num_bytes);

	if (root == extent_root) {
		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		goto update_block;
	}

	memcpy(&keys[0], ins, sizeof(*ins));
	keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
					 owner, owner_offset);
	keys[1].objectid = ins->objectid;
	keys[1].type = BTRFS_EXTENT_REF_KEY;
	sizes[0] = sizeof(*extent_item);
	sizes[1] = sizeof(*ref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
				       sizes, 2);
	BUG_ON(ret);

	extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_extent_ref);

	btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
	btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
	btrfs_set_ref_objectid(path->nodes[0], ref, owner);
	btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);

	if (ret)
		goto out;
	if (pending_ret) {
		ret = pending_ret;
		goto out;
	}

update_block:
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
	if (ret) {
		printk("update block group failed for %Lu %Lu\n",
		       ins->objectid, ins->offset);
		BUG();
	}
out:
	return ret;
}
int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 root_objectid, u64 ref_generation,
				u64 owner, u64 owner_offset,
				struct btrfs_key *ins)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
					    ref_generation, owner,
					    owner_offset, ins);
	maybe_unlock_mutex(root);
	return ret;
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 min_alloc_size,
		       u64 root_objectid, u64 ref_generation,
		       u64 owner, u64 owner_offset,
		       u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, u64 data)
{
	int ret;

	maybe_lock_mutex(root);

	ret = __btrfs_reserve_extent(trans, root, num_bytes,
				     min_alloc_size, empty_size, hint_byte,
				     search_end, ins, data);
	BUG_ON(ret);
	ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
					    ref_generation, owner,
					    owner_offset, ins);
	BUG_ON(ret);

	maybe_unlock_mutex(root);
	return ret;
}
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u32 blocksize,
					     u64 root_objectid,
					     u64 ref_generation,
					     u64 first_objectid,
					     int level,
					     u64 hint,
					     u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct extent_buffer *buf;

	ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
				 root_objectid, ref_generation,
				 level, first_objectid, empty_size, hint,
				 (u64)-1, &ins, 0);
	if (ret) {
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}
	buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
	if (!buf) {
		btrfs_free_extent(trans, root, ins.objectid, blocksize,
				  root->root_key.objectid, ref_generation,
				  0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);
	btrfs_set_buffer_uptodate(buf);

	if (PageDirty(buf->first_page)) {
		printk("page %lu dirty\n", buf->first_page->index);
		WARN_ON(1);
	}

	set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
			 buf->start + buf->len - 1, GFP_NOFS);
	trans->blocks_used++;
	return buf;
}
static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct extent_buffer *leaf)
{
	u64 leaf_owner;
	u64 leaf_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int nritems;
	int ret;

	BUG_ON(!btrfs_is_leaf(leaf));
	nritems = btrfs_header_nritems(leaf);
	leaf_owner = btrfs_header_owner(leaf);
	leaf_generation = btrfs_header_generation(leaf);

	mutex_unlock(&root->fs_info->alloc_mutex);

	for (i = 0; i < nritems; i++) {
		u64 disk_bytenr;

		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		/*
		 * FIXME make sure to insert a trans record that
		 * repeats the snapshot del on crash
		 */
		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (disk_bytenr == 0)
			continue;

		mutex_lock(&root->fs_info->alloc_mutex);
		ret = __btrfs_free_extent(trans, root, disk_bytenr,
				btrfs_file_extent_disk_num_bytes(leaf, fi),
				leaf_owner, leaf_generation,
				key.objectid, key.offset, 0);
		mutex_unlock(&root->fs_info->alloc_mutex);
		BUG_ON(ret);
	}

	mutex_lock(&root->fs_info->alloc_mutex);
	return 0;
}
static void noinline reada_walk_down(struct btrfs_root *root,
				     struct extent_buffer *node,
				     int slot)
{
	u64 bytenr;
	u64 last = 0;
	u32 nritems;
	u32 refs;
	u32 blocksize;
	int ret;
	int i;
	int level;
	int skipped = 0;

	nritems = btrfs_header_nritems(node);
	level = btrfs_header_level(node);

	for (i = slot; i < nritems && skipped < 32; i++) {
		bytenr = btrfs_node_blockptr(node, i);
		if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
			     (last > bytenr && last - bytenr > 32 * 1024))) {
			skipped++;
			continue;
		}
		blocksize = btrfs_level_size(root, level - 1);
		if (i != slot) {
			ret = lookup_extent_ref(NULL, root, bytenr,
						blocksize, &refs);
			BUG_ON(ret);
			if (refs != 1) {
				skipped++;
				continue;
			}
		}
		ret = readahead_tree_block(root, bytenr, blocksize,
					   btrfs_node_ptr_generation(node, i));
		last = bytenr + blocksize;
		cond_resched();
		if (ret)
			break;
	}
}
/*
 * we want to avoid as much random IO as we can with the alloc mutex
 * held, so drop the lock and do the lookup, then do it again with the
 * lock held.
 */
int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
			      u32 *refs)
{
	mutex_unlock(&root->fs_info->alloc_mutex);
	lookup_extent_ref(NULL, root, start, len, refs);
	mutex_lock(&root->fs_info->alloc_mutex);
	return lookup_extent_ref(NULL, root, start, len, refs);
}
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret;
	u32 refs;

	mutex_lock(&root->fs_info->alloc_mutex);

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
					path->nodes[*level]->len, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;
		if (*level == 0) {
			ret = drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			break;
		}
		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
		BUG_ON(ret);
		if (refs != 1) {
			parent = path->nodes[*level];
			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			path->slots[*level]++;
			ret = __btrfs_free_extent(trans, root, bytenr,
						  blocksize, root_owner,
						  root_gen, 0, 0, 1);
			BUG_ON(ret);
			continue;
		}
		next = btrfs_find_tree_block(root, bytenr, blocksize);
		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
			free_extent_buffer(next);
			mutex_unlock(&root->fs_info->alloc_mutex);

			if (path->slots[*level] == 0)
				reada_walk_down(root, cur, path->slots[*level]);

			next = read_tree_block(root, bytenr, blocksize,
					       ptr_gen);
			mutex_lock(&root->fs_info->alloc_mutex);

			/* we've dropped the lock, double check */
			ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
						&refs);
			BUG_ON(ret);
			if (refs != 1) {
				parent = path->nodes[*level];
				root_owner = btrfs_header_owner(parent);
				root_gen = btrfs_header_generation(parent);

				path->slots[*level]++;
				free_extent_buffer(next);
				ret = __btrfs_free_extent(trans, root, bytenr,
							  blocksize,
							  root_owner,
							  root_gen, 0, 0, 1);
				BUG_ON(ret);
				continue;
			}
		}
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	if (path->nodes[*level] == root->node) {
		root_owner = root->root_key.objectid;
		parent = path->nodes[*level];
	} else {
		parent = path->nodes[*level + 1];
		root_owner = btrfs_header_owner(parent);
	}

	root_gen = btrfs_header_generation(parent);
	ret = __btrfs_free_extent(trans, root, path->nodes[*level]->start,
				  path->nodes[*level]->len,
				  root_owner, root_gen, 0, 0, 1);
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return 0;
}

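/*
 * Note on walk_down_tree: the alloc_mutex is dropped around
 * read_tree_block() so the read does not stall other allocators, and the
 * reference count is checked a second time after relocking because it
 * may have changed while the lock was not held.
 */
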
/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	struct btrfs_root_item *root_item = &root->root_item;
	int i;
	int slot;
	int ret;

	for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
			struct extent_buffer *node;
			struct btrfs_disk_key disk_key;
			node = path->nodes[i];
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			btrfs_node_key(node, &disk_key, path->slots[i]);
			memcpy(&root_item->drop_progress,
			       &disk_key, sizeof(disk_key));
			root_item->drop_level = i;
			return 0;
		} else {
			if (path->nodes[*level] == root->node) {
				root_owner = root->root_key.objectid;
				root_gen =
				   btrfs_header_generation(path->nodes[*level]);
			} else {
				struct extent_buffer *node;
				node = path->nodes[*level + 1];
				root_owner = btrfs_header_owner(node);
				root_gen = btrfs_header_generation(node);
			}
			ret = btrfs_free_extent(trans, root,
						path->nodes[*level]->start,
						path->nodes[*level]->len,
						root_owner, root_gen, 0, 0, 1);
			BUG_ON(ret);
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

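/*
 * Note: saving the key in drop_progress and the level in drop_level above
 * is what makes snapshot deletion restartable.  btrfs_drop_snapshot uses
 * them to search back to the same spot when it has to bail out for a
 * pending commit and finish the drop in a later transaction.
 */
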
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;
	struct btrfs_root_item *root_item = &root->root_item;

	WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(root->node);
	orig_level = level;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		path->nodes[level] = root->node;
		extent_buffer_get(root->node);
		path->slots[level] = 0;
	} else {
		struct btrfs_key key;
		struct btrfs_disk_key found_key;
		struct extent_buffer *node;

		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		path->lowest_level = level;
		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		node = path->nodes[level];
		btrfs_node_key(node, &found_key, path->slots[level]);
		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
			       sizeof(found_key)));
		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
			if (path->nodes[i] && path->locks[i]) {
				path->locks[i] = 0;
				btrfs_tree_unlock(path->nodes[i]);
			}
		}
	}
	while(1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		if (trans->transaction->in_commit) {
			ret = -EAGAIN;
			break;
		}
	}
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

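/*
 * Note: walk_down_tree and walk_up_tree return a value greater than zero
 * when the walk is finished and less than zero on error, which is why the
 * loop above keeps going until one of them comes back positive.
 */
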
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	u64 start;
	u64 end;
	u64 ptr;
	int ret;

	mutex_lock(&info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(&info->block_group_cache, 0,
					    &start, &end, (unsigned int)-1);
		if (ret)
			break;
		ret = get_state_private(&info->block_group_cache, start, &ptr);
		if (!ret)
			kfree((void *)(unsigned long)ptr);
		clear_extent_bits(&info->block_group_cache, start,
				  end, (unsigned int)-1, GFP_NOFS);
	}
	while(1) {
		ret = find_first_extent_bit(&info->free_space_cache, 0,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		clear_extent_dirty(&info->free_space_cache, start,
				   end, GFP_NOFS);
	}
	mutex_unlock(&info->alloc_mutex);
	return 0;
}

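/*
 * calc_ra computes the last page of a readahead window of nr pages
 * starting at 'start', clamped so it never runs past 'last', the final
 * page of the range being relocated.
 */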
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}

static int noinline relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
	u64 page_end;
	unsigned long i;
	unsigned long last_index;
	struct page *page;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;
	unsigned long total_read = 0;
	unsigned long ra_pages;
	struct btrfs_trans_handle *trans;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	mutex_lock(&inode->i_mutex);
	i = start >> PAGE_CACHE_SHIFT;
	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;

	ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;

	file_ra_state_init(ra, inode->i_mapping);

	for (; i <= last_index; i++) {
		if (total_read % ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
				       calc_ra(i, last_index, ra_pages));
		}
		total_read++;
		if (((u64)i << PAGE_CACHE_SHIFT) > inode->i_size)
			goto truncate_racing;

		page = grab_cache_page(inode->i_mapping, i);
		if (!page)
			goto out_unlock;
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				goto out_unlock;
			}
		}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
		ClearPageDirty(page);
#else
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
#endif
		wait_on_page_writeback(page);
		set_page_extent_mapped(page);
		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;

		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		set_extent_delalloc(io_tree, page_start,
				    page_end, GFP_NOFS);
		set_page_dirty(page);

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   total_read);
	}

out_unlock:
	kfree(ra);
	trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
	btrfs_end_transaction(trans, BTRFS_I(inode)->root);
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return 0;

truncate_racing:
	vmtruncate(inode, inode->i_size);
	balance_dirty_pages_ratelimited_nr(inode->i_mapping,
					   total_read);
	goto out_unlock;
}

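/*
 * Note: relocate_inode_pages does not copy any data itself.  Marking the
 * pages dirty and delalloc means the normal writeback path will allocate
 * new extents for them, and since the block group being emptied was
 * flagged read only, those new extents should land somewhere else.
 */
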
/*
 * The back references tell us which tree holds a ref on a block,
 * but it is possible for the tree root field in the reference to
 * reflect the original root before a snapshot was made.  In this
 * case we should search through all the children of a given root
 * to find potential holders of references on a block.
 *
 * Instead, we do something a little less fancy and just search
 * all the roots for a given key/block combination.
 */
static int find_root_for_ref(struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *key0,
			     int level,
			     int file_key,
			     struct btrfs_root **found_root,
			     u64 bytenr)
{
	struct btrfs_key root_location;
	struct btrfs_root *cur_root = *found_root;
	struct btrfs_file_extent_item *file_extent;
	u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
	u64 found_bytenr;
	int ret;

	root_location.offset = (u64)-1;
	root_location.type = BTRFS_ROOT_ITEM_KEY;
	path->lowest_level = level;
	path->reada = 0;
	while(1) {
		ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
		found_bytenr = 0;
		if (ret == 0 && file_key) {
			struct extent_buffer *leaf = path->nodes[0];
			file_extent = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(leaf, file_extent) ==
			    BTRFS_FILE_EXTENT_REG) {
				found_bytenr =
					btrfs_file_extent_disk_bytenr(leaf,
							       file_extent);
			}
		} else if (!file_key) {
			if (path->nodes[level])
				found_bytenr = path->nodes[level]->start;
		}

		btrfs_release_path(cur_root, path);

		if (found_bytenr == bytenr) {
			*found_root = cur_root;
			ret = 0;
			goto out;
		}
		ret = btrfs_search_root(root->fs_info->tree_root,
					root_search_start, &root_search_start);
		if (ret)
			break;

		root_location.objectid = root_search_start;
		cur_root = btrfs_read_fs_root_no_name(root->fs_info,
						      &root_location);
		if (!cur_root) {
			ret = 1;
			break;
		}
		root_search_start++;
	}
out:
	path->lowest_level = 0;
	return ret;
}

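/*
 * Note: the loop above visits every tree root starting from
 * BTRFS_FS_TREE_OBJECTID, so a reference whose root field went stale when
 * a snapshot was taken is still resolved, at the price of searching every
 * root in the worst case.
 */
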
/*
 * note, this releases the path
 */
static int noinline relocate_one_reference(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  struct btrfs_key *extent_key,
				  u64 *last_file_objectid,
				  u64 *last_file_offset,
				  u64 *last_file_root,
				  u64 last_extent)
{
	struct inode *inode;
	struct btrfs_root *found_root;
	struct btrfs_key root_location;
	struct btrfs_key found_key;
	struct btrfs_extent_ref *ref;
	u64 ref_root;
	u64 ref_gen;
	u64 ref_objectid;
	u64 ref_offset;
	int ret;
	int level;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
			     struct btrfs_extent_ref);
	ref_root = btrfs_ref_root(path->nodes[0], ref);
	ref_gen = btrfs_ref_generation(path->nodes[0], ref);
	ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
	ref_offset = btrfs_ref_offset(path->nodes[0], ref);
	btrfs_release_path(extent_root, path);

	root_location.objectid = ref_root;
	if (ref_gen == 0)
		root_location.offset = 0;
	else
		root_location.offset = (u64)-1;
	root_location.type = BTRFS_ROOT_ITEM_KEY;

	found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
						&root_location);
	BUG_ON(!found_root);
	mutex_unlock(&extent_root->fs_info->alloc_mutex);

	if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		found_key.objectid = ref_objectid;
		found_key.type = BTRFS_EXTENT_DATA_KEY;
		found_key.offset = ref_offset;
		level = 0;

		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 1, &found_root,
					extent_key->objectid);
		if (ret)
			goto out;

		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		inode = btrfs_iget_locked(extent_root->fs_info->sb,
					  ref_objectid, found_root);
		if (inode->i_state & I_NEW) {
			/* the inode and parent dir are two different roots */
			BTRFS_I(inode)->root = found_root;
			BTRFS_I(inode)->location.objectid = ref_objectid;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;
			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}
		/* this can happen if the reference is not against
		 * the latest version of the tree root
		 */
		if (is_bad_inode(inode))
			goto out;

		*last_file_objectid = inode->i_ino;
		*last_file_root = found_root->root_key.objectid;
		*last_file_offset = ref_offset;

		relocate_inode_pages(inode, ref_offset, extent_key->offset);
		iput(inode);
	} else {
		struct btrfs_trans_handle *trans;
		struct extent_buffer *eb;
		int needs_lock = 0;

		eb = read_tree_block(found_root, extent_key->objectid,
				     extent_key->offset, 0);
		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);

		if (level == 0)
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_node_key_to_cpu(eb, &found_key, 0);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 0, &found_root,
					extent_key->objectid);
		if (ret)
			goto out;

		/*
		 * right here almost anything could happen to our key,
		 * but that's ok.  The cow below will either relocate it
		 * or someone else will have relocated it.  Either way,
		 * it is in a different spot than it was before and
		 * we're happy.
		 */
		trans = btrfs_start_transaction(found_root, 1);

		if (found_root == extent_root->fs_info->extent_root ||
		    found_root == extent_root->fs_info->chunk_root ||
		    found_root == extent_root->fs_info->dev_root) {
			needs_lock = 1;
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		path->lowest_level = level;
		path->reada = 2;
		ret = btrfs_search_slot(trans, found_root, &found_key, path,
					0, 1);
		path->lowest_level = 0;
		btrfs_release_path(found_root, path);

		if (found_root == found_root->fs_info->extent_root)
			btrfs_extent_post_op(trans, found_root);
		if (needs_lock)
			mutex_unlock(&extent_root->fs_info->alloc_mutex);

		btrfs_end_transaction(trans, found_root);
	}
out:
	mutex_lock(&extent_root->fs_info->alloc_mutex);
	return 0;
}

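/*
 * Note: for tree blocks, the relocation above boils down to a COW search.
 * Calling btrfs_search_slot with a transaction handle and cow set to 1
 * rewrites the block at a new location, which is all that is needed to
 * move it out of the block group.
 */
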
static int noinline del_extent_zero(struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(extent_root, 1);
	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret > 0) {
		ret = -EIO;
		goto out;
	}
	if (ret < 0)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_end_transaction(trans, extent_root);
	return ret;
}

static int noinline relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u64 last_file_objectid = 0;
	u64 last_file_root = 0;
	u64 last_file_offset = (u64)-1;
	u64 last_extent = 0;
	u32 nritems;
	u32 item_size;
	int ret = 0;

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(extent_root, path, extent_key);
		goto out;
	}
	key.objectid = extent_key->objectid;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = 0;

	while(1) {
		ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		ret = 0;
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] == nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret > 0) {
				ret = 0;
				goto out;
			}
			if (ret < 0)
				goto out;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != extent_key->objectid) {
			break;
		}

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			break;
		}

		key.offset = found_key.offset + 1;
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);

		ret = relocate_one_reference(extent_root, path, extent_key,
					     &last_file_objectid,
					     &last_file_offset,
					     &last_file_root, last_extent);
		if (ret)
			goto out;
		last_extent = extent_key->objectid;
	}
	ret = 0;
out:
	btrfs_release_path(extent_root, path);
	return ret;
}

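/*
 * Note: because relocate_one_reference releases the path, the loop above
 * never trusts a stale path.  It restarts the search at
 * key.offset = found_key.offset + 1 on every pass instead.
 */
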
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->num_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}

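/*
 * Summarizing the mapping above: on a single device filesystem, RAID0
 * becomes single device chunks and RAID1/RAID10 become DUP.  With more
 * than one device, existing raid flags are kept, DUP becomes RAID1 and
 * plain single device chunks become RAID0.
 */
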
int __alloc_chunk_for_shrink(struct btrfs_root *root,
			     struct btrfs_block_group_cache *shrink_block_group,
			     int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		mutex_unlock(&root->fs_info->alloc_mutex);
		trans = btrfs_start_transaction(root, 1);
		mutex_lock(&root->fs_info->alloc_mutex);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		mutex_unlock(&root->fs_info->alloc_mutex);
		btrfs_end_transaction(trans, root);
		mutex_lock(&root->fs_info->alloc_mutex);
	}
	return 0;
}

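/*
 * Note: alloc_mutex is dropped above around btrfs_start_transaction and
 * btrfs_end_transaction, matching the pattern used throughout this file.
 * Starting or ending a transaction can block on a commit, and the commit
 * itself may need alloc_mutex, so holding it across those calls risks a
 * deadlock.
 */
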
int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_path *path;
	u64 cur_byte;
	u64 total_found;
	u64 shrink_last_byte;
	struct btrfs_block_group_cache *shrink_block_group;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int progress;

	mutex_lock(&root->fs_info->alloc_mutex);
	shrink_block_group = btrfs_lookup_block_group(root->fs_info,
						      shrink_start);
	BUG_ON(!shrink_block_group);

	shrink_last_byte = shrink_block_group->key.objectid +
		shrink_block_group->key.offset;

	shrink_block_group->space_info->total_bytes -=
		shrink_block_group->key.offset;
	path = btrfs_alloc_path();
	root = root->fs_info->extent_root;
	path->reada = 2;

	printk("btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)shrink_start,
	       (unsigned long long)shrink_block_group->flags);

	__alloc_chunk_for_shrink(root, shrink_block_group, 1);

again:

	shrink_block_group->ro = 1;

	total_found = 0;
	progress = 0;
	key.objectid = shrink_start;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid + found_key.offset > shrink_start &&
		    found_key.objectid < shrink_last_byte) {
			cur_byte = found_key.objectid;
			key.objectid = cur_byte;
		}
	}
	btrfs_release_path(root, path);

	while(1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid >= shrink_last_byte)
			break;

		if (progress && need_resched()) {
			memcpy(&key, &found_key, sizeof(key));
			cond_resched();
			btrfs_release_path(root, path);
			btrfs_search_slot(NULL, root, &key, path, 0, 0);
			progress = 0;
			goto next;
		}
		progress = 1;

		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
		    found_key.objectid + found_key.offset <= cur_byte) {
			memcpy(&key, &found_key, sizeof(key));
			key.offset++;
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = found_key.objectid + found_key.offset;
		key.objectid = cur_byte;
		btrfs_release_path(root, path);
		ret = relocate_one_extent(root, path, &found_key);
		__alloc_chunk_for_shrink(root, shrink_block_group, 0);
	}

	btrfs_release_path(root, path);

	if (total_found > 0) {
		printk("btrfs relocate found %llu last extent was %llu\n",
		       (unsigned long long)total_found,
		       (unsigned long long)found_key.objectid);
		mutex_unlock(&root->fs_info->alloc_mutex);
		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);

		btrfs_clean_old_snapshots(tree_root);

		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);
		mutex_lock(&root->fs_info->alloc_mutex);
		goto again;
	}

	/*
	 * we've freed all the extents, now remove the block
	 * group item from the tree
	 */
	mutex_unlock(&root->fs_info->alloc_mutex);
	trans = btrfs_start_transaction(root, 1);
	mutex_lock(&root->fs_info->alloc_mutex);
	memcpy(&key, &shrink_block_group->key, sizeof(key));

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	clear_extent_bits(&info->block_group_cache, key.objectid,
			  key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);

	clear_extent_bits(&info->free_space_cache,
			  key.objectid, key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);

	memset(shrink_block_group, 0, sizeof(*shrink_block_group));
	kfree(shrink_block_group);

	btrfs_del_item(trans, root, path);
	btrfs_release_path(root, path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	btrfs_commit_transaction(trans, root);

	mutex_lock(&root->fs_info->alloc_mutex);

	/* the code to unpin extents might set a few bits in the free
	 * space cache for this range again
	 */
	clear_extent_bits(&info->free_space_cache,
			  key.objectid, key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);
out:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}

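/*
 * Note: after a pass that moved extents, btrfs_shrink_extent_tree commits
 * twice with btrfs_clean_old_snapshots in between, so the relocated data
 * is on disk and dead snapshots are gone before the block group is
 * scanned again.
 */
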
int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	int ret;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;
	while(1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	int bit;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct extent_io_tree *block_group_cache;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	block_group_cache = &info->block_group_cache;
	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0) {
			ret = 0;
			goto error;
		}
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			break;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		bit = 0;
		if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
			bit = BLOCK_GROUP_DATA;
		} else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
			bit = BLOCK_GROUP_SYSTEM;
		} else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
			bit = BLOCK_GROUP_METADATA;
		}
		set_avail_alloc_bits(info, cache->flags);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;

		/* use EXTENT_LOCKED to prevent merging */
		set_extent_bits(block_group_cache, found_key.objectid,
				found_key.objectid + found_key.offset - 1,
				bit | EXTENT_LOCKED, GFP_NOFS);
		set_state_private(block_group_cache, found_key.objectid,
				  (unsigned long)cache);

		if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
			cache_block_group(root->fs_info->extent_root,
					  cache);
		}
		if (key.objectid >=
		    btrfs_super_total_bytes(&info->super_copy))
			break;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}

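/*
 * Note: only metadata block groups are pre-cached above.  Data block
 * groups are left for cache_block_group() to fill in later, the first
 * time the allocator looks at them.
 */
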
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	int bit = 0;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	extent_root = root->fs_info->extent_root;
	block_group_cache = &root->fs_info->block_group_cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	BUG_ON(!cache);
	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	bit = block_group_state_bits(type);
	set_extent_bits(block_group_cache, chunk_offset,
			chunk_offset + size - 1,
			bit | EXTENT_LOCKED, GFP_NOFS);
	set_state_private(block_group_cache, chunk_offset,
			  (unsigned long)cache);
	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	finish_current_insert(trans, extent_root);
	ret = del_pending_extents(trans, extent_root);
	BUG_ON(ret);
	set_avail_alloc_bits(extent_root->fs_info, type);
	return 0;
}
);