/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
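/*
 * Note on the sizing above (assuming 4KiB pages and a 4KiB sectorsize, the
 * common case): a bitmap occupies one page, so it holds
 * PAGE_CACHE_SIZE * 8 = 32768 bits, one bit per sectorsize unit, and a
 * single bitmap entry therefore covers 32768 * 4KiB = 128MiB of a block
 * group.
 */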
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	return inode;
}
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct inode *inode)
{
	int ret = 0;

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	return ret;
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root, int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct io_ctl));

	io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->root = root;
	io_ctl->check_crcs = check_crcs;

	return 0;
}
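/*
 * Layout of the cache file as produced and consumed by the io_ctl helpers
 * below: when crcs are enabled, page 0 begins with an array of num_pages
 * u32 crcs (one per page) followed by the u64 generation, with the free
 * space entries and bitmaps coming after; without crcs, only the u64
 * generation sits at the front of page 0.
 */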
static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}
static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}
static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}
static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}
static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}
static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "BTRFS: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}
static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}
static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}
static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}
static int io_ctl_read_entry(struct io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}
static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up in entries.  This poses a problem
 * with the tree logging stuff since it could have allocated across what
 * appears to be 2 entries since we would have merged the entries when adding
 * the pinned extents back to the free space cache.  So run through the space
 * cache that we just loaded and merge contiguous entries.  This will make the
 * log replay stuff not blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "The free space cache file (%llu) is invalid. skip it\n",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) "
			"did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
			block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
			block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static noinline_for_stack
int write_cache_extent_entries(struct io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}
	return 0;
fail:
	return -ENOSPC;
}
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}
static noinline_for_stack int
write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct list_head *pos, *n;
	int ret;

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}
static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}
static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	struct io_ctl io_ctl;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;

	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root, 1);
	if (ret)
		return -1;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries in the free space cache */
	ret = write_cache_extent_entries(&io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 */
	ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
	if (ret)
		goto out_nospc;

	/* At last, we write out all the bitmaps. */
	ret = write_bitmap_entries(&io_ctl, &bitmap_list);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(&io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				entries, bitmaps);
out:
	io_ctl_free(&io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return ret;

out_nospc:
	cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (block_group->delalloc_bytes) {
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
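/*
 * offset_to_bitmap() rounds an offset down to the start of the bitmap that
 * covers it, measured from ctl->start.  For example (assuming a 4KiB unit,
 * i.e. 128MiB per bitmap) with ctl->start at 1GiB, an offset of
 * 1GiB + 200MiB maps to the bitmap starting at 1GiB + 128MiB.
 */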
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max(max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
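/*
 * Worked example (assuming 4KiB pages): a 1GiB block group gets
 * max_bytes = 32KiB.  With no bitmaps yet, bitmap_bytes accounts for one
 * future bitmap, 1 * PAGE_CACHE_SIZE = 4KiB, so extent_bytes =
 * min(32KiB - 4KiB, 32KiB / 2) = 16KiB, and the extent threshold is 16KiB
 * divided by sizeof(struct btrfs_free_space).
 */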
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}
static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
/*
 * If we can not find suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	return -1;
}
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}
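/*
 * The alignment math in find_free_space() rounds entry->offset up to the
 * next 'align' boundary relative to ctl->start.  E.g. with align = 64KiB,
 * ctl->start = 0 and entry->offset = 100KiB: tmp = 100KiB + 64KiB - 1,
 * divided by 64KiB gives 2, so the aligned offset is 128KiB and
 * align_off = 28KiB of the entry is skipped over.
 */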
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory
	 * limit, so allow those block groups to still be allowed to have a
	 * bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
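/*
 * These ops keep the btrfs_free_space_ctl code generic: block groups use
 * free_space_op above, while the cached free-inode tracking (inode-map.c)
 * plugs in its own op table with different thresholds.
 */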
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests.  So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted.  Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				ret = 0;
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(block_group->fs_info,
			   "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(block_group->fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
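/*
 * Helper for btrfs_alloc_from_cluster() when the cluster entry is a
 * bitmap: carve 'bytes' out of the bitmap at or after min_start.  On
 * failure the run size found by search_bitmap() is reported through
 * *max_extent_size so the caller can adjust its expectations.
 */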
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
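/*
 * Try to build a cluster window out of a single bitmap entry: look for
 * runs of at least min_bits set bits until total_found covers want_bits
 * and the largest run reaches cont1_bytes, then move the whole entry
 * from the free space tree into the cluster's rbtree.
 */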
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.  The list may also be empty even though
	 * total_bitmaps is non-zero, so don't dereference a bogus entry.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space,
					 list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	struct list_head bitmaps;
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
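/*
 * Discard one range and give it back to the free space cache.  While the
 * discard is in flight the range is accounted as reserved (unless the
 * block group is read-only), so the allocator can't hand it out again
 * before the device has finished trimming it.
 */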
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_error_discard_extent(fs_info->extent_root,
					 start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
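/*
 * Trim the plain extent entries of the free space cache that intersect
 * [start, end), skipping anything smaller than minlen.
 */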
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
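/*
 * Like trim_no_bitmap() but for space tracked in bitmap entries, walking
 * the range one bitmap (BITS_PER_BITMAP * ctl->unit bytes) at a time.
 */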
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;

		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
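/*
 * Trim a whole block group, e.g. on behalf of FITRIM.  The ->trimming
 * counter pins the block group's extent mapping: if the group is removed
 * while a trim is running, whoever drops the last trimming reference
 * does the extent map cleanup here instead of in the removal path.
 */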
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	atomic_inc(&block_group->trimming);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	spin_lock(&block_group->lock);
	if (atomic_dec_and_test(&block_group->trimming) &&
	    block_group->removed) {
		struct extent_map_tree *em_tree;
		struct extent_map *em;

		spin_unlock(&block_group->lock);

		em_tree = &block_group->fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		lock_chunks(block_group->fs_info->chunk_root);
		list_del_init(&em->list);
		unlock_chunks(block_group->fs_info->chunk_root);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);
	} else {
		spin_unlock(&block_group->lock);
	}

	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
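/*
 * Look up the inode backing this root's free inode number cache, caching
 * the result in root->ino_cache_inode for later lookups.
 */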
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
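/*
 * Populate the in-memory free ino cache from its on-disk copy.  A stale
 * cache (root generation mismatch) is ignored, and a load failure is
 * only logged, since callers can always rebuild the cache by scanning
 * the tree.
 */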
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
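/*
 * Write the free ino cache back to disk.  On failure the reserved
 * delalloc metadata is released and the error is logged on DEBUG builds
 * only; a missing cache is simply rebuilt later.
 */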
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		btrfs_err(root->fs_info,
			  "failed to write free ino cache for root %llu",
			  root->root_key.objectid);
#endif
	}

	return ret;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kfree(map);
	return 0;
}
/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&info->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&info->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */