/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				    int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
					     struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);
/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	int error;
};
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_ORPHAN_OBJECTID,		.name_stem = "orphan"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = 0,				.name_stem = "tree"	},
};
void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}
u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	put_unaligned_le32(~crc, result);
}
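/*
 * Note on the convention used by the two helpers above: callers feed data
 * into crc32c with a running seed (the first call uses ~(u32)0), and
 * btrfs_csum_final() inverts the accumulated value and stores it unaligned
 * little-endian, which is the standard CRC32C presentation.
 */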
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
				       "failed on %llu wanted %X found %X "
				       "level %d\n",
				       root->fs_info->sb->s_id,
				       (unsigned long long)buf->start, val, found,
				       btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk_ratelimited("parent transid verify failed on %llu wanted %llu "
		       "found %llu\n",
		       (unsigned long long)eb->start,
		       (unsigned long long)parent_transid,
		       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}
/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
				csum_type);
		ret = 1;
	}

	return ret;
}
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start,
					       WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(root->fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(root, eb, failed_mirror);

	return ret;
}
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		return 0;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		return 0;
	}
	csum_tree_block(root, eb, 0);
	return 0;
}
static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
			   BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}
#define CORRUPT(reason, eb, root, slot)				\
	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu,"	\
	       "root=%llu, slot=%d\n", reason,			\
	       (unsigned long long)btrfs_header_bytenr(eb),	\
	       (unsigned long long)root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
				      struct extent_state *state, int mirror)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all this other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
			       "%llu %llu\n",
			       (unsigned long long)found_start,
			       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
			       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_info(root->fs_info, "bad tree block level %d\n",
			   (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret) {
		ret = -EIO;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}
static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}
static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}
/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->error = ret;
}
*work
)
770 struct btrfs_fs_info
*fs_info
;
771 struct async_submit_bio
*async
;
774 async
= container_of(work
, struct async_submit_bio
, work
);
775 fs_info
= BTRFS_I(async
->inode
)->root
->fs_info
;
777 limit
= btrfs_async_submit_limit(fs_info
);
778 limit
= limit
* 2 / 3;
780 if (atomic_dec_return(&fs_info
->nr_async_submits
) < limit
&&
781 waitqueue_active(&fs_info
->async_submit_wait
))
782 wake_up(&fs_info
->async_submit_wait
);
784 /* If an error occured we just want to clean up the bio and move on */
786 bio_endio(async
->bio
, async
->error
);
790 async
->submit_bio_done(async
->inode
, async
->rw
, async
->bio
,
791 async
->mirror_num
, async
->bio_flags
,
795 static void run_one_async_free(struct btrfs_work
*work
)
797 struct async_submit_bio
*async
;
799 async
= container_of(work
, struct async_submit_bio
, work
);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;
	int ret = 0;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root, bvec->bv_page);
		if (ret)
			break;
		bio_index++;
		bvec++;
	}
	return ret;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}
static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
	if (ret)
		bio_endio(bio, ret);
	return ret;
}
static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (cpu_has_xmm4_2)
		return 0;
#endif
	return 1;
}
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int async = check_async_write(inode, bio_flags);
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num, 0,
					  bio_offset,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret) {
out_w_error:
		bio_endio(bio, ret);
	}
	return ret;
}
#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct btrfs_fs_info *fs_info;
	int ret;

	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}
static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}
static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};
*root
, u64 bytenr
, u32 blocksize
,
1055 struct extent_buffer
*buf
= NULL
;
1056 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
1059 buf
= btrfs_find_create_tree_block(root
, bytenr
, blocksize
);
1062 read_extent_buffer_pages(&BTRFS_I(btree_inode
)->io_tree
,
1063 buf
, 0, WAIT_NONE
, btree_get_extent
, 0);
1064 free_extent_buffer(buf
);
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}
*btrfs_find_tree_block(struct btrfs_root
*root
,
1101 u64 bytenr
, u32 blocksize
)
1103 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
1104 struct extent_buffer
*eb
;
1105 eb
= find_extent_buffer(&BTRFS_I(btree_inode
)->io_tree
,
1110 struct extent_buffer
*btrfs_find_create_tree_block(struct btrfs_root
*root
,
1111 u64 bytenr
, u32 blocksize
)
1113 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
1114 struct extent_buffer
*eb
;
1116 eb
= alloc_extent_buffer(&BTRFS_I(btree_inode
)->io_tree
,
1122 int btrfs_write_tree_block(struct extent_buffer
*buf
)
1124 return filemap_fdatawrite_range(buf
->pages
[0]->mapping
, buf
->start
,
1125 buf
->start
+ buf
->len
- 1);
1128 int btrfs_wait_tree_block_writeback(struct extent_buffer
*buf
)
1130 return filemap_fdatawait_range(buf
->pages
[0]->mapping
,
1131 buf
->start
, buf
->start
+ buf
->len
- 1);
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return NULL;
	}
	return buf;
}
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}
, u32 leafsize
, u32 sectorsize
,
1170 u32 stripesize
, struct btrfs_root
*root
,
1171 struct btrfs_fs_info
*fs_info
,
1175 root
->commit_root
= NULL
;
1176 root
->sectorsize
= sectorsize
;
1177 root
->nodesize
= nodesize
;
1178 root
->leafsize
= leafsize
;
1179 root
->stripesize
= stripesize
;
1181 root
->track_dirty
= 0;
1183 root
->orphan_item_inserted
= 0;
1184 root
->orphan_cleanup_state
= 0;
1186 root
->objectid
= objectid
;
1187 root
->last_trans
= 0;
1188 root
->highest_objectid
= 0;
1190 root
->inode_tree
= RB_ROOT
;
1191 INIT_RADIX_TREE(&root
->delayed_nodes_tree
, GFP_ATOMIC
);
1192 root
->block_rsv
= NULL
;
1193 root
->orphan_block_rsv
= NULL
;
1195 INIT_LIST_HEAD(&root
->dirty_list
);
1196 INIT_LIST_HEAD(&root
->root_list
);
1197 INIT_LIST_HEAD(&root
->logged_list
[0]);
1198 INIT_LIST_HEAD(&root
->logged_list
[1]);
1199 spin_lock_init(&root
->orphan_lock
);
1200 spin_lock_init(&root
->inode_lock
);
1201 spin_lock_init(&root
->accounting_lock
);
1202 spin_lock_init(&root
->log_extents_lock
[0]);
1203 spin_lock_init(&root
->log_extents_lock
[1]);
1204 mutex_init(&root
->objectid_mutex
);
1205 mutex_init(&root
->log_mutex
);
1206 init_waitqueue_head(&root
->log_writer_wait
);
1207 init_waitqueue_head(&root
->log_commit_wait
[0]);
1208 init_waitqueue_head(&root
->log_commit_wait
[1]);
1209 atomic_set(&root
->log_commit
[0], 0);
1210 atomic_set(&root
->log_commit
[1], 0);
1211 atomic_set(&root
->log_writers
, 0);
1212 atomic_set(&root
->log_batch
, 0);
1213 atomic_set(&root
->orphan_inodes
, 0);
1214 root
->log_transid
= 0;
1215 root
->last_log_commit
= 0;
1216 extent_io_tree_init(&root
->dirty_log_pages
,
1217 fs_info
->btree_inode
->i_mapping
);
1219 memset(&root
->root_key
, 0, sizeof(root
->root_key
));
1220 memset(&root
->root_item
, 0, sizeof(root
->root_item
));
1221 memset(&root
->defrag_progress
, 0, sizeof(root
->defrag_progress
));
1222 memset(&root
->root_kobj
, 0, sizeof(root
->root_kobj
));
1223 root
->defrag_trans_start
= fs_info
->generation
;
1224 init_completion(&root
->kobj_unregister
);
1225 root
->defrag_running
= 0;
1226 root
->root_key
.objectid
= objectid
;
1229 spin_lock_init(&root
->root_item_lock
);
static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
					    struct btrfs_fs_info *fs_info,
					    u64 objectid,
					    struct btrfs_root *root)
{
	int ret;
	u32 blocksize;
	u64 generation;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	if (ret > 0)
		return -ENOENT;
	else if (ret < 0)
		return ret;

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->commit_root = NULL;
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
		free_extent_buffer(root->node);
		root->node = NULL;
		return -EIO;
	}
	root->commit_root = btrfs_root_node(root);
	return 0;
}
static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
	if (root)
		root->fs_info = fs_info;
	return root;
}
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	u64 bytenr;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	bytenr = leaf->start;
	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer(leaf, fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(leaf),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	root->track_dirty = 1;

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      BTRFS_TREE_LOG_OBJECTID, NULL,
				      0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(root->node),
			    BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->last_log_commit = 0;
	return 0;
}
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 generation;
	u32 blocksize;
	int ret = 0;
	int slot;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto out;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret == 0) {
		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_read_root_item(l, slot, &root->root_item);
		memcpy(&root->root_key, location, sizeof(*location));
	}
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		if (ret > 0)
			ret = -ENOENT;
		return ERR_PTR(ret);
	}

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node || !extent_buffer_uptodate(root->node)) {
		ret = (!root->node) ? -ENOMEM : -EIO;

		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	root->commit_root = btrfs_root_node(root);
	BUG_ON(!root->node); /* -ENOMEM */
out:
	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
		root->ref_cows = 1;
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
again:
	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	btrfs_init_free_ino_ctl(root);
	mutex_init(&root->fs_commit_mutex);
	spin_lock_init(&root->cache_lock);
	init_waitqueue_head(&root->cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		root->orphan_item_inserted = 1;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto fail;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		root->in_radix = 1;

	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}

	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid);
	WARN_ON(ret);
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	bdi->capabilities = BDI_CAP_MAP_COPY;
	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	bdi->ra_pages	= default_backing_dev_info.ra_pages;
	bdi->congested_fn	= btrfs_congested_fn;
	bdi->congested_data	= info;
	return 0;
}
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio(bio, error);
}
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	int again;

	do {
		again = 0;

		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
		    down_read_trylock(&root->fs_info->sb->s_umount)) {
			if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
				btrfs_run_delayed_iputs(root);
				again = btrfs_clean_one_deleted_snapshot(root);
				mutex_unlock(&root->fs_info->cleaner_mutex);
			}
			btrfs_run_defrag_inodes(root->fs_info);
			up_read(&root->fs_info->sb->s_umount);
		}

		if (!try_to_freeze() && !again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
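/*
 * Note the trylocks above: the cleaner only runs when it can take both
 * s_umount and cleaner_mutex without blocking, so it always yields to an
 * unmount in progress and to explicit transaction commits.
 */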
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * 30;
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (!cur->blocked &&
		    (now < cur->start_time || now - cur->start_time < 30)) {
			spin_unlock(&root->fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans, root);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    (!btrfs_transaction_blocked(root->fs_info) ||
			     cannot_commit))
				schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
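/*
 * Net effect of the timing logic above: a running transaction is left
 * open for roughly 30 seconds before this kthread commits it, unless
 * something else commits first, in which case transid no longer matches
 * and we just drop our handle with btrfs_end_transaction().
 */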
/*
 * this will find the highest generation in the array of
 * root backups.  The index of the highest array is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}
/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}
/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}
/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * fixme: the total bytes and num_devices need to match or we should
	 * need a fsck
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
	return 0;
}
/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_raid56_workers);
	btrfs_stop_workers(&fs_info->rmw_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
	btrfs_stop_workers(&fs_info->caching_workers);
	btrfs_stop_workers(&fs_info->readahead_workers);
	btrfs_stop_workers(&fs_info->flush_workers);
	btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
}
/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_extent_buffer(info->tree_root->node);
	free_extent_buffer(info->tree_root->commit_root);
	free_extent_buffer(info->dev_root->node);
	free_extent_buffer(info->dev_root->commit_root);
	free_extent_buffer(info->extent_root->node);
	free_extent_buffer(info->extent_root->commit_root);
	free_extent_buffer(info->csum_root->node);
	free_extent_buffer(info->csum_root->commit_root);
	if (info->quota_root) {
		free_extent_buffer(info->quota_root->node);
		free_extent_buffer(info->quota_root->commit_root);
	}

	info->tree_root->node = NULL;
	info->tree_root->commit_root = NULL;
	info->dev_root->node = NULL;
	info->dev_root->commit_root = NULL;
	info->extent_root->node = NULL;
	info->extent_root->commit_root = NULL;
	info->csum_root->node = NULL;
	info->csum_root->commit_root = NULL;
	if (info->quota_root) {
		info->quota_root->node = NULL;
		info->quota_root->commit_root = NULL;
	}

	if (chunk_root) {
		free_extent_buffer(info->chunk_root->node);
		free_extent_buffer(info->chunk_root->commit_root);
		info->chunk_root->node = NULL;
		info->chunk_root->commit_root = NULL;
	}
}
static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (gang[0]->in_radix) {
			btrfs_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			kfree(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
}
2049 int open_ctree(struct super_block
*sb
,
2050 struct btrfs_fs_devices
*fs_devices
,
2060 struct btrfs_key location
;
2061 struct buffer_head
*bh
;
2062 struct btrfs_super_block
*disk_super
;
2063 struct btrfs_fs_info
*fs_info
= btrfs_sb(sb
);
2064 struct btrfs_root
*tree_root
;
2065 struct btrfs_root
*extent_root
;
2066 struct btrfs_root
*csum_root
;
2067 struct btrfs_root
*chunk_root
;
2068 struct btrfs_root
*dev_root
;
2069 struct btrfs_root
*quota_root
;
2070 struct btrfs_root
*log_tree_root
;
2073 int num_backups_tried
= 0;
2074 int backup_index
= 0;
2076 tree_root
= fs_info
->tree_root
= btrfs_alloc_root(fs_info
);
2077 extent_root
= fs_info
->extent_root
= btrfs_alloc_root(fs_info
);
2078 csum_root
= fs_info
->csum_root
= btrfs_alloc_root(fs_info
);
2079 chunk_root
= fs_info
->chunk_root
= btrfs_alloc_root(fs_info
);
2080 dev_root
= fs_info
->dev_root
= btrfs_alloc_root(fs_info
);
2081 quota_root
= fs_info
->quota_root
= btrfs_alloc_root(fs_info
);
2083 if (!tree_root
|| !extent_root
|| !csum_root
||
2084 !chunk_root
|| !dev_root
|| !quota_root
) {
2089 ret
= init_srcu_struct(&fs_info
->subvol_srcu
);
2095 ret
= setup_bdi(fs_info
, &fs_info
->bdi
);
2101 ret
= percpu_counter_init(&fs_info
->dirty_metadata_bytes
, 0);
2106 fs_info
->dirty_metadata_batch
= PAGE_CACHE_SIZE
*
2107 (1 + ilog2(nr_cpu_ids
));
2109 ret
= percpu_counter_init(&fs_info
->delalloc_bytes
, 0);
2112 goto fail_dirty_metadata_bytes
;
2115 fs_info
->btree_inode
= new_inode(sb
);
2116 if (!fs_info
->btree_inode
) {
2118 goto fail_delalloc_bytes
;
2121 mapping_set_gfp_mask(fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->free_chunk_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	spin_lock_init(&fs_info->super_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	mutex_init(&fs_info->reloc_mutex);
	seqlock_init(&fs_info->profiles_lock);

	init_completion(&fs_info->kobj_unregister);
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
			     BTRFS_BLOCK_RSV_DELALLOC);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->max_inline = 8192 * 1024;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	fs_info->trans_no_join = 0;
	fs_info->free_chunk_space = 0;
	fs_info->tree_mod_log = RB_ROOT;

	/* readahead state */
	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
	spin_lock_init(&fs_info->reada_lock);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);
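	/*
	 * Sizing rationale (inferred from the expression above, not
	 * documented here): a couple of threads more than there are CPUs
	 * keeps the pools busy under mixed IO, while the cap of 8 avoids
	 * spawning a thread army on large machines, e.g. 2 CPUs -> 4
	 * threads, 32 CPUs -> 8 threads.
	 */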
	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);
	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_NOFS);
	if (!fs_info->delayed_root) {
		err = -ENOMEM;
		goto fail_iput;
	}
	btrfs_init_delayed_root(fs_info->delayed_root);

	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	init_rwsem(&fs_info->scrub_super_lock);
	fs_info->scrub_workers_refcnt = 0;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	fs_info->check_integrity_print_mask = 0;
#endif

	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_running, 0);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);
	sb->s_bdi = &fs_info->bdi;
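	/*
	 * The btree inode set up below is a private in-kernel inode
	 * whose page cache backs every metadata extent buffer; giving
	 * all tree blocks one address space lets the generic writeback
	 * and readahead machinery manage metadata like file data.
	 */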
	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(fs_info->btree_inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping);
	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY,
		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
	insert_inode_hash(fs_info->btree_inode);
	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;
	fs_info->first_logical_byte = (u64)-1;

	extent_io_tree_init(&fs_info->freed_extents[0],
			    fs_info->btree_inode->i_mapping);
	extent_io_tree_init(&fs_info->freed_extents[1],
			    fs_info->btree_inode->i_mapping);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	fs_info->do_barriers = 1;
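	/*
	 * freed_extents[] is double buffered: pinned_extents points at
	 * whichever tree is collecting extents freed in the currently
	 * running transaction, and the two trees swap at commit time so
	 * the previous set can be unpinned while new frees accumulate
	 * (see the swap in btrfs_destroy_pinned_extent() below).
	 */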
	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_rwsem(&fs_info->extent_commit_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	fs_info->dev_replace.lock_owner = 0;
	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	mutex_init(&fs_info->dev_replace.lock_management_lock);
	mutex_init(&fs_info->dev_replace.lock);

	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	mutex_init(&fs_info->qgroup_rescan_lock);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);
	ret = btrfs_alloc_stripe_hash_table(fs_info);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	invalidate_bdev(fs_devices->latest_bdev);

	/*
	 * Read super block and check the signature bytes only
	 */
	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (!bh) {
		err = -EINVAL;
		goto fail_alloc;
	}
	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(bh->b_data)) {
		printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
		err = -EINVAL;
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
	if (ret) {
		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
		err = -EINVAL;
		goto fail_alloc;
	}

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_alloc;

	/* check FS state, whether FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
	/*
	 * run through our array of backup supers and setup
	 * our ring pointer to the oldest one
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	ret = btrfs_parse_options(tree_root, options);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		printk(KERN_ERR "BTRFS: couldn't mount because of "
		       "unsupported optional features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_alloc;
	}

	if (btrfs_super_leafsize(disk_super) !=
	    btrfs_super_nodesize(disk_super)) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksizes don't match. node %d leaf %d\n",
		       btrfs_super_nodesize(disk_super),
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}
	if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksize (%d) was too large\n",
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		printk(KERN_ERR "btrfs: has skinny extents\n");

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}
	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	/*
	 * mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range. It leads to corruptions
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != leafsize)) {
		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
		       "are not allowed for mixed block groups on %s\n",
		       sb->s_id);
		goto fail_alloc;
	}

	/*
	 * Needn't use the lock because there is no other task which will
	 * update the flag.
	 */
	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
		       "unsupported option features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_alloc;
	}
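	/*
	 * All of the worker pools below chain to generic_worker so idle
	 * threads can be shared.  Pool sizes are either fixed (the
	 * strictly ordered single-thread pools such as fixup and
	 * freespace-write) or scale with thread_pool_size; this split is
	 * inferred from the sizing arguments, not documented here.
	 */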
	btrfs_init_workers(&fs_info->generic_worker,
			   "genwork", 1, NULL);

	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
				 fs_info->thread_pool_size),
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->caching_workers, "cache",
			   2, &fs_info->generic_worker);

	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	fs_info->workers.idle_thresh = 16;
	fs_info->workers.ordered = 1;

	fs_info->delalloc_workers.idle_thresh = 2;
	fs_info->delalloc_workers.ordered = 1;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_write_workers,
			   "endio-meta-write", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_raid56_workers,
			   "endio-raid56", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->rmw_workers,
			   "rmw", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
			   1, &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
			   &fs_info->generic_worker);
	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;
	fs_info->endio_raid56_workers.idle_thresh = 4;
	fs_info->rmw_workers.idle_thresh = 2;

	fs_info->endio_write_workers.idle_thresh = 2;
	fs_info->endio_meta_write_workers.idle_thresh = 2;
	fs_info->readahead_workers.idle_thresh = 2;
	/*
	 * btrfs_start_workers can really only fail because of ENOMEM so just
	 * return -ENOMEM if any of these fail.
	 */
	ret = btrfs_start_workers(&fs_info->workers);
	ret |= btrfs_start_workers(&fs_info->generic_worker);
	ret |= btrfs_start_workers(&fs_info->submit_workers);
	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
	ret |= btrfs_start_workers(&fs_info->fixup_workers);
	ret |= btrfs_start_workers(&fs_info->endio_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
	ret |= btrfs_start_workers(&fs_info->rmw_workers);
	ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
	ret |= btrfs_start_workers(&fs_info->delayed_workers);
	ret |= btrfs_start_workers(&fs_info->caching_workers);
	ret |= btrfs_start_workers(&fs_info->readahead_workers);
	ret |= btrfs_start_workers(&fs_info->flush_workers);
	ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
	if (ret) {
		err = -ENOMEM;
		goto fail_sb_buffer;
	}
	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);

	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	if (sectorsize != PAGE_SIZE) {
		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
		goto fail_sb_buffer;
	}
	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read the system "
		       "array on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));
	generation = btrfs_super_chunk_root_generation(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, generation);
	if (!chunk_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
	   BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(chunk_root);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}
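	/*
	 * Bootstrap ordering: the logical->physical mappings for the
	 * system chunks live in the sys_array embedded in the super
	 * block, which is just enough to read the chunk tree, which in
	 * turn maps every other byte of the filesystem.
	 */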
	/*
	 * keep the device that is marked to be the target device for the
	 * dev_replace procedure
	 */
	btrfs_close_extra_devices(fs_info, fs_devices, 0);

	if (!fs_devices->latest_bdev) {
		printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}

retry_root_backup:
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, generation);
	if (!tree_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
		       sb->s_id);
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);
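	/*
	 * From here on, a failure to read a tree root jumps to
	 * recovery_tree_root, which (when the RECOVERY mount option is
	 * set) rolls the super block back to the next root backup and
	 * retries at retry_root_backup until the backup ring is
	 * exhausted.
	 */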
	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto recovery_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	if (ret)
		goto recovery_tree_root;
	dev_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
	if (ret)
		goto recovery_tree_root;
	csum_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_QUOTA_TREE_OBJECTID, quota_root);
	if (ret) {
		kfree(quota_root);
		quota_root = fs_info->quota_root = NULL;
	} else {
		quota_root->track_dirty = 1;
		fs_info->quota_enabled = 1;
		fs_info->pending_quota_state = 1;
	}
	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to recover balance\n");
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
		       ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		pr_err("btrfs: failed to init dev_replace: %d\n", ret);
		goto fail_block_groups;
	}

	btrfs_close_extra_devices(fs_info, fs_devices, 1);

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
		goto fail_block_groups;
	}

	ret = btrfs_read_block_groups(extent_root);
	if (ret) {
		printk(KERN_ERR "Failed to read block groups: %d\n", ret);
		goto fail_block_groups;
	}
	fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	if (fs_info->fs_devices->missing_devices >
	     fs_info->num_tolerated_disk_barrier_failures &&
	    !(sb->s_flags & MS_RDONLY)) {
		printk(KERN_WARNING
		       "Btrfs: too many missing devices, writeable mount is not allowed\n");
		goto fail_block_groups;
	}
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_block_groups;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	if (!btrfs_test_opt(tree_root, SSD) &&
	    !btrfs_test_opt(tree_root, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
		       "mode\n");
		btrfs_set_opt(fs_info->mount_opt, SSD);
	}

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(tree_root, fs_devices,
				    btrfs_test_opt(tree_root,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			printk(KERN_WARNING "btrfs: failed to initialize"
			       " integrity check module %s\n", sb->s_id);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;
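	/*
	 * A non-zero log root in the super block means the filesystem
	 * was not cleanly unmounted: items logged by fsync must be
	 * replayed into the main trees before the mount may proceed,
	 * which is why replay on read-only media is a hard failure.
	 */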
	/* do not make disk changes in broken FS */
	if (btrfs_super_log_root(disk_super) != 0) {
		u64 bytenr = btrfs_super_log_root(disk_super);

		if (fs_devices->rw_devices == 0) {
			printk(KERN_WARNING "Btrfs log replay required "
			       "on RO media\n");
			err = -EIO;
			goto fail_qgroup;
		}
		blocksize =
		     btrfs_level_size(tree_root,
				      btrfs_super_log_root_level(disk_super));

		log_tree_root = btrfs_alloc_root(fs_info);
		if (!log_tree_root) {
			err = -ENOMEM;
			goto fail_qgroup;
		}

		__setup_root(nodesize, leafsize, sectorsize, stripesize,
			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

		log_tree_root->node = read_tree_block(tree_root, bytenr,
						      blocksize,
						      generation + 1);
		if (!log_tree_root->node ||
		    !extent_buffer_uptodate(log_tree_root->node)) {
			printk(KERN_ERR "btrfs: failed to read log tree\n");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}
		/* returns with log_tree_root freed on success */
		ret = btrfs_recover_log_trees(log_tree_root);
		if (ret) {
			btrfs_error(tree_root->fs_info, ret,
				    "Failed to recover log tree");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}
	}
	if (sb->s_flags & MS_RDONLY) {
		ret = btrfs_commit_super(tree_root);
		if (ret)
			goto fail_trans_kthread;
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_trans_kthread;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_trans_kthread;

		ret = btrfs_recover_relocation(tree_root);
		if (ret < 0) {
			printk(KERN_WARNING
			       "btrfs: failed to recover relocation\n");
			err = -EINVAL;
			goto fail_qgroup;
		}
	}
	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (!fs_info->fs_root)
		goto fail_qgroup;
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		goto fail_qgroup;
	}

	if (sb->s_flags & MS_RDONLY)
		return 0;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(tree_root);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to resume balance\n");
		close_ctree(tree_root);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		pr_warn("btrfs: failed to resume dev_replace\n");
		close_ctree(tree_root);
		return ret;
	}

	return 0;
fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	del_fs_roots(fs_info);
	btrfs_cleanup_transaction(fs_info->tree_root);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);
	btrfs_free_block_groups(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
	bdi_destroy(&fs_info->bdi);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;

recovery_tree_root:
	if (!btrfs_test_opt(tree_root, RECOVERY))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
					  "I/O error on %s\n",
					  rcu_str_deref(device->name));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	u64 bytenr;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
			break;
		bh = __bread(bdev, bytenr / 4096, 4096);
		if (!bh)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;
		if (btrfs_super_bytenr(super) != bytenr ||
		    super->magic != cpu_to_le64(BTRFS_MAGIC)) {
			brelse(bh);
			continue;
		}

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}
	return latest;
}
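/*
 * For reference (btrfs on-disk layout, stated here as an assumption of
 * this note): btrfs_sb_offset() maps mirror numbers to the fixed super
 * block copies at 64KiB, 64MiB and 256GiB, and BTRFS_SUPER_MIRROR_MAX
 * is 3.  Only mirror 0 is scanned above, for the reason given in the
 * comment inside btrfs_read_dev_super().
 */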
/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1. When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				errors++;
				continue;
			}
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data((char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				printk(KERN_ERR "btrfs: couldn't get super "
				       "buffer head for bytenr %Lu\n", bytenr);
				errors++;
				continue;
			}

			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * we fua the first super. The others we allow
		 * to go down lazy.
		 */
		if (i == 0)
			ret = btrfsic_submit_bh(WRITE_FUA, bh);
		else
			ret = btrfsic_submit_bh(WRITE_SYNC, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}
/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
}
/*
 * trigger flushes for one of the devices. If you pass wait == 0, the flushes
 * are sent down. With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp is flagged as not-barrier
 * capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		bio = device->flush_bio;
		if (!bio)
			return 0;

		wait_for_completion(&device->flush_wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
			printk_in_rcu("btrfs: disabling barriers on dev %s\n",
				      rcu_str_deref(device->name));
			device->nobarriers = 1;
		} else if (!bio_flagged(bio, BIO_UPTODATE)) {
			ret = -EIO;
			btrfs_dev_stat_inc_and_print(device,
				BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}
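/*
 * Splitting submission (wait == 0) from completion (wait == 1) lets
 * barrier_all_devices() push one empty flush bio to every device before
 * blocking on any of them, so the device caches drain in parallel
 * instead of one disk at a time.
 */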
/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_send = 0;
	int errors_wait = 0;
	int ret;

	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			errors_send++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 0);
		if (ret)
			errors_send++;
	}

	/* wait for all the barriers */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 1);
		if (ret)
			errors_wait++;
	}
	if (errors_send > info->num_tolerated_disk_barrier_failures ||
	    errors_wait > info->num_tolerated_disk_barrier_failures)
		return -EIO;
	return 0;
}
int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
{
	struct btrfs_ioctl_space_info space;
	struct btrfs_space_info *sinfo;
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int num_types = 4;
	int i;
	int c;
	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;

	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		sinfo = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				sinfo = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!sinfo)
			continue;

		down_read(&sinfo->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&sinfo->block_groups[c])) {
				u64 flags;

				btrfs_get_block_group_info(
					&sinfo->block_groups[c], &space);
				if (space.total_bytes == 0 ||
				    space.used_bytes == 0)
					continue;
				flags = space.flags;
				/*
				 * return
				 * 0: if dup, single or RAID0 is configured for
				 *    any of metadata, system or data, else
				 * 1: if RAID5 is configured, or if RAID1 or
				 *    RAID10 is configured and only two mirrors
				 *    are used, else
				 * 2: if RAID6 is configured, else
				 * num_mirrors - 1: if RAID1 or RAID10 is
				 *                  configured and more than
				 *                  2 mirrors are used.
				 */
				if (num_tolerated_disk_barrier_failures > 0 &&
				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
					       BTRFS_BLOCK_GROUP_RAID0)) ||
				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
				      == 0)))
					num_tolerated_disk_barrier_failures = 0;
				else if (num_tolerated_disk_barrier_failures > 1) {
					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
					    BTRFS_BLOCK_GROUP_RAID5 |
					    BTRFS_BLOCK_GROUP_RAID10)) {
						num_tolerated_disk_barrier_failures = 1;
					} else if (flags &
						   BTRFS_BLOCK_GROUP_RAID6) {
						num_tolerated_disk_barrier_failures = 2;
					}
				}
			}
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}
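/*
 * Worked example (illustrative): a two-device filesystem with RAID1
 * data and metadata starts from num_devices = 2 and is capped at 1 by
 * the RAID1 branch -- one device may fail its barrier before super
 * block writes must be refused.  Any DUP, single or RAID0 block group
 * drags the whole filesystem down to 0 tolerated failures.
 */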
static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;

	if (do_barriers) {
		ret = barrier_all_devices(root->fs_info);
		if (ret) {
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			btrfs_error(root->fs_info, ret,
				    "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		/* This shouldn't happen. FUA is masked off if unsupported */
		BUG();
	}

	total_errors = 0;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_error(root->fs_info, -EIO,
			    "%d errors while writing supers", total_errors);
		return -EIO;
	}
	return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	int ret;

	ret = write_all_supers(root, max_mirrors);
	return ret;
}
void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log(NULL, root);
		btrfs_free_log_root_tree(NULL, fs_info);
	}

	__btrfs_remove_free_space_cache(root->free_ino_pinned);
	__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}
static void free_fs_root(struct btrfs_root *root)
{
	iput(root->cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	kfree(root);
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;

		root_objectid = gang[ret - 1]->root_key.objectid + 1;
		for (i = 0; i < ret; i++) {
			int err;

			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				return err;
		}
		root_objectid++;
	}
	return 0;
}
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	wake_up_process(root->fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;
	/* run commit again to drop the original snapshot */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;
	ret = btrfs_write_and_wait_transaction(NULL, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to sync btree inode to disk.");
		return ret;
	}

	ret = write_ctree_super(NULL, root, 0);
	return ret;
}
int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(root);

	btrfs_put_block_group_cache(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	btrfs_free_qgroup_config(root->fs_info);

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
		       percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	free_root_pointers(fs_info, 1);

	btrfs_free_block_groups(fs_info);

	del_fs_roots(fs_info);

	iput(fs_info->btree_inode);

	btrfs_stop_all_workers(fs_info);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);

	return 0;
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
		     "found %llu running %llu\n",
		     (unsigned long long)buf->start,
		     (unsigned long long)transid,
		     (unsigned long long)root->fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
				     buf->len,
				     root->fs_info->dirty_metadata_batch);
}
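/*
 * Note on the PF_MEMALLOC bail-out below: balance_dirty_pages() may
 * block waiting for writeback, and this function can be reached from
 * allocation paths running inside memory reclaim, so throttling there
 * could deadlock reclaim against the writeback it is waiting on.
 */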
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(root);

	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(
				   root->fs_info->btree_inode->i_mapping);
	}
	return;
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	/*
	 * Placeholder for checks
	 */
	return 0;
}
static void btrfs_error_commit_super(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);
}
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
					     struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);

	list_splice_init(&t->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		list_del_init(&btrfs_inode->ordered_operations);

		btrfs_invalidate_inodes(btrfs_inode->root);
	}

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->fs_info->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->fs_info->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->fs_info->ordered_extent_lock);
}
int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
			       struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (delayed_refs->num_entries == 0) {
		spin_unlock(&delayed_refs->lock);
		printk(KERN_INFO "delayed_refs has NO entry\n");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->root)) != NULL) {
		struct btrfs_delayed_ref_head *head = NULL;

		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		atomic_set(&ref->refs, 1);
		if (btrfs_delayed_ref_is_head(ref)) {

			head = btrfs_delayed_node_to_head(ref);
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&ref->refs);
				spin_unlock(&delayed_refs->lock);

				/* Need to wait for the delayed ref to run */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(ref);

				spin_lock(&delayed_refs->lock);
				continue;
			}

			if (head->must_insert_reserved)
				btrfs_pin_extent(root, ref->bytenr,
						 ref->num_bytes, 1);
			btrfs_free_delayed_extent_op(head->extent_op);
			delayed_refs->num_heads--;
			if (list_empty(&head->cluster))
				delayed_refs->num_heads_ready--;
			list_del_init(&head->cluster);
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		if (head)
			mutex_unlock(&head->mutex);
		spin_unlock(&delayed_refs->lock);
		btrfs_put_delayed_ref(ref);

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
{
	struct btrfs_pending_snapshot *snapshot;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	list_splice_init(&t->pending_snapshots, &splice);

	while (!list_empty(&splice)) {
		snapshot = list_entry(splice.next,
				      struct btrfs_pending_snapshot,
				      list);
		snapshot->error = -ECANCELED;
		list_del_init(&snapshot->list);
	}
}
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->delalloc_lock);
	list_splice_init(&root->fs_info->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);

		btrfs_invalidate_inodes(btrfs_inode->root);
	}

	spin_unlock(&root->fs_info->delalloc_lock);
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			eb = btrfs_find_tree_block(root, start,
						   root->leafsize);
			start += root->leafsize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		/* opt_discard */
		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_error_discard_extent(root, start,
							 end + 1 - start,
							 NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_delayed_refs(cur_trans, root);
	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
				cur_trans->dirty_pages.dirty_bytes);

	/* FIXME: cleanup wait for commit */
	cur_trans->in_commit = 1;
	cur_trans->blocked = 1;
	wake_up(&root->fs_info->transaction_blocked_wait);

	btrfs_evict_pending_snapshots(cur_trans);

	cur_trans->blocked = 0;
	wake_up(&root->fs_info->transaction_wait);

	cur_trans->commit_done = 1;
	wake_up(&cur_trans->commit_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
}
static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;
	LIST_HEAD(list);

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	list_splice_init(&root->fs_info->trans_list, &list);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);

	while (!list_empty(&list)) {
		t = list_entry(list.next, struct btrfs_transaction, list);

		btrfs_destroy_ordered_operations(t, root);

		btrfs_destroy_ordered_extents(root);

		btrfs_destroy_delayed_refs(t, root);

		/* FIXME: cleanup wait for commit */
		t->in_commit = 1;
		t->blocked = 1;
		smp_mb();
		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
			wake_up(&root->fs_info->transaction_blocked_wait);

		btrfs_evict_pending_snapshots(t);

		t->blocked = 0;
		smp_mb();
		if (waitqueue_active(&root->fs_info->transaction_wait))
			wake_up(&root->fs_info->transaction_wait);

		t->commit_done = 1;
		smp_mb();
		if (waitqueue_active(&t->commit_wait))
			wake_up(&t->commit_wait);

		btrfs_destroy_delayed_inodes(root);
		btrfs_assert_delayed_root_empty(root);

		btrfs_destroy_delalloc_inodes(root);

		spin_lock(&root->fs_info->trans_lock);
		root->fs_info->running_transaction = NULL;
		spin_unlock(&root->fs_info->trans_lock);

		btrfs_destroy_marked_extents(root, &t->dirty_pages,
					     EXTENT_DIRTY);

		btrfs_destroy_pinned_extent(root,
					    root->fs_info->pinned_extents);

		atomic_set(&t->use_count, 0);
		list_del_init(&t->list);
		memset(t, 0, sizeof(*t));
		kmem_cache_free(btrfs_transaction_cachep, t);
	}

	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}
static struct extent_io_ops btree_extent_io_ops = {
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};