/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads. They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

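/*
 * Wrappers around the crc32c library: btrfs_csum_data() feeds @len bytes at
 * @data into the running crc32c @seed, and btrfs_csum_final() inverts the
 * crc and stores it little-endian at @result, which is the on-disk layout
 * of the checksum.
 */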
u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

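/*
 * Check that the fsid stamped in a tree block's header belongs to this
 * filesystem.  Seed devices keep their original fsid, so any fs_devices in
 * the seed chain is an acceptable match.  Returns 0 on a match, 1 otherwise.
 */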
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

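/*
 * Read completion hook for tree blocks: validate the header bytenr, fsid
 * and level, verify the checksum, then run the tree checker before the
 * buffer is marked uptodate.  Any failure is reported as -EIO so the
 * caller can retry from another mirror.
 */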
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

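/*
 * Bio completion callback installed by btrfs_bio_wq_end_io().  It only
 * records the status and routes the bio to the workqueue matching its
 * direction and metadata type; end_workqueue_fn() later calls the original
 * end_io in task context.
 */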
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

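/*
 * Hook a bio so its completion is deferred to one of the end_io
 * workqueues: the original bi_private/bi_end_io pair is stashed in a
 * btrfs_end_io_wq and restored once the work item runs.
 */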
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
		enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

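/*
 * Queue a bio for async submission: run_one_async_start() runs the
 * checksumming hook on a worker thread, run_one_async_done() submits (or
 * fails) the bio, and run_one_async_free() releases the tracking struct.
 * Bios marked sync are flagged high priority on the workqueue.
 */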
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum here; the bio is mapped and
	 * submitted later in __btree_submit_bio_done.
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

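/*
 * Decide whether a metadata write should be checksummed by the async
 * worker threads.  Checksumming happens inline when a task is already
 * waiting in sync_writers, or when the CPU has hardware-accelerated
 * crc32c (SSE4.2 on x86) - presumably because offloading buys little
 * when the checksum itself is cheap.
 */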
static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

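/*
 * Readahead variant that reports back: on success *eb is set to the buffer
 * if it became uptodate, -EIO is returned for a block that fails the
 * corruption checks, and other read errors are passed through.
 */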
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

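/*
 * Read a tree block and verify it against @parent_transid, retrying other
 * mirrors as needed.  Returns the referenced buffer on success or an
 * ERR_PTR on failure.
 */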
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

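/*
 * Initialize the in-memory btrfs_root to a clean state: zeroed keys and
 * counters, empty lists, and all the locks, mutexes and wait queues it
 * needs.  No tree blocks are read here; callers fill in root->node later.
 */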
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

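/*
 * Look up a root item in the tree of tree roots and read the tree block it
 * points at, returning a freshly allocated in-memory root or an ERR_PTR.
 */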
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					  &root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible for calling free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

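/*
 * Resolve a root from its key: the global trees are handed out directly
 * from fs_info, everything else is looked up in the fs_roots radix tree
 * and, on a miss, read from disk, initialized and inserted.  An -EEXIST
 * from the insert means someone else won the race, so retry the lookup.
 */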
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

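/*
 * Background cleaner: under cleaner_mutex it runs delayed iputs, cleans
 * one dead snapshot per pass, defrags inodes and deletes unused block
 * groups, then sleeps until woken (normally by the transaction kthread).
 */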
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex.  So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example.  So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans);
		if (ret)
			btrfs_err(fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}

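/*
 * Periodic transaction committer: wakes up every commit_interval seconds,
 * commits the running transaction once it is old enough, and kicks the
 * cleaner.  If the transaction cannot be attached (e.g. the filesystem is
 * aborted) it just reschedules itself.
 */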
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() &&
		    (!btrfs_transaction_blocked(fs_info) ||
		     cannot_commit))
			schedule_timeout(delay);
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the newest backup slot is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}


/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
1994 if (newest == -1)
1995 return -1;
1996
1997 *backup_index = newest;
1998 *num_backups_tried = 1;
1999 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2000 /* we've tried all the backups, all done */
2001 return -1;
2002 } else {
2003 /* jump to the next oldest backup */
2004 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2005 BTRFS_NUM_BACKUP_ROOTS;
2006 *backup_index = newest;
2007 *num_backups_tried += 1;
2008 }
2009 root_backup = super->super_roots + newest;
2010
2011 btrfs_set_super_generation(super,
2012 btrfs_backup_tree_root_gen(root_backup));
2013 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2014 btrfs_set_super_root_level(super,
2015 btrfs_backup_tree_root_level(root_backup));
2016 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2017
2018 /*
2019 * FIXME: the total bytes and num_devices need to match; if they
2020 * don't, the fs needs a fsck
2021 */
2022 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2023 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2024 return 0;
2025 }
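
/*
 * Usage sketch (editor's note), matching the recovery path at the end
 * of open_ctree():
 *
 *	ret = next_root_backup(fs_info, fs_info->super_copy,
 *			       &num_backups_tried, &backup_index);
 *	if (ret == -1)
 *		goto fail_block_groups;
 *	goto retry_root_backup;
 *
 * With the newest backup in slot 1 and BTRFS_NUM_BACKUP_ROOTS == 4,
 * the slots are tried in the order 1, 0, 3, 2.
 */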
2026
2027 /* helper to cleanup workers */
2028 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2029 {
2030 btrfs_destroy_workqueue(fs_info->fixup_workers);
2031 btrfs_destroy_workqueue(fs_info->delalloc_workers);
2032 btrfs_destroy_workqueue(fs_info->workers);
2033 btrfs_destroy_workqueue(fs_info->endio_workers);
2034 btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2035 btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2036 btrfs_destroy_workqueue(fs_info->rmw_workers);
2037 btrfs_destroy_workqueue(fs_info->endio_write_workers);
2038 btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2039 btrfs_destroy_workqueue(fs_info->submit_workers);
2040 btrfs_destroy_workqueue(fs_info->delayed_workers);
2041 btrfs_destroy_workqueue(fs_info->caching_workers);
2042 btrfs_destroy_workqueue(fs_info->readahead_workers);
2043 btrfs_destroy_workqueue(fs_info->flush_workers);
2044 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2045 btrfs_destroy_workqueue(fs_info->extent_workers);
2046 /*
2047 * Now that all other work queues are destroyed, we can safely destroy
2048 * the queues used for metadata I/O, since tasks from those other work
2049 * queues can do metadata I/O operations.
2050 */
2051 btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2052 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2053 }
2054
2055 static void free_root_extent_buffers(struct btrfs_root *root)
2056 {
2057 if (root) {
2058 free_extent_buffer(root->node);
2059 free_extent_buffer(root->commit_root);
2060 root->node = NULL;
2061 root->commit_root = NULL;
2062 }
2063 }
2064
2065 /* helper to cleanup tree roots */
2066 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2067 {
2068 free_root_extent_buffers(info->tree_root);
2069
2070 free_root_extent_buffers(info->dev_root);
2071 free_root_extent_buffers(info->extent_root);
2072 free_root_extent_buffers(info->csum_root);
2073 free_root_extent_buffers(info->quota_root);
2074 free_root_extent_buffers(info->uuid_root);
2075 if (chunk_root)
2076 free_root_extent_buffers(info->chunk_root);
2077 free_root_extent_buffers(info->free_space_root);
2078 }
2079
2080 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2081 {
2082 int ret;
2083 struct btrfs_root *gang[8];
2084 int i;
2085
2086 while (!list_empty(&fs_info->dead_roots)) {
2087 gang[0] = list_entry(fs_info->dead_roots.next,
2088 struct btrfs_root, root_list);
2089 list_del(&gang[0]->root_list);
2090
2091 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2092 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2093 } else {
2094 free_extent_buffer(gang[0]->node);
2095 free_extent_buffer(gang[0]->commit_root);
2096 btrfs_put_fs_root(gang[0]);
2097 }
2098 }
2099
2100 while (1) {
2101 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2102 (void **)gang, 0,
2103 ARRAY_SIZE(gang));
2104 if (!ret)
2105 break;
2106 for (i = 0; i < ret; i++)
2107 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2108 }
2109
2110 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2111 btrfs_free_log_root_tree(NULL, fs_info);
2112 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2113 }
2114 }
2115
2116 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2117 {
2118 mutex_init(&fs_info->scrub_lock);
2119 atomic_set(&fs_info->scrubs_running, 0);
2120 atomic_set(&fs_info->scrub_pause_req, 0);
2121 atomic_set(&fs_info->scrubs_paused, 0);
2122 atomic_set(&fs_info->scrub_cancel_req, 0);
2123 init_waitqueue_head(&fs_info->scrub_pause_wait);
2124 fs_info->scrub_workers_refcnt = 0;
2125 }
2126
2127 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2128 {
2129 spin_lock_init(&fs_info->balance_lock);
2130 mutex_init(&fs_info->balance_mutex);
2131 atomic_set(&fs_info->balance_running, 0);
2132 atomic_set(&fs_info->balance_pause_req, 0);
2133 atomic_set(&fs_info->balance_cancel_req, 0);
2134 fs_info->balance_ctl = NULL;
2135 init_waitqueue_head(&fs_info->balance_wait_q);
2136 }
2137
2138 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2139 {
2140 struct inode *inode = fs_info->btree_inode;
2141
2142 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2143 set_nlink(inode, 1);
2144 /*
2145 * we set the i_size on the btree inode to the max possible offset.
2146 * the real end of the address space is determined by all of
2147 * the devices in the system
2148 */
2149 inode->i_size = OFFSET_MAX;
2150 inode->i_mapping->a_ops = &btree_aops;
2151
2152 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2153 extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2154 BTRFS_I(inode)->io_tree.track_uptodate = 0;
2155 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2156
2157 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2158
2159 BTRFS_I(inode)->root = fs_info->tree_root;
2160 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2161 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2162 btrfs_insert_inode_hash(inode);
2163 }
2164
2165 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2166 {
2167 fs_info->dev_replace.lock_owner = 0;
2168 atomic_set(&fs_info->dev_replace.nesting_level, 0);
2169 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2170 rwlock_init(&fs_info->dev_replace.lock);
2171 atomic_set(&fs_info->dev_replace.read_locks, 0);
2172 atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2173 init_waitqueue_head(&fs_info->replace_wait);
2174 init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2175 }
2176
2177 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2178 {
2179 spin_lock_init(&fs_info->qgroup_lock);
2180 mutex_init(&fs_info->qgroup_ioctl_lock);
2181 fs_info->qgroup_tree = RB_ROOT;
2182 fs_info->qgroup_op_tree = RB_ROOT;
2183 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2184 fs_info->qgroup_seq = 1;
2185 fs_info->qgroup_ulist = NULL;
2186 fs_info->qgroup_rescan_running = false;
2187 mutex_init(&fs_info->qgroup_rescan_lock);
2188 }
2189
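/*
 * Editor's note: btrfs_alloc_workqueue(fs_info, name, flags,
 * limit_active, thresh) is the btrfs wrapper around kernel workqueues.
 * Roughly, the last argument is a queued-work threshold that
 * async-thread.c uses to decide how many workers may run concurrently
 * (up to limit_active); the "idle thresh" comments below refer to it.
 * See __btrfs_alloc_workqueue() for the exact semantics.
 */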
2190 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2191 struct btrfs_fs_devices *fs_devices)
2192 {
2193 int max_active = fs_info->thread_pool_size;
2194 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2195
2196 fs_info->workers =
2197 btrfs_alloc_workqueue(fs_info, "worker",
2198 flags | WQ_HIGHPRI, max_active, 16);
2199
2200 fs_info->delalloc_workers =
2201 btrfs_alloc_workqueue(fs_info, "delalloc",
2202 flags, max_active, 2);
2203
2204 fs_info->flush_workers =
2205 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2206 flags, max_active, 0);
2207
2208 fs_info->caching_workers =
2209 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2210
2211 /*
2212 * a higher idle thresh on the submit workers makes it much more
2213 * likely that bios will be sent down in a sane order to the
2214 * devices
2215 */
2216 fs_info->submit_workers =
2217 btrfs_alloc_workqueue(fs_info, "submit", flags,
2218 min_t(u64, fs_devices->num_devices,
2219 max_active), 64);
2220
2221 fs_info->fixup_workers =
2222 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2223
2224 /*
2225 * endios are largely parallel and should have a very
2226 * low idle thresh
2227 */
2228 fs_info->endio_workers =
2229 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2230 fs_info->endio_meta_workers =
2231 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2232 max_active, 4);
2233 fs_info->endio_meta_write_workers =
2234 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2235 max_active, 2);
2236 fs_info->endio_raid56_workers =
2237 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2238 max_active, 4);
2239 fs_info->endio_repair_workers =
2240 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2241 fs_info->rmw_workers =
2242 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2243 fs_info->endio_write_workers =
2244 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2245 max_active, 2);
2246 fs_info->endio_freespace_worker =
2247 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2248 max_active, 0);
2249 fs_info->delayed_workers =
2250 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2251 max_active, 0);
2252 fs_info->readahead_workers =
2253 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2254 max_active, 2);
2255 fs_info->qgroup_rescan_workers =
2256 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2257 fs_info->extent_workers =
2258 btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2259 min_t(u64, fs_devices->num_devices,
2260 max_active), 8);
2261
2262 if (!(fs_info->workers && fs_info->delalloc_workers &&
2263 fs_info->submit_workers && fs_info->flush_workers &&
2264 fs_info->endio_workers && fs_info->endio_meta_workers &&
2265 fs_info->endio_meta_write_workers &&
2266 fs_info->endio_repair_workers &&
2267 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2268 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2269 fs_info->caching_workers && fs_info->readahead_workers &&
2270 fs_info->fixup_workers && fs_info->delayed_workers &&
2271 fs_info->extent_workers &&
2272 fs_info->qgroup_rescan_workers)) {
2273 return -ENOMEM;
2274 }
2275
2276 return 0;
2277 }
2278
2279 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2280 struct btrfs_fs_devices *fs_devices)
2281 {
2282 int ret;
2283 struct btrfs_root *log_tree_root;
2284 struct btrfs_super_block *disk_super = fs_info->super_copy;
2285 u64 bytenr = btrfs_super_log_root(disk_super);
2286
2287 if (fs_devices->rw_devices == 0) {
2288 btrfs_warn(fs_info, "log replay required on RO media");
2289 return -EIO;
2290 }
2291
2292 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2293 if (!log_tree_root)
2294 return -ENOMEM;
2295
2296 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2297
2298 log_tree_root->node = read_tree_block(fs_info, bytenr,
2299 fs_info->generation + 1);
2300 if (IS_ERR(log_tree_root->node)) {
2301 btrfs_warn(fs_info, "failed to read log tree");
2302 ret = PTR_ERR(log_tree_root->node);
2303 kfree(log_tree_root);
2304 return ret;
2305 } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2306 btrfs_err(fs_info, "failed to read log tree");
2307 free_extent_buffer(log_tree_root->node);
2308 kfree(log_tree_root);
2309 return -EIO;
2310 }
2311 /* returns with log_tree_root freed on success */
2312 ret = btrfs_recover_log_trees(log_tree_root);
2313 if (ret) {
2314 btrfs_handle_fs_error(fs_info, ret,
2315 "Failed to recover log tree");
2316 free_extent_buffer(log_tree_root->node);
2317 kfree(log_tree_root);
2318 return ret;
2319 }
2320
2321 if (sb_rdonly(fs_info->sb)) {
2322 ret = btrfs_commit_super(fs_info);
2323 if (ret)
2324 return ret;
2325 }
2326
2327 return 0;
2328 }
2329
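/*
 * Each well-known tree is looked up in the tree of tree roots by a
 * fixed key with offset 0, e.g. for the extent tree below:
 *
 *	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
 *	location.type = BTRFS_ROOT_ITEM_KEY;
 *	location.offset = 0;
 *
 * Only the quota, uuid and free space trees are optional here.
 */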
2330 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2331 {
2332 struct btrfs_root *tree_root = fs_info->tree_root;
2333 struct btrfs_root *root;
2334 struct btrfs_key location;
2335 int ret;
2336
2337 BUG_ON(!fs_info->tree_root);
2338
2339 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2340 location.type = BTRFS_ROOT_ITEM_KEY;
2341 location.offset = 0;
2342
2343 root = btrfs_read_tree_root(tree_root, &location);
2344 if (IS_ERR(root))
2345 return PTR_ERR(root);
2346 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2347 fs_info->extent_root = root;
2348
2349 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2350 root = btrfs_read_tree_root(tree_root, &location);
2351 if (IS_ERR(root))
2352 return PTR_ERR(root);
2353 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2354 fs_info->dev_root = root;
2355 btrfs_init_devices_late(fs_info);
2356
2357 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2358 root = btrfs_read_tree_root(tree_root, &location);
2359 if (IS_ERR(root))
2360 return PTR_ERR(root);
2361 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2362 fs_info->csum_root = root;
2363
2364 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2365 root = btrfs_read_tree_root(tree_root, &location);
2366 if (!IS_ERR(root)) {
2367 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2368 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2369 fs_info->quota_root = root;
2370 }
2371
2372 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2373 root = btrfs_read_tree_root(tree_root, &location);
2374 if (IS_ERR(root)) {
2375 ret = PTR_ERR(root);
2376 if (ret != -ENOENT)
2377 return ret;
2378 } else {
2379 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2380 fs_info->uuid_root = root;
2381 }
2382
2383 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2384 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2385 root = btrfs_read_tree_root(tree_root, &location);
2386 if (IS_ERR(root))
2387 return PTR_ERR(root);
2388 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2389 fs_info->free_space_root = root;
2390 }
2391
2392 return 0;
2393 }
2394
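/*
 * Rough order of events below (editor's note): allocate the in-memory
 * roots and counters, read and checksum the super block, parse mount
 * options and feature flags, start the work queues, read the system
 * array and the chunk tree, read the tree roots (retrying from backup
 * roots on failure), start the cleaner and transaction kthreads,
 * replay the log tree, and finish with free-space and UUID tree
 * housekeeping.
 */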
2395 int open_ctree(struct super_block *sb,
2396 struct btrfs_fs_devices *fs_devices,
2397 char *options)
2398 {
2399 u32 sectorsize;
2400 u32 nodesize;
2401 u32 stripesize;
2402 u64 generation;
2403 u64 features;
2404 struct btrfs_key location;
2405 struct buffer_head *bh;
2406 struct btrfs_super_block *disk_super;
2407 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2408 struct btrfs_root *tree_root;
2409 struct btrfs_root *chunk_root;
2410 int ret;
2411 int err = -EINVAL;
2412 int num_backups_tried = 0;
2413 int backup_index = 0;
2414 int max_active;
2415 int clear_free_space_tree = 0;
2416
2417 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2418 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2419 if (!tree_root || !chunk_root) {
2420 err = -ENOMEM;
2421 goto fail;
2422 }
2423
2424 ret = init_srcu_struct(&fs_info->subvol_srcu);
2425 if (ret) {
2426 err = ret;
2427 goto fail;
2428 }
2429
2430 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2431 if (ret) {
2432 err = ret;
2433 goto fail_srcu;
2434 }
2435 fs_info->dirty_metadata_batch = PAGE_SIZE *
2436 (1 + ilog2(nr_cpu_ids));
2437
2438 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2439 if (ret) {
2440 err = ret;
2441 goto fail_dirty_metadata_bytes;
2442 }
2443
2444 ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2445 if (ret) {
2446 err = ret;
2447 goto fail_delalloc_bytes;
2448 }
2449
2450 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2451 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2452 INIT_LIST_HEAD(&fs_info->trans_list);
2453 INIT_LIST_HEAD(&fs_info->dead_roots);
2454 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2455 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2456 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2457 spin_lock_init(&fs_info->delalloc_root_lock);
2458 spin_lock_init(&fs_info->trans_lock);
2459 spin_lock_init(&fs_info->fs_roots_radix_lock);
2460 spin_lock_init(&fs_info->delayed_iput_lock);
2461 spin_lock_init(&fs_info->defrag_inodes_lock);
2462 spin_lock_init(&fs_info->tree_mod_seq_lock);
2463 spin_lock_init(&fs_info->super_lock);
2464 spin_lock_init(&fs_info->qgroup_op_lock);
2465 spin_lock_init(&fs_info->buffer_lock);
2466 spin_lock_init(&fs_info->unused_bgs_lock);
2467 rwlock_init(&fs_info->tree_mod_log_lock);
2468 mutex_init(&fs_info->unused_bg_unpin_mutex);
2469 mutex_init(&fs_info->delete_unused_bgs_mutex);
2470 mutex_init(&fs_info->reloc_mutex);
2471 mutex_init(&fs_info->delalloc_root_mutex);
2472 mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2473 seqlock_init(&fs_info->profiles_lock);
2474
2475 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2476 INIT_LIST_HEAD(&fs_info->space_info);
2477 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2478 INIT_LIST_HEAD(&fs_info->unused_bgs);
2479 btrfs_mapping_init(&fs_info->mapping_tree);
2480 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2481 BTRFS_BLOCK_RSV_GLOBAL);
2482 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2483 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2484 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2485 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2486 BTRFS_BLOCK_RSV_DELOPS);
2487 atomic_set(&fs_info->async_delalloc_pages, 0);
2488 atomic_set(&fs_info->defrag_running, 0);
2489 atomic_set(&fs_info->qgroup_op_seq, 0);
2490 atomic_set(&fs_info->reada_works_cnt, 0);
2491 atomic64_set(&fs_info->tree_mod_seq, 0);
2492 fs_info->sb = sb;
2493 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2494 fs_info->metadata_ratio = 0;
2495 fs_info->defrag_inodes = RB_ROOT;
2496 atomic64_set(&fs_info->free_chunk_space, 0);
2497 fs_info->tree_mod_log = RB_ROOT;
2498 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2499 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2500 /* readahead state */
2501 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2502 spin_lock_init(&fs_info->reada_lock);
2503 btrfs_init_ref_verify(fs_info);
2504
2505 fs_info->thread_pool_size = min_t(unsigned long,
2506 num_online_cpus() + 2, 8);
2507
2508 INIT_LIST_HEAD(&fs_info->ordered_roots);
2509 spin_lock_init(&fs_info->ordered_root_lock);
2510
2511 fs_info->btree_inode = new_inode(sb);
2512 if (!fs_info->btree_inode) {
2513 err = -ENOMEM;
2514 goto fail_bio_counter;
2515 }
2516 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2517
2518 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2519 GFP_KERNEL);
2520 if (!fs_info->delayed_root) {
2521 err = -ENOMEM;
2522 goto fail_iput;
2523 }
2524 btrfs_init_delayed_root(fs_info->delayed_root);
2525
2526 btrfs_init_scrub(fs_info);
2527 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2528 fs_info->check_integrity_print_mask = 0;
2529 #endif
2530 btrfs_init_balance(fs_info);
2531 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2532
2533 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2534 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2535
2536 btrfs_init_btree_inode(fs_info);
2537
2538 spin_lock_init(&fs_info->block_group_cache_lock);
2539 fs_info->block_group_cache_tree = RB_ROOT;
2540 fs_info->first_logical_byte = (u64)-1;
2541
2542 extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2543 extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2544 fs_info->pinned_extents = &fs_info->freed_extents[0];
2545 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2546
2547 mutex_init(&fs_info->ordered_operations_mutex);
2548 mutex_init(&fs_info->tree_log_mutex);
2549 mutex_init(&fs_info->chunk_mutex);
2550 mutex_init(&fs_info->transaction_kthread_mutex);
2551 mutex_init(&fs_info->cleaner_mutex);
2552 mutex_init(&fs_info->volume_mutex);
2553 mutex_init(&fs_info->ro_block_group_mutex);
2554 init_rwsem(&fs_info->commit_root_sem);
2555 init_rwsem(&fs_info->cleanup_work_sem);
2556 init_rwsem(&fs_info->subvol_sem);
2557 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2558
2559 btrfs_init_dev_replace_locks(fs_info);
2560 btrfs_init_qgroup(fs_info);
2561
2562 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2563 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2564
2565 init_waitqueue_head(&fs_info->transaction_throttle);
2566 init_waitqueue_head(&fs_info->transaction_wait);
2567 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2568 init_waitqueue_head(&fs_info->async_submit_wait);
2569
2570 INIT_LIST_HEAD(&fs_info->pinned_chunks);
2571
2572 /* Usable values until the real ones are cached from the superblock */
2573 fs_info->nodesize = 4096;
2574 fs_info->sectorsize = 4096;
2575 fs_info->stripesize = 4096;
2576
2577 ret = btrfs_alloc_stripe_hash_table(fs_info);
2578 if (ret) {
2579 err = ret;
2580 goto fail_alloc;
2581 }
2582
2583 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2584
2585 invalidate_bdev(fs_devices->latest_bdev);
2586
2587 /*
2588 * Read super block and check the signature bytes only
2589 */
2590 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2591 if (IS_ERR(bh)) {
2592 err = PTR_ERR(bh);
2593 goto fail_alloc;
2594 }
2595
2596 /*
2597 * We want to check the superblock checksum; the csum type is stored inside.
2598 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2599 */
2600 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2601 btrfs_err(fs_info, "superblock checksum mismatch");
2602 err = -EINVAL;
2603 brelse(bh);
2604 goto fail_alloc;
2605 }
2606
2607 /*
2608 * super_copy is zeroed at allocation time and we never touch the
2609 * following bytes up to INFO_SIZE; the checksum is calculated from
2610 * the whole block of INFO_SIZE
2611 */
2612 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2613 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2614 sizeof(*fs_info->super_for_commit));
2615 brelse(bh);
2616
2617 memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2618
2619 ret = btrfs_check_super_valid(fs_info);
2620 if (ret) {
2621 btrfs_err(fs_info, "superblock contains fatal errors");
2622 err = -EINVAL;
2623 goto fail_alloc;
2624 }
2625
2626 disk_super = fs_info->super_copy;
2627 if (!btrfs_super_root(disk_super))
2628 goto fail_alloc;
2629
2630 /* check FS state, whether FS is broken. */
2631 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2632 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2633
2634 /*
2635 * run through our array of backup supers and set up
2636 * our ring pointer to the oldest one
2637 */
2638 generation = btrfs_super_generation(disk_super);
2639 find_oldest_super_backup(fs_info, generation);
2640
2641 /*
2642 * In the long term, we'll store the compression type in the super
2643 * block, and it'll be used for per file compression control.
2644 */
2645 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2646
2647 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2648 if (ret) {
2649 err = ret;
2650 goto fail_alloc;
2651 }
2652
2653 features = btrfs_super_incompat_flags(disk_super) &
2654 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2655 if (features) {
2656 btrfs_err(fs_info,
2657 "cannot mount because of unsupported optional features (%llx)",
2658 features);
2659 err = -EINVAL;
2660 goto fail_alloc;
2661 }
2662
2663 features = btrfs_super_incompat_flags(disk_super);
2664 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2665 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2666 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2667 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2668 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2669
2670 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2671 btrfs_info(fs_info, "has skinny extents");
2672
2673 /*
2674 * flag our filesystem as having big metadata blocks if
2675 * they are bigger than the page size
2676 */
2677 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2678 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2679 btrfs_info(fs_info,
2680 "flagging fs with big metadata feature");
2681 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2682 }
2683
2684 nodesize = btrfs_super_nodesize(disk_super);
2685 sectorsize = btrfs_super_sectorsize(disk_super);
2686 stripesize = sectorsize;
2687 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2688 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2689
2690 /* Cache block sizes */
2691 fs_info->nodesize = nodesize;
2692 fs_info->sectorsize = sectorsize;
2693 fs_info->stripesize = stripesize;
2694
2695 /*
2696 * mixed block groups end up with duplicate but slightly offset
2697 * extent buffers for the same range. This leads to corruption
2698 */
2699 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2700 (sectorsize != nodesize)) {
2701 btrfs_err(fs_info,
2702 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2703 nodesize, sectorsize);
2704 goto fail_alloc;
2705 }
2706
2707 /*
2708 * No need to take the lock because there is no other task that will
2709 * update the flag.
2710 */
2711 btrfs_set_super_incompat_flags(disk_super, features);
2712
2713 features = btrfs_super_compat_ro_flags(disk_super) &
2714 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2715 if (!sb_rdonly(sb) && features) {
2716 btrfs_err(fs_info,
2717 "cannot mount read-write because of unsupported optional features (%llx)",
2718 features);
2719 err = -EINVAL;
2720 goto fail_alloc;
2721 }
2722
2723 max_active = fs_info->thread_pool_size;
2724
2725 ret = btrfs_init_workqueues(fs_info, fs_devices);
2726 if (ret) {
2727 err = ret;
2728 goto fail_sb_buffer;
2729 }
2730
2731 sb->s_bdi->congested_fn = btrfs_congested_fn;
2732 sb->s_bdi->congested_data = fs_info;
2733 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2734 sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
2735 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2736 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2737
2738 sb->s_blocksize = sectorsize;
2739 sb->s_blocksize_bits = blksize_bits(sectorsize);
2740 memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);
2741
2742 mutex_lock(&fs_info->chunk_mutex);
2743 ret = btrfs_read_sys_array(fs_info);
2744 mutex_unlock(&fs_info->chunk_mutex);
2745 if (ret) {
2746 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2747 goto fail_sb_buffer;
2748 }
2749
2750 generation = btrfs_super_chunk_root_generation(disk_super);
2751
2752 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2753
2754 chunk_root->node = read_tree_block(fs_info,
2755 btrfs_super_chunk_root(disk_super),
2756 generation);
2757 if (IS_ERR(chunk_root->node) ||
2758 !extent_buffer_uptodate(chunk_root->node)) {
2759 btrfs_err(fs_info, "failed to read chunk root");
2760 if (!IS_ERR(chunk_root->node))
2761 free_extent_buffer(chunk_root->node);
2762 chunk_root->node = NULL;
2763 goto fail_tree_roots;
2764 }
2765 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2766 chunk_root->commit_root = btrfs_root_node(chunk_root);
2767
2768 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2769 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2770
2771 ret = btrfs_read_chunk_tree(fs_info);
2772 if (ret) {
2773 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2774 goto fail_tree_roots;
2775 }
2776
2777 /*
2778 * keep the device that is marked to be the target device for the
2779 * dev_replace procedure
2780 */
2781 btrfs_close_extra_devices(fs_devices, 0);
2782
2783 if (!fs_devices->latest_bdev) {
2784 btrfs_err(fs_info, "failed to read devices");
2785 goto fail_tree_roots;
2786 }
2787
2788 retry_root_backup:
2789 generation = btrfs_super_generation(disk_super);
2790
2791 tree_root->node = read_tree_block(fs_info,
2792 btrfs_super_root(disk_super),
2793 generation);
2794 if (IS_ERR(tree_root->node) ||
2795 !extent_buffer_uptodate(tree_root->node)) {
2796 btrfs_warn(fs_info, "failed to read tree root");
2797 if (!IS_ERR(tree_root->node))
2798 free_extent_buffer(tree_root->node);
2799 tree_root->node = NULL;
2800 goto recovery_tree_root;
2801 }
2802
2803 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2804 tree_root->commit_root = btrfs_root_node(tree_root);
2805 btrfs_set_root_refs(&tree_root->root_item, 1);
2806
2807 mutex_lock(&tree_root->objectid_mutex);
2808 ret = btrfs_find_highest_objectid(tree_root,
2809 &tree_root->highest_objectid);
2810 if (ret) {
2811 mutex_unlock(&tree_root->objectid_mutex);
2812 goto recovery_tree_root;
2813 }
2814
2815 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2816
2817 mutex_unlock(&tree_root->objectid_mutex);
2818
2819 ret = btrfs_read_roots(fs_info);
2820 if (ret)
2821 goto recovery_tree_root;
2822
2823 fs_info->generation = generation;
2824 fs_info->last_trans_committed = generation;
2825
2826 ret = btrfs_recover_balance(fs_info);
2827 if (ret) {
2828 btrfs_err(fs_info, "failed to recover balance: %d", ret);
2829 goto fail_block_groups;
2830 }
2831
2832 ret = btrfs_init_dev_stats(fs_info);
2833 if (ret) {
2834 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2835 goto fail_block_groups;
2836 }
2837
2838 ret = btrfs_init_dev_replace(fs_info);
2839 if (ret) {
2840 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
2841 goto fail_block_groups;
2842 }
2843
2844 btrfs_close_extra_devices(fs_devices, 1);
2845
2846 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
2847 if (ret) {
2848 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
2849 ret);
2850 goto fail_block_groups;
2851 }
2852
2853 ret = btrfs_sysfs_add_device(fs_devices);
2854 if (ret) {
2855 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
2856 ret);
2857 goto fail_fsdev_sysfs;
2858 }
2859
2860 ret = btrfs_sysfs_add_mounted(fs_info);
2861 if (ret) {
2862 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
2863 goto fail_fsdev_sysfs;
2864 }
2865
2866 ret = btrfs_init_space_info(fs_info);
2867 if (ret) {
2868 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
2869 goto fail_sysfs;
2870 }
2871
2872 ret = btrfs_read_block_groups(fs_info);
2873 if (ret) {
2874 btrfs_err(fs_info, "failed to read block groups: %d", ret);
2875 goto fail_sysfs;
2876 }
2877
2878 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info)) {
2879 btrfs_warn(fs_info,
2880 "writeable mount is not allowed due to too many missing devices");
2881 goto fail_sysfs;
2882 }
2883
2884 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2885 "btrfs-cleaner");
2886 if (IS_ERR(fs_info->cleaner_kthread))
2887 goto fail_sysfs;
2888
2889 fs_info->transaction_kthread = kthread_run(transaction_kthread,
2890 tree_root,
2891 "btrfs-transaction");
2892 if (IS_ERR(fs_info->transaction_kthread))
2893 goto fail_cleaner;
2894
2895 if (!btrfs_test_opt(fs_info, NOSSD) &&
2896 !fs_info->fs_devices->rotating) {
2897 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
2898 }
2899
2900 /*
2901 * Mount does not set all options immediately; we can do it now and do
2902 * not have to wait for transaction commit
2903 */
2904 btrfs_apply_pending_changes(fs_info);
2905
2906 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2907 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
2908 ret = btrfsic_mount(fs_info, fs_devices,
2909 btrfs_test_opt(fs_info,
2910 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2911 1 : 0,
2912 fs_info->check_integrity_print_mask);
2913 if (ret)
2914 btrfs_warn(fs_info,
2915 "failed to initialize integrity check module: %d",
2916 ret);
2917 }
2918 #endif
2919 ret = btrfs_read_qgroup_config(fs_info);
2920 if (ret)
2921 goto fail_trans_kthread;
2922
2923 if (btrfs_build_ref_tree(fs_info))
2924 btrfs_err(fs_info, "couldn't build ref tree");
2925
2926 /* do not make disk changes to a broken FS or when nologreplay is given */
2927 if (btrfs_super_log_root(disk_super) != 0 &&
2928 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
2929 ret = btrfs_replay_log(fs_info, fs_devices);
2930 if (ret) {
2931 err = ret;
2932 goto fail_qgroup;
2933 }
2934 }
2935
2936 ret = btrfs_find_orphan_roots(fs_info);
2937 if (ret)
2938 goto fail_qgroup;
2939
2940 if (!sb_rdonly(sb)) {
2941 ret = btrfs_cleanup_fs_roots(fs_info);
2942 if (ret)
2943 goto fail_qgroup;
2944
2945 mutex_lock(&fs_info->cleaner_mutex);
2946 ret = btrfs_recover_relocation(tree_root);
2947 mutex_unlock(&fs_info->cleaner_mutex);
2948 if (ret < 0) {
2949 btrfs_warn(fs_info, "failed to recover relocation: %d",
2950 ret);
2951 err = -EINVAL;
2952 goto fail_qgroup;
2953 }
2954 }
2955
2956 location.objectid = BTRFS_FS_TREE_OBJECTID;
2957 location.type = BTRFS_ROOT_ITEM_KEY;
2958 location.offset = 0;
2959
2960 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2961 if (IS_ERR(fs_info->fs_root)) {
2962 err = PTR_ERR(fs_info->fs_root);
2963 goto fail_qgroup;
2964 }
2965
2966 if (sb_rdonly(sb))
2967 return 0;
2968
2969 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2970 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2971 clear_free_space_tree = 1;
2972 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2973 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2974 btrfs_warn(fs_info, "free space tree is invalid");
2975 clear_free_space_tree = 1;
2976 }
2977
2978 if (clear_free_space_tree) {
2979 btrfs_info(fs_info, "clearing free space tree");
2980 ret = btrfs_clear_free_space_tree(fs_info);
2981 if (ret) {
2982 btrfs_warn(fs_info,
2983 "failed to clear free space tree: %d", ret);
2984 close_ctree(fs_info);
2985 return ret;
2986 }
2987 }
2988
2989 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
2990 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2991 btrfs_info(fs_info, "creating free space tree");
2992 ret = btrfs_create_free_space_tree(fs_info);
2993 if (ret) {
2994 btrfs_warn(fs_info,
2995 "failed to create free space tree: %d", ret);
2996 close_ctree(fs_info);
2997 return ret;
2998 }
2999 }
3000
3001 down_read(&fs_info->cleanup_work_sem);
3002 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3003 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3004 up_read(&fs_info->cleanup_work_sem);
3005 close_ctree(fs_info);
3006 return ret;
3007 }
3008 up_read(&fs_info->cleanup_work_sem);
3009
3010 ret = btrfs_resume_balance_async(fs_info);
3011 if (ret) {
3012 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3013 close_ctree(fs_info);
3014 return ret;
3015 }
3016
3017 ret = btrfs_resume_dev_replace_async(fs_info);
3018 if (ret) {
3019 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3020 close_ctree(fs_info);
3021 return ret;
3022 }
3023
3024 btrfs_qgroup_rescan_resume(fs_info);
3025
3026 if (!fs_info->uuid_root) {
3027 btrfs_info(fs_info, "creating UUID tree");
3028 ret = btrfs_create_uuid_tree(fs_info);
3029 if (ret) {
3030 btrfs_warn(fs_info,
3031 "failed to create the UUID tree: %d", ret);
3032 close_ctree(fs_info);
3033 return ret;
3034 }
3035 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3036 fs_info->generation !=
3037 btrfs_super_uuid_tree_generation(disk_super)) {
3038 btrfs_info(fs_info, "checking UUID tree");
3039 ret = btrfs_check_uuid_tree(fs_info);
3040 if (ret) {
3041 btrfs_warn(fs_info,
3042 "failed to check the UUID tree: %d", ret);
3043 close_ctree(fs_info);
3044 return ret;
3045 }
3046 } else {
3047 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3048 }
3049 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3050
3051 /*
3052 * backuproot only affects mount behavior, and if open_ctree succeeded,
3053 * no need to keep the flag
3054 */
3055 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3056
3057 return 0;
3058
3059 fail_qgroup:
3060 btrfs_free_qgroup_config(fs_info);
3061 fail_trans_kthread:
3062 kthread_stop(fs_info->transaction_kthread);
3063 btrfs_cleanup_transaction(fs_info);
3064 btrfs_free_fs_roots(fs_info);
3065 fail_cleaner:
3066 kthread_stop(fs_info->cleaner_kthread);
3067
3068 /*
3069 * make sure we're done with the btree inode before we stop our
3070 * kthreads
3071 */
3072 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3073
3074 fail_sysfs:
3075 btrfs_sysfs_remove_mounted(fs_info);
3076
3077 fail_fsdev_sysfs:
3078 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3079
3080 fail_block_groups:
3081 btrfs_put_block_group_cache(fs_info);
3082
3083 fail_tree_roots:
3084 free_root_pointers(fs_info, 1);
3085 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3086
3087 fail_sb_buffer:
3088 btrfs_stop_all_workers(fs_info);
3089 btrfs_free_block_groups(fs_info);
3090 fail_alloc:
3091 fail_iput:
3092 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3093
3094 iput(fs_info->btree_inode);
3095 fail_bio_counter:
3096 percpu_counter_destroy(&fs_info->bio_counter);
3097 fail_delalloc_bytes:
3098 percpu_counter_destroy(&fs_info->delalloc_bytes);
3099 fail_dirty_metadata_bytes:
3100 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3101 fail_srcu:
3102 cleanup_srcu_struct(&fs_info->subvol_srcu);
3103 fail:
3104 btrfs_free_stripe_hash_table(fs_info);
3105 btrfs_close_devices(fs_info->fs_devices);
3106 return err;
3107
3108 recovery_tree_root:
3109 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3110 goto fail_tree_roots;
3111
3112 free_root_pointers(fs_info, 0);
3113
3114 /* don't use the log in recovery mode, it won't be valid */
3115 btrfs_set_super_log_root(disk_super, 0);
3116
3117 /* we can't trust the free space cache either */
3118 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3119
3120 ret = next_root_backup(fs_info, fs_info->super_copy,
3121 &num_backups_tried, &backup_index);
3122 if (ret == -1)
3123 goto fail_block_groups;
3124 goto retry_root_backup;
3125 }
3126
3127 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3128 {
3129 if (uptodate) {
3130 set_buffer_uptodate(bh);
3131 } else {
3132 struct btrfs_device *device = (struct btrfs_device *)
3133 bh->b_private;
3134
3135 btrfs_warn_rl_in_rcu(device->fs_info,
3136 "lost page write due to IO error on %s",
3137 rcu_str_deref(device->name));
3138 /* note, we don't set_buffer_write_io_error because we have
3139 * our own ways of dealing with the IO errors
3140 */
3141 clear_buffer_uptodate(bh);
3142 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3143 }
3144 unlock_buffer(bh);
3145 put_bh(bh);
3146 }
3147
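/*
 * Editor's note: btrfs keeps up to BTRFS_SUPER_MIRROR_MAX super block
 * copies at fixed offsets (64KiB, 64MiB and 256GiB at the time of
 * writing); btrfs_sb_offset(copy_num) returns the offset of one copy,
 * and the size check below skips mirrors that fall beyond the device.
 */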
3148 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3149 struct buffer_head **bh_ret)
3150 {
3151 struct buffer_head *bh;
3152 struct btrfs_super_block *super;
3153 u64 bytenr;
3154
3155 bytenr = btrfs_sb_offset(copy_num);
3156 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3157 return -EINVAL;
3158
3159 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3160 /*
3161 * If we fail to read from the underlying devices, as of now
3162 * the best option we have is to mark it EIO.
3163 */
3164 if (!bh)
3165 return -EIO;
3166
3167 super = (struct btrfs_super_block *)bh->b_data;
3168 if (btrfs_super_bytenr(super) != bytenr ||
3169 btrfs_super_magic(super) != BTRFS_MAGIC) {
3170 brelse(bh);
3171 return -EINVAL;
3172 }
3173
3174 *bh_ret = bh;
3175 return 0;
3176 }
3177
3178
3179 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3180 {
3181 struct buffer_head *bh;
3182 struct buffer_head *latest = NULL;
3183 struct btrfs_super_block *super;
3184 int i;
3185 u64 transid = 0;
3186 int ret = -EINVAL;
3187
3188 /* we would like to check all the supers, but that would make
3189 * a btrfs mount succeed after a mkfs from a different FS.
3190 * So for now only the first super is checked; scanning the later
3191 * supers, up to BTRFS_SUPER_MIRROR_MAX, would need a special mount option
3192 */
3193 for (i = 0; i < 1; i++) {
3194 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3195 if (ret)
3196 continue;
3197
3198 super = (struct btrfs_super_block *)bh->b_data;
3199
3200 if (!latest || btrfs_super_generation(super) > transid) {
3201 brelse(latest);
3202 latest = bh;
3203 transid = btrfs_super_generation(super);
3204 } else {
3205 brelse(bh);
3206 }
3207 }
3208
3209 if (!latest)
3210 return ERR_PTR(ret);
3211
3212 return latest;
3213 }
3214
3215 /*
3216 * Write superblock @sb to the @device. Do not wait for completion; all the
3217 * buffer heads we write are pinned.
3218 *
3219 * Write @max_mirrors copies of the superblock, where 0 means the default,
3220 * i.e. all copies that fit the expected device size at commit time. Note
3221 * that max_mirrors must be the same for the write and wait phases.
3222 *
3223 * Return number of errors when buffer head is not found or submission fails.
3224 */
3225 static int write_dev_supers(struct btrfs_device *device,
3226 struct btrfs_super_block *sb, int max_mirrors)
3227 {
3228 struct buffer_head *bh;
3229 int i;
3230 int ret;
3231 int errors = 0;
3232 u32 crc;
3233 u64 bytenr;
3234 int op_flags;
3235
3236 if (max_mirrors == 0)
3237 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3238
3239 for (i = 0; i < max_mirrors; i++) {
3240 bytenr = btrfs_sb_offset(i);
3241 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3242 device->commit_total_bytes)
3243 break;
3244
3245 btrfs_set_super_bytenr(sb, bytenr);
3246
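/*
 * The checksum covers everything after the csum field itself
 * (editor's note: crc32c at this point in time), seeded with ~0
 * and written back into sb->csum by btrfs_csum_final().
 */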
3247 crc = ~(u32)0;
3248 crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3249 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3250 btrfs_csum_final(crc, sb->csum);
3251
3252 /* One reference for us, and we leave it for the caller */
3253 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3254 BTRFS_SUPER_INFO_SIZE);
3255 if (!bh) {
3256 btrfs_err(device->fs_info,
3257 "couldn't get super buffer head for bytenr %llu",
3258 bytenr);
3259 errors++;
3260 continue;
3261 }
3262
3263 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3264
3265 /* one reference for submit_bh */
3266 get_bh(bh);
3267
3268 set_buffer_uptodate(bh);
3269 lock_buffer(bh);
3270 bh->b_end_io = btrfs_end_buffer_write_sync;
3271 bh->b_private = device;
3272
3273 /*
3274 * we FUA the first super. The others we allow
3275 * to go down lazily.
3276 */
3277 op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3278 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3279 op_flags |= REQ_FUA;
3280 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3281 if (ret)
3282 errors++;
3283 }
3284 return errors < i ? 0 : -1;
3285 }
3286
3287 /*
3288 * Wait for write completion of superblocks done by write_dev_supers,
3289 * @max_mirrors same for write and wait phases.
3290 *
3291 * Return number of errors when buffer head is not found or not marked up to
3292 * date.
3293 */
3294 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3295 {
3296 struct buffer_head *bh;
3297 int i;
3298 int errors = 0;
3299 u64 bytenr;
3300
3301 if (max_mirrors == 0)
3302 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3303
3304 for (i = 0; i < max_mirrors; i++) {
3305 bytenr = btrfs_sb_offset(i);
3306 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3307 device->commit_total_bytes)
3308 break;
3309
3310 bh = __find_get_block(device->bdev,
3311 bytenr / BTRFS_BDEV_BLOCKSIZE,
3312 BTRFS_SUPER_INFO_SIZE);
3313 if (!bh) {
3314 errors++;
3315 continue;
3316 }
3317 wait_on_buffer(bh);
3318 if (!buffer_uptodate(bh))
3319 errors++;
3320
3321 /* drop our reference */
3322 brelse(bh);
3323
3324 /* drop the reference from the writing run */
3325 brelse(bh);
3326 }
3327
3328 return errors < i ? 0 : -1;
3329 }
3330
3331 /*
3332 * endio for write_dev_flush; this will wake anyone waiting
3333 * for the barrier when it is done
3334 */
3335 static void btrfs_end_empty_barrier(struct bio *bio)
3336 {
3337 complete(bio->bi_private);
3338 }
3339
3340 /*
3341 * Submit a flush request to the device if it supports it. Error handling is
3342 * done in the waiting counterpart.
3343 */
3344 static void write_dev_flush(struct btrfs_device *device)
3345 {
3346 struct request_queue *q = bdev_get_queue(device->bdev);
3347 struct bio *bio = device->flush_bio;
3348
3349 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3350 return;
3351
3352 bio_reset(bio);
3353 bio->bi_end_io = btrfs_end_empty_barrier;
3354 bio_set_dev(bio, device->bdev);
3355 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3356 init_completion(&device->flush_wait);
3357 bio->bi_private = &device->flush_wait;
3358
3359 btrfsic_submit_bio(bio);
3360 device->flush_bio_sent = 1;
3361 }
3362
3363 /*
3364 * If the flush bio has been submitted by write_dev_flush, wait for it.
3365 */
3366 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3367 {
3368 struct bio *bio = device->flush_bio;
3369
3370 if (!device->flush_bio_sent)
3371 return BLK_STS_OK;
3372
3373 device->flush_bio_sent = 0;
3374 wait_for_completion_io(&device->flush_wait);
3375
3376 return bio->bi_status;
3377 }
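
/*
 * Usage sketch (editor's note), as in barrier_all_devices() below:
 *
 *	write_dev_flush(dev);		// submit, do not wait
 *	...
 *	ret = wait_dev_flush(dev);	// collect the status later
 *
 * Splitting submission and waiting lets all devices flush in parallel.
 */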
3378
3379 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3380 {
3381 if (!btrfs_check_rw_degradable(fs_info))
3382 return -EIO;
3383 return 0;
3384 }
3385
3386 /*
3387 * send an empty flush down to each device in parallel,
3388 * then wait for them
3389 */
3390 static int barrier_all_devices(struct btrfs_fs_info *info)
3391 {
3392 struct list_head *head;
3393 struct btrfs_device *dev;
3394 int errors_wait = 0;
3395 blk_status_t ret;
3396
3397 /* send down all the barriers */
3398 head = &info->fs_devices->devices;
3399 list_for_each_entry_rcu(dev, head, dev_list) {
3400 if (dev->missing)
3401 continue;
3402 if (!dev->bdev)
3403 continue;
3404 if (!dev->in_fs_metadata || !dev->writeable)
3405 continue;
3406
3407 write_dev_flush(dev);
3408 dev->last_flush_error = BLK_STS_OK;
3409 }
3410
3411 /* wait for all the barriers */
3412 list_for_each_entry_rcu(dev, head, dev_list) {
3413 if (dev->missing)
3414 continue;
3415 if (!dev->bdev) {
3416 errors_wait++;
3417 continue;
3418 }
3419 if (!dev->in_fs_metadata || !dev->writeable)
3420 continue;
3421
3422 ret = wait_dev_flush(dev);
3423 if (ret) {
3424 dev->last_flush_error = ret;
3425 btrfs_dev_stat_inc_and_print(dev,
3426 BTRFS_DEV_STAT_FLUSH_ERRS);
3427 errors_wait++;
3428 }
3429 }
3430
3431 if (errors_wait) {
3432 /*
3433 * We need the status of all disks to arrive at the
3434 * overall volume status, so the error checking is
3435 * pushed out to a separate loop.
3436 */
3437 return check_barrier_error(info);
3438 }
3439 return 0;
3440 }
3441
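/*
 * Worked example (editor's note, using the tolerated_failures values
 * from btrfs_raid_array): BTRFS_BLOCK_GROUP_RAID1 tolerates one lost
 * device and BTRFS_BLOCK_GROUP_RAID6 tolerates two; if both bits are
 * set in @flags, the minimum, one, is returned.
 */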
3442 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3443 {
3444 int raid_type;
3445 int min_tolerated = INT_MAX;
3446
3447 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3448 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3449 min_tolerated = min(min_tolerated,
3450 btrfs_raid_array[BTRFS_RAID_SINGLE].
3451 tolerated_failures);
3452
3453 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3454 if (raid_type == BTRFS_RAID_SINGLE)
3455 continue;
3456 if (!(flags & btrfs_raid_group[raid_type]))
3457 continue;
3458 min_tolerated = min(min_tolerated,
3459 btrfs_raid_array[raid_type].
3460 tolerated_failures);
3461 }
3462
3463 if (min_tolerated == INT_MAX) {
3464 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3465 min_tolerated = 0;
3466 }
3467
3468 return min_tolerated;
3469 }
3470
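/*
 * Write the super block to every writeable device in two phases:
 * submit everything via write_dev_supers(), then collect completions
 * via wait_dev_supers(). Since max_errors is num_devices - 1, the
 * commit only fails once no device at all holds a freshly written
 * super block.
 */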
3471 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3472 {
3473 struct list_head *head;
3474 struct btrfs_device *dev;
3475 struct btrfs_super_block *sb;
3476 struct btrfs_dev_item *dev_item;
3477 int ret;
3478 int do_barriers;
3479 int max_errors;
3480 int total_errors = 0;
3481 u64 flags;
3482
3483 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3484
3485 /*
3486 * max_mirrors == 0 indicates we're from commit_transaction,
3487 * not from fsync, where the tree roots in fs_info have not
3488 * yet been made consistent on disk.
3489 */
3490 if (max_mirrors == 0)
3491 backup_super_roots(fs_info);
3492
3493 sb = fs_info->super_for_commit;
3494 dev_item = &sb->dev_item;
3495
3496 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3497 head = &fs_info->fs_devices->devices;
3498 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3499
3500 if (do_barriers) {
3501 ret = barrier_all_devices(fs_info);
3502 if (ret) {
3503 mutex_unlock(
3504 &fs_info->fs_devices->device_list_mutex);
3505 btrfs_handle_fs_error(fs_info, ret,
3506 "errors while submitting device barriers.");
3507 return ret;
3508 }
3509 }
3510
3511 list_for_each_entry_rcu(dev, head, dev_list) {
3512 if (!dev->bdev) {
3513 total_errors++;
3514 continue;
3515 }
3516 if (!dev->in_fs_metadata || !dev->writeable)
3517 continue;
3518
3519 btrfs_set_stack_device_generation(dev_item, 0);
3520 btrfs_set_stack_device_type(dev_item, dev->type);
3521 btrfs_set_stack_device_id(dev_item, dev->devid);
3522 btrfs_set_stack_device_total_bytes(dev_item,
3523 dev->commit_total_bytes);
3524 btrfs_set_stack_device_bytes_used(dev_item,
3525 dev->commit_bytes_used);
3526 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3527 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3528 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3529 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3530 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
3531
3532 flags = btrfs_super_flags(sb);
3533 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3534
3535 ret = write_dev_supers(dev, sb, max_mirrors);
3536 if (ret)
3537 total_errors++;
3538 }
3539 if (total_errors > max_errors) {
3540 btrfs_err(fs_info, "%d errors while writing supers",
3541 total_errors);
3542 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3543
3544 /* FUA is masked off if unsupported and can't be the reason */
3545 btrfs_handle_fs_error(fs_info, -EIO,
3546 "%d errors while writing supers",
3547 total_errors);
3548 return -EIO;
3549 }
3550
3551 total_errors = 0;
3552 list_for_each_entry_rcu(dev, head, dev_list) {
3553 if (!dev->bdev)
3554 continue;
3555 if (!dev->in_fs_metadata || !dev->writeable)
3556 continue;
3557
3558 ret = wait_dev_supers(dev, max_mirrors);
3559 if (ret)
3560 total_errors++;
3561 }
3562 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3563 if (total_errors > max_errors) {
3564 btrfs_handle_fs_error(fs_info, -EIO,
3565 "%d errors while writing supers",
3566 total_errors);
3567 return -EIO;
3568 }
3569 return 0;
3570 }
3571
3572 /* Drop a fs root from the radix tree and free it. */
3573 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3574 struct btrfs_root *root)
3575 {
3576 spin_lock(&fs_info->fs_roots_radix_lock);
3577 radix_tree_delete(&fs_info->fs_roots_radix,
3578 (unsigned long)root->root_key.objectid);
3579 spin_unlock(&fs_info->fs_roots_radix_lock);
3580
3581 if (btrfs_root_refs(&root->root_item) == 0)
3582 synchronize_srcu(&fs_info->subvol_srcu);
3583
3584 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3585 btrfs_free_log(NULL, root);
3586 if (root->reloc_root) {
3587 free_extent_buffer(root->reloc_root->node);
3588 free_extent_buffer(root->reloc_root->commit_root);
3589 btrfs_put_fs_root(root->reloc_root);
3590 root->reloc_root = NULL;
3591 }
3592 }
3593
3594 if (root->free_ino_pinned)
3595 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3596 if (root->free_ino_ctl)
3597 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3598 free_fs_root(root);
3599 }
3600
3601 static void free_fs_root(struct btrfs_root *root)
3602 {
3603 iput(root->ino_cache_inode);
3604 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3605 btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3606 root->orphan_block_rsv = NULL;
3607 if (root->anon_dev)
3608 free_anon_bdev(root->anon_dev);
3609 if (root->subv_writers)
3610 btrfs_free_subvolume_writers(root->subv_writers);
3611 free_extent_buffer(root->node);
3612 free_extent_buffer(root->commit_root);
3613 kfree(root->free_ino_ctl);
3614 kfree(root->free_ino_pinned);
3615 kfree(root->name);
3616 btrfs_put_fs_root(root);
3617 }
3618
3619 void btrfs_free_fs_root(struct btrfs_root *root)
3620 {
3621 free_fs_root(root);
3622 }
3623
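/*
 * Pagination sketch (editor's note): radix_tree_gang_lookup() returns
 * up to ARRAY_SIZE(gang) roots at or after root_objectid; bumping
 * root_objectid past the last hit each round walks the whole radix
 * tree in batches of eight, and SRCU is only held across the lookup
 * and grab, not across the orphan cleanup itself.
 */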
3624 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3625 {
3626 u64 root_objectid = 0;
3627 struct btrfs_root *gang[8];
3628 int i = 0;
3629 int err = 0;
3630 unsigned int ret = 0;
3631 int index;
3632
3633 while (1) {
3634 index = srcu_read_lock(&fs_info->subvol_srcu);
3635 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3636 (void **)gang, root_objectid,
3637 ARRAY_SIZE(gang));
3638 if (!ret) {
3639 srcu_read_unlock(&fs_info->subvol_srcu, index);
3640 break;
3641 }
3642 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3643
3644 for (i = 0; i < ret; i++) {
3645 /* Avoid grabbing roots in dead_roots */
3646 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3647 gang[i] = NULL;
3648 continue;
3649 }
3650 /* grab all the search results for later use */
3651 gang[i] = btrfs_grab_fs_root(gang[i]);
3652 }
3653 srcu_read_unlock(&fs_info->subvol_srcu, index);
3654
3655 for (i = 0; i < ret; i++) {
3656 if (!gang[i])
3657 continue;
3658 root_objectid = gang[i]->root_key.objectid;
3659 err = btrfs_orphan_cleanup(gang[i]);
3660 if (err)
3661 break;
3662 btrfs_put_fs_root(gang[i]);
3663 }
3664 root_objectid++;
3665 }
3666
3667 /* release the uncleaned roots due to error */
3668 for (; i < ret; i++) {
3669 if (gang[i])
3670 btrfs_put_fs_root(gang[i]);
3671 }
3672 return err;
3673 }
3674
3675 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3676 {
3677 struct btrfs_root *root = fs_info->tree_root;
3678 struct btrfs_trans_handle *trans;
3679
3680 mutex_lock(&fs_info->cleaner_mutex);
3681 btrfs_run_delayed_iputs(fs_info);
3682 mutex_unlock(&fs_info->cleaner_mutex);
3683 wake_up_process(fs_info->cleaner_kthread);
3684
3685 /* wait until ongoing cleanup work is done */
3686 down_write(&fs_info->cleanup_work_sem);
3687 up_write(&fs_info->cleanup_work_sem);
3688
3689 trans = btrfs_join_transaction(root);
3690 if (IS_ERR(trans))
3691 return PTR_ERR(trans);
3692 return btrfs_commit_transaction(trans);
3693 }
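
/*
 * Illustrative sketch, not part of the original file: the empty
 * down_write()/up_write() pair above acts as a flush barrier.
 * Workers hold the rwsem shared, so taking it exclusive and
 * immediately releasing it simply waits for every worker that is
 * already inside to finish.  Hypothetical names throughout:
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(work_sem);

static void worker_body(void)
{
	down_read(&work_sem);
	/* ... do cleanup work ... */
	up_read(&work_sem);
}

static void wait_for_workers(void)
{
	/* blocks until all current readers have left the section */
	down_write(&work_sem);
	up_write(&work_sem);
}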
3694
3695 void close_ctree(struct btrfs_fs_info *fs_info)
3696 {
3697 struct btrfs_root *root = fs_info->tree_root;
3698 int ret;
3699
3700 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3701
3702 /* wait for the qgroup rescan worker to stop */
3703 btrfs_qgroup_wait_for_completion(fs_info, false);
3704
3705 /* wait for the uuid_scan task to finish */
3706 down(&fs_info->uuid_tree_rescan_sem);
3707 /* avoid complaints from lockdep et al.; set the sem back to its initial state */
3708 up(&fs_info->uuid_tree_rescan_sem);
3709
3710 /* pause restriper - we want to resume on mount */
3711 btrfs_pause_balance(fs_info);
3712
3713 btrfs_dev_replace_suspend_for_unmount(fs_info);
3714
3715 btrfs_scrub_cancel(fs_info);
3716
3717 /* wait for any defraggers to finish */
3718 wait_event(fs_info->transaction_wait,
3719 (atomic_read(&fs_info->defrag_running) == 0));
3720
3721 /* clear out the rbtree of defraggable inodes */
3722 btrfs_cleanup_defrag_inodes(fs_info);
3723
3724 cancel_work_sync(&fs_info->async_reclaim_work);
3725
3726 if (!sb_rdonly(fs_info->sb)) {
3727 /*
3728 * The cleaner kthread is about to be stopped; any block groups
3729 * still queued for removal would otherwise never be deleted, so
3730 * delete them here.
3731 */
3732 btrfs_delete_unused_bgs(fs_info);
3733
3734 ret = btrfs_commit_super(fs_info);
3735 if (ret)
3736 btrfs_err(fs_info, "commit super ret %d", ret);
3737 }
3738
3739 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3740 btrfs_error_commit_super(fs_info);
3741
3742 kthread_stop(fs_info->transaction_kthread);
3743 kthread_stop(fs_info->cleaner_kthread);
3744
3745 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3746
3747 btrfs_free_qgroup_config(fs_info);
3748
3749 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3750 btrfs_info(fs_info, "at unmount delalloc count %lld",
3751 percpu_counter_sum(&fs_info->delalloc_bytes));
3752 }
3753
3754 btrfs_sysfs_remove_mounted(fs_info);
3755 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3756
3757 btrfs_free_fs_roots(fs_info);
3758
3759 btrfs_put_block_group_cache(fs_info);
3760
3761 /*
3762 * We must make sure that no read requests are submitted after we
3763 * stop all the workers.
3764 */
3765 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3766 btrfs_stop_all_workers(fs_info);
3767
3768 btrfs_free_block_groups(fs_info);
3769
3770 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3771 free_root_pointers(fs_info, 1);
3772
3773 iput(fs_info->btree_inode);
3774
3775 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3776 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3777 btrfsic_unmount(fs_info->fs_devices);
3778 #endif
3779
3780 btrfs_close_devices(fs_info->fs_devices);
3781 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3782
3783 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3784 percpu_counter_destroy(&fs_info->delalloc_bytes);
3785 percpu_counter_destroy(&fs_info->bio_counter);
3786 cleanup_srcu_struct(&fs_info->subvol_srcu);
3787
3788 btrfs_free_stripe_hash_table(fs_info);
3789 btrfs_free_ref_cache(fs_info);
3790
3791 __btrfs_free_block_rsv(root->orphan_block_rsv);
3792 root->orphan_block_rsv = NULL;
3793
3794 while (!list_empty(&fs_info->pinned_chunks)) {
3795 struct extent_map *em;
3796
3797 em = list_first_entry(&fs_info->pinned_chunks,
3798 struct extent_map, list);
3799 list_del_init(&em->list);
3800 free_extent_map(em);
3801 }
3802 }
3803
3804 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3805 int atomic)
3806 {
3807 int ret;
3808 struct inode *btree_inode = buf->pages[0]->mapping->host;
3809
3810 ret = extent_buffer_uptodate(buf);
3811 if (!ret)
3812 return ret;
3813
3814 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3815 parent_transid, atomic);
3816 if (ret == -EAGAIN)
3817 return ret;
3818 return !ret;
3819 }
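
/*
 * Illustrative sketch, not part of the original file: note the mixed
 * return convention above - 1 means uptodate, 0 means not, and
 * -EAGAIN is passed through when atomic is set and answering would
 * require blocking.  A hypothetical caller therefore has to handle
 * all three cases:
 */
#include <linux/errno.h>

static int my_check_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	int ret = btrfs_buffer_uptodate(buf, parent_transid, 1 /* atomic */);

	if (ret == -EAGAIN)
		return ret;	/* retry from a context that may block */
	if (!ret)
		return -EIO;	/* stale or failed verification */
	return 0;		/* buffer is uptodate */
}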
3820
3821 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3822 {
3823 struct btrfs_fs_info *fs_info;
3824 struct btrfs_root *root;
3825 u64 transid = btrfs_header_generation(buf);
3826 int was_dirty;
3827
3828 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3829 /*
3830 * This is a fast path so only do this check if we have sanity tests
3831 * enabled. Normal people shouldn't be marking dummy buffers as dirty
3832 * outside of the sanity tests.
3833 */
3834 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3835 return;
3836 #endif
3837 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3838 fs_info = root->fs_info;
3839 btrfs_assert_tree_locked(buf);
3840 if (transid != fs_info->generation)
3841 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
3842 buf->start, transid, fs_info->generation);
3843 was_dirty = set_extent_buffer_dirty(buf);
3844 if (!was_dirty)
3845 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3846 buf->len,
3847 fs_info->dirty_metadata_batch);
3848 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3849 /*
3850 * btrfs_mark_buffer_dirty() can be called with the item pointer set
3851 * but the item data not yet updated, so only check item pointers
3852 * here, not item data.
3853 */
3854 if (btrfs_header_level(buf) == 0 &&
3855 btrfs_check_leaf_relaxed(root, buf)) {
3856 btrfs_print_leaf(buf);
3857 ASSERT(0);
3858 }
3859 #endif
3860 }
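
/*
 * Illustrative sketch, not part of the original file:
 * percpu_counter_add_batch(), used above for the dirty-metadata
 * accounting, keeps hot-path updates in per-cpu slots and only folds
 * a CPU's delta into the shared count once it exceeds the batch, so
 * frequent small updates stay cheap.  Hypothetical names:
 */
#include <linux/percpu_counter.h>

static struct percpu_counter dirty_bytes;

static int my_counter_init(void)
{
	/* start at 0; GFP_KERNEL covers the per-cpu allocation */
	return percpu_counter_init(&dirty_bytes, 0, GFP_KERNEL);
}

static void my_account_dirty(s64 nr_bytes, s32 batch)
{
	/* usually touches only this CPU's slot */
	percpu_counter_add_batch(&dirty_bytes, nr_bytes, batch);
}

static void my_counter_exit(void)
{
	percpu_counter_destroy(&dirty_bytes);
}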
3861
3862 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
3863 int flush_delayed)
3864 {
3865 /*
3866 * It looks as though older kernels can get into trouble with this
3867 * code; they end up stuck in balance_dirty_pages() forever.
3868 */
3869 int ret;
3870
3871 if (current->flags & PF_MEMALLOC)
3872 return;
3873
3874 if (flush_delayed)
3875 btrfs_balance_delayed_items(fs_info);
3876
3877 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3878 BTRFS_DIRTY_METADATA_THRESH);
3879 if (ret > 0) {
3880 balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
3881 }
3882 }
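
/*
 * Illustrative sketch, not part of the original file: the PF_MEMALLOC
 * test above is the standard guard against recursing into writeback
 * from within memory reclaim, and percpu_counter_compare() provides a
 * cheap approximate threshold test before the expensive balancing
 * call.  Hypothetical my_dirty/maybe_throttle names:
 */
#include <linux/percpu_counter.h>
#include <linux/sched.h>
#include <linux/writeback.h>

static struct percpu_counter my_dirty;

static void maybe_throttle(struct address_space *mapping, s64 thresh)
{
	/* never throttle a task that is itself reclaiming memory */
	if (current->flags & PF_MEMALLOC)
		return;

	/* > 0 means the counter is (approximately) above thresh */
	if (percpu_counter_compare(&my_dirty, thresh) > 0)
		balance_dirty_pages_ratelimited(mapping);
}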
3883
3884 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
3885 {
3886 __btrfs_btree_balance_dirty(fs_info, 1);
3887 }
3888
3889 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
3890 {
3891 __btrfs_btree_balance_dirty(fs_info, 0);
3892 }
3893
3894 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3895 {
3896 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3897 struct btrfs_fs_info *fs_info = root->fs_info;
3898
3899 return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
3900 }
3901
3902 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
3903 {
3904 struct btrfs_super_block *sb = fs_info->super_copy;
3905 u64 nodesize = btrfs_super_nodesize(sb);
3906 u64 sectorsize = btrfs_super_sectorsize(sb);
3907 int ret = 0;
3908
3909 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
3910 btrfs_err(fs_info, "no valid FS found");
3911 ret = -EINVAL;
3912 }
3913 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
3914 btrfs_warn(fs_info, "unrecognized super flag: %llu",
3915 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
3916 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
3917 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
3918 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
3919 ret = -EINVAL;
3920 }
3921 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
3922 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
3923 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
3924 ret = -EINVAL;
3925 }
3926 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
3927 btrfs_err(fs_info, "log_root level too big: %d >= %d",
3928 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
3929 ret = -EINVAL;
3930 }
3931
3932 /*
3933 * Check sectorsize and nodesize first; the other checks need them.
3934 * Accept all possible sector sizes (4K, 8K, 16K, 32K, 64K) here.
3935 */
3936 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
3937 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
3938 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
3939 ret = -EINVAL;
3940 }
3941 /* Only PAGE_SIZE is supported so far */
3942 if (sectorsize != PAGE_SIZE) {
3943 btrfs_err(fs_info,
3944 "sectorsize %llu not supported yet, only support %lu",
3945 sectorsize, PAGE_SIZE);
3946 ret = -EINVAL;
3947 }
3948 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
3949 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
3950 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
3951 ret = -EINVAL;
3952 }
3953 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
3954 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
3955 le32_to_cpu(sb->__unused_leafsize), nodesize);
3956 ret = -EINVAL;
3957 }
3958
3959 /* Root alignment check */
3960 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
3961 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
3962 btrfs_super_root(sb));
3963 ret = -EINVAL;
3964 }
3965 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
3966 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
3967 btrfs_super_chunk_root(sb));
3968 ret = -EINVAL;
3969 }
3970 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
3971 btrfs_warn(fs_info, "log_root block unaligned: %llu",
3972 btrfs_super_log_root(sb));
3973 ret = -EINVAL;
3974 }
3975
3976 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
3977 btrfs_err(fs_info,
3978 "dev_item UUID does not match fsid: %pU != %pU",
3979 fs_info->fsid, sb->dev_item.fsid);
3980 ret = -EINVAL;
3981 }
3982
3983 /*
3984 * A hint to catch really bogus numbers, e.g. from bitflips; more
3985 * exact checks are done later.
3986 */
3987 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
3988 btrfs_err(fs_info, "bytes_used is too small %llu",
3989 btrfs_super_bytes_used(sb));
3990 ret = -EINVAL;
3991 }
3992 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
3993 btrfs_err(fs_info, "invalid stripesize %u",
3994 btrfs_super_stripesize(sb));
3995 ret = -EINVAL;
3996 }
3997 if (btrfs_super_num_devices(sb) > (1UL << 31))
3998 btrfs_warn(fs_info, "suspicious number of devices: %llu",
3999 btrfs_super_num_devices(sb));
4000 if (btrfs_super_num_devices(sb) == 0) {
4001 btrfs_err(fs_info, "number of devices is 0");
4002 ret = -EINVAL;
4003 }
4004
4005 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4006 btrfs_err(fs_info, "super offset mismatch %llu != %u",
4007 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4008 ret = -EINVAL;
4009 }
4010
4011 /*
4012 * Catch obvious sys_chunk_array corruption: it must hold at least
4013 * one key and one chunk.
4014 */
4015 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4016 btrfs_err(fs_info, "system chunk array too big %u > %u",
4017 btrfs_super_sys_array_size(sb),
4018 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4019 ret = -EINVAL;
4020 }
4021 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4022 + sizeof(struct btrfs_chunk)) {
4023 btrfs_err(fs_info, "system chunk array too small %u < %zu",
4024 btrfs_super_sys_array_size(sb),
4025 sizeof(struct btrfs_disk_key)
4026 + sizeof(struct btrfs_chunk));
4027 ret = -EINVAL;
4028 }
4029
4030 /*
4031 * The generation is a global counter; we trust it more than the
4032 * others, but it's still possible that it's the one that's wrong.
4033 */
4034 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4035 btrfs_warn(fs_info,
4036 "suspicious: generation < chunk_root_generation: %llu < %llu",
4037 btrfs_super_generation(sb),
4038 btrfs_super_chunk_root_generation(sb));
4039 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4040 && btrfs_super_cache_generation(sb) != (u64)-1)
4041 btrfs_warn(fs_info,
4042 "suspicious: generation < cache_generation: %llu < %llu",
4043 btrfs_super_generation(sb),
4044 btrfs_super_cache_generation(sb));
4045
4046 return ret;
4047 }
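
/*
 * Illustrative sketch, not part of the original file: the validator
 * above deliberately does not bail out on the first failure - it logs
 * every problem, latches -EINVAL into ret for fatal ones, and only
 * warns about suspicious-but-tolerable fields.  The same shape with a
 * hypothetical header type:
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

#define MY_MAGIC		0x42444548
#define MY_MAX_VERSION		2
#define MY_SUPPORTED_FLAGS	0x3

struct my_header {
	u32 magic;
	u32 version;
	u32 flags;
};

static int validate_header(const struct my_header *h)
{
	int ret = 0;

	if (h->magic != MY_MAGIC) {
		pr_err("bad magic\n");
		ret = -EINVAL;		/* fatal, but keep checking */
	}
	if (h->version > MY_MAX_VERSION) {
		pr_err("unsupported version %u\n", h->version);
		ret = -EINVAL;
	}
	if (h->flags & ~MY_SUPPORTED_FLAGS)
		pr_warn("unknown flags 0x%x\n", h->flags);	/* tolerated */

	return ret;	/* 0 only if every fatal check passed */
}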
4048
4049 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4050 {
4051 mutex_lock(&fs_info->cleaner_mutex);
4052 btrfs_run_delayed_iputs(fs_info);
4053 mutex_unlock(&fs_info->cleaner_mutex);
4054
4055 down_write(&fs_info->cleanup_work_sem);
4056 up_write(&fs_info->cleanup_work_sem);
4057
4058 /* clean up the FS by tearing down its transactions */
4059 btrfs_cleanup_transaction(fs_info);
4060 }
4061
4062 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4063 {
4064 struct btrfs_ordered_extent *ordered;
4065
4066 spin_lock(&root->ordered_extent_lock);
4067 /*
4068 * This just short-circuits the ordered completion code, which will
4069 * make sure each ordered extent gets cleaned up properly.
4070 */
4071 list_for_each_entry(ordered, &root->ordered_extents,
4072 root_extent_list)
4073 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4074 spin_unlock(&root->ordered_extent_lock);
4075 }
4076
4077 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4078 {
4079 struct btrfs_root *root;
4080 struct list_head splice;
4081
4082 INIT_LIST_HEAD(&splice);
4083
4084 spin_lock(&fs_info->ordered_root_lock);
4085 list_splice_init(&fs_info->ordered_roots, &splice);
4086 while (!list_empty(&splice)) {
4087 root = list_first_entry(&splice, struct btrfs_root,
4088 ordered_root);
4089 list_move_tail(&root->ordered_root,
4090 &fs_info->ordered_roots);
4091
4092 spin_unlock(&fs_info->ordered_root_lock);
4093 btrfs_destroy_ordered_extents(root);
4094
4095 cond_resched();
4096 spin_lock(&fs_info->ordered_root_lock);
4097 }
4098 spin_unlock(&fs_info->ordered_root_lock);
4099 }
4100
4101 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4102 struct btrfs_fs_info *fs_info)
4103 {
4104 struct rb_node *node;
4105 struct btrfs_delayed_ref_root *delayed_refs;
4106 struct btrfs_delayed_ref_node *ref;
4107 int ret = 0;
4108
4109 delayed_refs = &trans->delayed_refs;
4110
4111 spin_lock(&delayed_refs->lock);
4112 if (atomic_read(&delayed_refs->num_entries) == 0) {
4113 spin_unlock(&delayed_refs->lock);
4114 btrfs_info(fs_info, "delayed_refs has NO entry");
4115 return ret;
4116 }
4117
4118 while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4119 struct btrfs_delayed_ref_head *head;
4120 struct rb_node *n;
4121 bool pin_bytes = false;
4122
4123 head = rb_entry(node, struct btrfs_delayed_ref_head,
4124 href_node);
4125 if (!mutex_trylock(&head->mutex)) {
4126 refcount_inc(&head->refs);
4127 spin_unlock(&delayed_refs->lock);
4128
4129 mutex_lock(&head->mutex);
4130 mutex_unlock(&head->mutex);
4131 btrfs_put_delayed_ref_head(head);
4132 spin_lock(&delayed_refs->lock);
4133 continue;
4134 }
4135 spin_lock(&head->lock);
4136 while ((n = rb_first(&head->ref_tree)) != NULL) {
4137 ref = rb_entry(n, struct btrfs_delayed_ref_node,
4138 ref_node);
4139 ref->in_tree = 0;
4140 rb_erase(&ref->ref_node, &head->ref_tree);
4141 RB_CLEAR_NODE(&ref->ref_node);
4142 if (!list_empty(&ref->add_list))
4143 list_del(&ref->add_list);
4144 atomic_dec(&delayed_refs->num_entries);
4145 btrfs_put_delayed_ref(ref);
4146 }
4147 if (head->must_insert_reserved)
4148 pin_bytes = true;
4149 btrfs_free_delayed_extent_op(head->extent_op);
4150 delayed_refs->num_heads--;
4151 if (head->processing == 0)
4152 delayed_refs->num_heads_ready--;
4153 atomic_dec(&delayed_refs->num_entries);
4154 rb_erase(&head->href_node, &delayed_refs->href_root);
4155 RB_CLEAR_NODE(&head->href_node);
4156 spin_unlock(&head->lock);
4157 spin_unlock(&delayed_refs->lock);
4158 mutex_unlock(&head->mutex);
4159
4160 if (pin_bytes)
4161 btrfs_pin_extent(fs_info, head->bytenr,
4162 head->num_bytes, 1);
4163 btrfs_put_delayed_ref_head(head);
4164 cond_resched();
4165 spin_lock(&delayed_refs->lock);
4166 }
4167
4168 spin_unlock(&delayed_refs->lock);
4169
4170 return ret;
4171 }
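
/*
 * Illustrative sketch, not part of the original file: the teardown
 * above uses the standard rbtree drain idiom - repeatedly take
 * rb_first(), erase it, and clear the node so that later
 * RB_EMPTY_NODE()-style checks see it as unlinked.  Hypothetical
 * my_node type:
 */
#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_node {
	struct rb_node rb;
};

static void drain_tree(struct rb_root *root)
{
	struct rb_node *node;

	while ((node = rb_first(root)) != NULL) {
		struct my_node *item = rb_entry(node, struct my_node, rb);

		rb_erase(&item->rb, root);
		/* mark the node unlinked for anyone still holding it */
		RB_CLEAR_NODE(&item->rb);
		kfree(item);
	}
}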
4172
4173 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4174 {
4175 struct btrfs_inode *btrfs_inode;
4176 struct list_head splice;
4177
4178 INIT_LIST_HEAD(&splice);
4179
4180 spin_lock(&root->delalloc_lock);
4181 list_splice_init(&root->delalloc_inodes, &splice);
4182
4183 while (!list_empty(&splice)) {
4184 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4185 delalloc_inodes);
4186
4187 list_del_init(&btrfs_inode->delalloc_inodes);
4188 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4189 &btrfs_inode->runtime_flags);
4190 spin_unlock(&root->delalloc_lock);
4191
4192 btrfs_invalidate_inodes(btrfs_inode->root);
4193
4194 spin_lock(&root->delalloc_lock);
4195 }
4196
4197 spin_unlock(&root->delalloc_lock);
4198 }
4199
4200 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4201 {
4202 struct btrfs_root *root;
4203 struct list_head splice;
4204
4205 INIT_LIST_HEAD(&splice);
4206
4207 spin_lock(&fs_info->delalloc_root_lock);
4208 list_splice_init(&fs_info->delalloc_roots, &splice);
4209 while (!list_empty(&splice)) {
4210 root = list_first_entry(&splice, struct btrfs_root,
4211 delalloc_root);
4212 list_del_init(&root->delalloc_root);
4213 root = btrfs_grab_fs_root(root);
4214 BUG_ON(!root);
4215 spin_unlock(&fs_info->delalloc_root_lock);
4216
4217 btrfs_destroy_delalloc_inodes(root);
4218 btrfs_put_fs_root(root);
4219
4220 spin_lock(&fs_info->delalloc_root_lock);
4221 }
4222 spin_unlock(&fs_info->delalloc_root_lock);
4223 }
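
/*
 * Illustrative sketch, not part of the original file: both destroy
 * helpers above use the splice-and-drain idiom - move the whole list
 * onto a local head under the lock, then peel entries off one at a
 * time, dropping the lock around any work that might sleep.
 * Hypothetical my_entry/drain_list names:
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_entry {
	struct list_head list;
};

static void drain_list(struct list_head *src, spinlock_t *lock)
{
	LIST_HEAD(splice);
	struct my_entry *entry;

	spin_lock(lock);
	list_splice_init(src, &splice);	/* src is now empty */
	while (!list_empty(&splice)) {
		entry = list_first_entry(&splice, struct my_entry, list);
		list_del_init(&entry->list);
		spin_unlock(lock);

		/* process entry here; it may sleep, the lock is dropped */

		spin_lock(lock);
	}
	spin_unlock(lock);
}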
4224
4225 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4226 struct extent_io_tree *dirty_pages,
4227 int mark)
4228 {
4229 int ret;
4230 struct extent_buffer *eb;
4231 u64 start = 0;
4232 u64 end;
4233
4234 while (1) {
4235 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4236 mark, NULL);
4237 if (ret)
4238 break;
4239
4240 clear_extent_bits(dirty_pages, start, end, mark);
4241 while (start <= end) {
4242 eb = find_extent_buffer(fs_info, start);
4243 start += fs_info->nodesize;
4244 if (!eb)
4245 continue;
4246 wait_on_extent_buffer_writeback(eb);
4247
4248 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4249 &eb->bflags))
4250 clear_extent_buffer_dirty(eb);
4251 free_extent_buffer_stale(eb);
4252 }
4253 }
4254
4255 return ret;
4256 }
4257
4258 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4259 struct extent_io_tree *pinned_extents)
4260 {
4261 struct extent_io_tree *unpin;
4262 u64 start;
4263 u64 end;
4264 int ret;
4265 bool loop = true;
4266
4267 unpin = pinned_extents;
4268 again:
4269 while (1) {
4270 ret = find_first_extent_bit(unpin, 0, &start, &end,
4271 EXTENT_DIRTY, NULL);
4272 if (ret)
4273 break;
4274
4275 clear_extent_dirty(unpin, start, end);
4276 btrfs_error_unpin_extent_range(fs_info, start, end);
4277 cond_resched();
4278 }
4279
4280 if (loop) {
4281 if (unpin == &fs_info->freed_extents[0])
4282 unpin = &fs_info->freed_extents[1];
4283 else
4284 unpin = &fs_info->freed_extents[0];
4285 loop = false;
4286 goto again;
4287 }
4288
4289 return 0;
4290 }
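
/*
 * Illustrative sketch, not part of the original file:
 * fs_info->freed_extents[] is a pair of trees that btrfs flips
 * between across transaction commits, so the error path above must
 * empty both - the one pinned_extents currently points at and its
 * sibling.  A hypothetical helper for the flip itself:
 */
static struct extent_io_tree *other_freed_tree(struct btrfs_fs_info *fs_info,
					       struct extent_io_tree *cur)
{
	return cur == &fs_info->freed_extents[0] ?
		&fs_info->freed_extents[1] : &fs_info->freed_extents[0];
}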
4291
4292 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4293 {
4294 struct inode *inode;
4295
4296 inode = cache->io_ctl.inode;
4297 if (inode) {
4298 invalidate_inode_pages2(inode->i_mapping);
4299 BTRFS_I(inode)->generation = 0;
4300 cache->io_ctl.inode = NULL;
4301 iput(inode);
4302 }
4303 btrfs_put_block_group(cache);
4304 }
4305
4306 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4307 struct btrfs_fs_info *fs_info)
4308 {
4309 struct btrfs_block_group_cache *cache;
4310
4311 spin_lock(&cur_trans->dirty_bgs_lock);
4312 while (!list_empty(&cur_trans->dirty_bgs)) {
4313 cache = list_first_entry(&cur_trans->dirty_bgs,
4314 struct btrfs_block_group_cache,
4315 dirty_list);
4316 if (!cache) {
4317 btrfs_err(fs_info, "orphan block group dirty_bgs list");
4318 spin_unlock(&cur_trans->dirty_bgs_lock);
4319 return;
4320 }
4321
4322 if (!list_empty(&cache->io_list)) {
4323 spin_unlock(&cur_trans->dirty_bgs_lock);
4324 list_del_init(&cache->io_list);
4325 btrfs_cleanup_bg_io(cache);
4326 spin_lock(&cur_trans->dirty_bgs_lock);
4327 }
4328
4329 list_del_init(&cache->dirty_list);
4330 spin_lock(&cache->lock);
4331 cache->disk_cache_state = BTRFS_DC_ERROR;
4332 spin_unlock(&cache->lock);
4333
4334 spin_unlock(&cur_trans->dirty_bgs_lock);
4335 btrfs_put_block_group(cache);
4336 spin_lock(&cur_trans->dirty_bgs_lock);
4337 }
4338 spin_unlock(&cur_trans->dirty_bgs_lock);
4339
4340 while (!list_empty(&cur_trans->io_bgs)) {
4341 cache = list_first_entry(&cur_trans->io_bgs,
4342 struct btrfs_block_group_cache,
4343 io_list);
4344 if (!cache) {
4345 btrfs_err(fs_info, "orphan block group on io_bgs list");
4346 return;
4347 }
4348
4349 list_del_init(&cache->io_list);
4350 spin_lock(&cache->lock);
4351 cache->disk_cache_state = BTRFS_DC_ERROR;
4352 spin_unlock(&cache->lock);
4353 btrfs_cleanup_bg_io(cache);
4354 }
4355 }
4356
4357 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4358 struct btrfs_fs_info *fs_info)
4359 {
4360 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4361 ASSERT(list_empty(&cur_trans->dirty_bgs));
4362 ASSERT(list_empty(&cur_trans->io_bgs));
4363
4364 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4365
4366 cur_trans->state = TRANS_STATE_COMMIT_START;
4367 wake_up(&fs_info->transaction_blocked_wait);
4368
4369 cur_trans->state = TRANS_STATE_UNBLOCKED;
4370 wake_up(&fs_info->transaction_wait);
4371
4372 btrfs_destroy_delayed_inodes(fs_info);
4373 btrfs_assert_delayed_root_empty(fs_info);
4374
4375 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4376 EXTENT_DIRTY);
4377 btrfs_destroy_pinned_extent(fs_info,
4378 fs_info->pinned_extents);
4379
4380 cur_trans->state = TRANS_STATE_COMPLETED;
4381 wake_up(&cur_trans->commit_wait);
4382 }
4383
4384 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4385 {
4386 struct btrfs_transaction *t;
4387
4388 mutex_lock(&fs_info->transaction_kthread_mutex);
4389
4390 spin_lock(&fs_info->trans_lock);
4391 while (!list_empty(&fs_info->trans_list)) {
4392 t = list_first_entry(&fs_info->trans_list,
4393 struct btrfs_transaction, list);
4394 if (t->state >= TRANS_STATE_COMMIT_START) {
4395 refcount_inc(&t->use_count);
4396 spin_unlock(&fs_info->trans_lock);
4397 btrfs_wait_for_commit(fs_info, t->transid);
4398 btrfs_put_transaction(t);
4399 spin_lock(&fs_info->trans_lock);
4400 continue;
4401 }
4402 if (t == fs_info->running_transaction) {
4403 t->state = TRANS_STATE_COMMIT_DOING;
4404 spin_unlock(&fs_info->trans_lock);
4405 /*
4406 * Wait for num_writers to drop to 0, since we don't currently hold
4407 * a trans handle open for this transaction ourselves.
4408 */
4409 wait_event(t->writer_wait,
4410 atomic_read(&t->num_writers) == 0);
4411 } else {
4412 spin_unlock(&fs_info->trans_lock);
4413 }
4414 btrfs_cleanup_one_transaction(t, fs_info);
4415
4416 spin_lock(&fs_info->trans_lock);
4417 if (t == fs_info->running_transaction)
4418 fs_info->running_transaction = NULL;
4419 list_del_init(&t->list);
4420 spin_unlock(&fs_info->trans_lock);
4421
4422 btrfs_put_transaction(t);
4423 trace_btrfs_transaction_commit(fs_info->tree_root);
4424 spin_lock(&fs_info->trans_lock);
4425 }
4426 spin_unlock(&fs_info->trans_lock);
4427 btrfs_destroy_all_ordered_extents(fs_info);
4428 btrfs_destroy_delayed_inodes(fs_info);
4429 btrfs_assert_delayed_root_empty(fs_info);
4430 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4431 btrfs_destroy_all_delalloc_inodes(fs_info);
4432 mutex_unlock(&fs_info->transaction_kthread_mutex);
4433
4434 return 0;
4435 }
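
/*
 * Illustrative sketch, not part of the original file: the transaction
 * loop above uses the usual "pin, unlock, wait, relock" dance - take
 * a reference on an entry while holding the list lock, drop the lock
 * for the sleeping wait, then retake the lock before touching the
 * list again.  Hypothetical my_txn/my_wait/my_put names; my_wait()
 * may sleep and my_put() drops the pinning reference:
 */
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct my_txn {
	struct list_head list;
	refcount_t use_count;
};

static void my_wait(struct my_txn *t) { /* may sleep */ }
static void my_put(struct my_txn *t) { /* drops use_count */ }

static void drain_txns(struct list_head *head, spinlock_t *lock)
{
	struct my_txn *t;

	spin_lock(lock);
	while (!list_empty(head)) {
		t = list_first_entry(head, struct my_txn, list);
		refcount_inc(&t->use_count);	/* pin across the unlock */
		spin_unlock(lock);

		my_wait(t);			/* may sleep */

		spin_lock(lock);
		list_del_init(&t->list);	/* unlink under the lock */
		spin_unlock(lock);

		my_put(t);			/* drop our pin */
		spin_lock(lock);
	}
	spin_unlock(lock);
}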
4436
4437 static struct btrfs_fs_info *btree_fs_info(void *private_data)
4438 {
4439 struct inode *inode = private_data;
4440 return btrfs_sb(inode->i_sb);
4441 }
4442
4443 static const struct extent_io_ops btree_extent_io_ops = {
4444 /* mandatory callbacks */
4445 .submit_bio_hook = btree_submit_bio_hook,
4446 .readpage_end_io_hook = btree_readpage_end_io_hook,
4447 /* note we're sharing with inode.c for the merge bio hook */
4448 .merge_bio_hook = btrfs_merge_bio_hook,
4449 .readpage_io_failed_hook = btree_io_failed_hook,
4450 .set_range_writeback = btrfs_set_range_writeback,
4451 .tree_fs_info = btree_fs_info,
4452
4453 /* optional callbacks */
4454 };