/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};
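
/*
 * A sketch of the lifecycle, pieced together from the helpers below: a
 * submitter calls btrfs_bio_wq_end_io(), which saves the bio's original
 * bi_end_io/bi_private here and redirects the bio to end_workqueue_bio().
 * On completion, end_workqueue_bio() picks a workqueue based on the IO
 * type and queues the work; end_workqueue_fn() then restores the saved
 * fields and calls the real bio_endio() from task context.
 */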

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional and can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock.  For a given eb, the lockdep
 * key is determined by the btrfs_root it belongs to and the level the eb
 * occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * a subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs updating as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};
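
/*
 * For example, given the snprintf() format in btrfs_init_lockdep() below,
 * a level-2 node of the extent tree gets the lock class name
 * "btrfs-extent-02", and any root not listed above falls back to the
 * default "btrfs-tree-NN" names.
 */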

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple: there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
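
/*
 * Usage sketch (this mirrors btrfs_check_super_csum() below): seed the crc
 * with all ones, feed it the data past the stored checksum, then fold and
 * store the result:
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(data + BTRFS_CSUM_SIZE, crc,
 *			      len - BTRFS_CSUM_SIZE);
 *	btrfs_csum_final(crc, result);
 */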

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block.
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot)					\
	btrfs_crit(root->fs_info,					\
		   "corrupt %s, %s: block=%llu, root=%llu, slot=%d",	\
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",	\
		   reason, btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on.  So just from
	 * an extent buffer alone we cannot find out the id of the
	 * corresponding subvolume tree, so we cannot figure out if the extent
	 * buffer corresponds to the root of the relocation tree or not.  So
	 * skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		struct btrfs_root *check_root;

		key.objectid = btrfs_header_owner(leaf);
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots have not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
				CORRUPT("non-root leaf's nritems is 0",
					leaf, check_root, 0);
				free_extent_buffer(eb);
				return -EIO;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(fs_info)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
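	/*
	 * Rough picture of the layout being checked: item headers grow
	 * forward from the leaf header while item data grows backward from
	 * the end of the leaf, so a consistent leaf satisfies
	 *
	 *	item_offset(0) + item_size(0) == BTRFS_LEAF_DATA_SIZE
	 *	item_offset(n) == item_end(n + 1)
	 *
	 * with every item_end staying within BTRFS_LEAF_DATA_SIZE.
	 */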
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offsets and ends are right; remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}

static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	u64 bytenr;
	int ret = 0;

	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
		btrfs_crit(root->fs_info,
			   "corrupt node: block %llu root %llu nritems %lu",
			   node->start, root->objectid, nr);
		return -EIO;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			CORRUPT("invalid item slot", node, root, slot);
			ret = -EIO;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			CORRUPT("bad key order", node, root, slot);
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, so we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
		enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
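
/*
 * Ceiling for in-flight async submit bios: 256 per thread-pool slot,
 * capped by the number of open devices.  run_one_async_done() below uses
 * 2/3 of this limit as the threshold for waking async_submit_wait waiters.
 */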
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = async->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum the bio here; the actual
	 * mapping to devices happens in __btree_submit_bio_done().
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio.
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

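/*
 * Decide whether a btree write needs to go through the async checksumming
 * helpers: tree-log bios are submitted inline, and (on x86) so is
 * everything else when the SSE4.2 hardware crc32c instruction makes
 * inline checksumming cheap.
 */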
static int check_async_write(unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(bio_flags);
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			     struct page *newpage, struct page *page,
			     enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * as we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshoted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest; set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					  &root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible for calling free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
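
/*
 * Note the lookup-or-create pattern in btrfs_get_fs_root() below: look the
 * root up in the radix tree first, read it from disk on a miss, and if
 * btrfs_insert_fs_root() then returns -EEXIST (another task won the race),
 * free this copy and retry the lookup.
 */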

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Guard against the fs status changing between the check
		 * above and the trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex.  So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example.  So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans);
		if (ret)
			btrfs_err(fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}
1923
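/*
 * The transaction kthread periodically wakes up and commits the running
 * transaction once it is older than fs_info->commit_interval, then pokes
 * the cleaner kthread so deferred work gets a chance to run. If the
 * filesystem has hit an error it also kicks btrfs_cleanup_transaction().
 */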
1924 static int transaction_kthread(void *arg)
1925 {
1926 struct btrfs_root *root = arg;
1927 struct btrfs_fs_info *fs_info = root->fs_info;
1928 struct btrfs_trans_handle *trans;
1929 struct btrfs_transaction *cur;
1930 u64 transid;
1931 unsigned long now;
1932 unsigned long delay;
1933 bool cannot_commit;
1934
1935 do {
1936 cannot_commit = false;
1937 delay = HZ * fs_info->commit_interval;
1938 mutex_lock(&fs_info->transaction_kthread_mutex);
1939
1940 spin_lock(&fs_info->trans_lock);
1941 cur = fs_info->running_transaction;
1942 if (!cur) {
1943 spin_unlock(&fs_info->trans_lock);
1944 goto sleep;
1945 }
1946
1947 now = get_seconds();
1948 if (cur->state < TRANS_STATE_BLOCKED &&
1949 (now < cur->start_time ||
1950 now - cur->start_time < fs_info->commit_interval)) {
1951 spin_unlock(&fs_info->trans_lock);
1952 delay = HZ * 5;
1953 goto sleep;
1954 }
1955 transid = cur->transid;
1956 spin_unlock(&fs_info->trans_lock);
1957
1958 /* If the file system is aborted, this will always fail. */
1959 trans = btrfs_attach_transaction(root);
1960 if (IS_ERR(trans)) {
1961 if (PTR_ERR(trans) != -ENOENT)
1962 cannot_commit = true;
1963 goto sleep;
1964 }
1965 if (transid == trans->transid) {
1966 btrfs_commit_transaction(trans);
1967 } else {
1968 btrfs_end_transaction(trans);
1969 }
1970 sleep:
1971 wake_up_process(fs_info->cleaner_kthread);
1972 mutex_unlock(&fs_info->transaction_kthread_mutex);
1973
1974 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1975 &fs_info->fs_state)))
1976 btrfs_cleanup_transaction(fs_info);
1977 set_current_state(TASK_INTERRUPTIBLE);
1978 if (!kthread_should_stop() &&
1979 (!btrfs_transaction_blocked(fs_info) ||
1980 cannot_commit))
1981 schedule_timeout(delay);
1982 __set_current_state(TASK_RUNNING);
1983 } while (!kthread_should_stop());
1984 return 0;
1985 }
1986
1987 /*
1988 * this will find the highest generation in the array of
1989 * root backups. The index of the newest backup slot is returned,
1990 * or -1 if we can't find anything.
1991 *
1992 * We check to make sure the array is valid by comparing the
1993 * generation of the latest root in the array with the generation
1994 * in the super block. If they don't match we pitch it.
1995 */
1996 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1997 {
1998 u64 cur;
1999 int newest_index = -1;
2000 struct btrfs_root_backup *root_backup;
2001 int i;
2002
2003 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2004 root_backup = info->super_copy->super_roots + i;
2005 cur = btrfs_backup_tree_root_gen(root_backup);
2006 if (cur == newest_gen)
2007 newest_index = i;
2008 }
2009
2010 /* check to see if we actually wrapped around */
2011 if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
2012 root_backup = info->super_copy->super_roots;
2013 cur = btrfs_backup_tree_root_gen(root_backup);
2014 if (cur == newest_gen)
2015 newest_index = 0;
2016 }
2017 return newest_index;
2018 }
2019
2020
2021 /*
2022 * find the oldest backup so we know where to store new entries
2023 * in the backup array. This will set the backup_root_index
2024 * field in the fs_info struct
2025 */
2026 static void find_oldest_super_backup(struct btrfs_fs_info *info,
2027 u64 newest_gen)
2028 {
2029 int newest_index = -1;
2030
2031 newest_index = find_newest_super_backup(info, newest_gen);
2032 /* if there was garbage in there, just move along */
2033 if (newest_index == -1) {
2034 info->backup_root_index = 0;
2035 } else {
2036 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
2037 }
2038 }
2039
2040 /*
2041 * copy all the root pointers into the super backup array.
2042 * this will bump the backup pointer by one when it is
2043 * done
2044 */
2045 static void backup_super_roots(struct btrfs_fs_info *info)
2046 {
2047 int next_backup;
2048 struct btrfs_root_backup *root_backup;
2049 int last_backup;
2050
2051 next_backup = info->backup_root_index;
2052 last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
2053 BTRFS_NUM_BACKUP_ROOTS;
2054
2055 /*
2056 * just overwrite the last backup if we're at the same generation;
2057 * this happens only at umount
2058 */
2059 root_backup = info->super_for_commit->super_roots + last_backup;
2060 if (btrfs_backup_tree_root_gen(root_backup) ==
2061 btrfs_header_generation(info->tree_root->node))
2062 next_backup = last_backup;
2063
2064 root_backup = info->super_for_commit->super_roots + next_backup;
2065
2066 /*
2067 * make sure all of our padding and empty slots get zero filled
2068 * regardless of which ones we use today
2069 */
2070 memset(root_backup, 0, sizeof(*root_backup));
2071
2072 info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
2073
2074 btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
2075 btrfs_set_backup_tree_root_gen(root_backup,
2076 btrfs_header_generation(info->tree_root->node));
2077
2078 btrfs_set_backup_tree_root_level(root_backup,
2079 btrfs_header_level(info->tree_root->node));
2080
2081 btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
2082 btrfs_set_backup_chunk_root_gen(root_backup,
2083 btrfs_header_generation(info->chunk_root->node));
2084 btrfs_set_backup_chunk_root_level(root_backup,
2085 btrfs_header_level(info->chunk_root->node));
2086
2087 btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
2088 btrfs_set_backup_extent_root_gen(root_backup,
2089 btrfs_header_generation(info->extent_root->node));
2090 btrfs_set_backup_extent_root_level(root_backup,
2091 btrfs_header_level(info->extent_root->node));
2092
2093 /*
2094 * we might commit during log recovery, which happens before we set
2095 * the fs_root. Make sure it is valid before we fill it in.
2096 */
2097 if (info->fs_root && info->fs_root->node) {
2098 btrfs_set_backup_fs_root(root_backup,
2099 info->fs_root->node->start);
2100 btrfs_set_backup_fs_root_gen(root_backup,
2101 btrfs_header_generation(info->fs_root->node));
2102 btrfs_set_backup_fs_root_level(root_backup,
2103 btrfs_header_level(info->fs_root->node));
2104 }
2105
2106 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
2107 btrfs_set_backup_dev_root_gen(root_backup,
2108 btrfs_header_generation(info->dev_root->node));
2109 btrfs_set_backup_dev_root_level(root_backup,
2110 btrfs_header_level(info->dev_root->node));
2111
2112 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
2113 btrfs_set_backup_csum_root_gen(root_backup,
2114 btrfs_header_generation(info->csum_root->node));
2115 btrfs_set_backup_csum_root_level(root_backup,
2116 btrfs_header_level(info->csum_root->node));
2117
2118 btrfs_set_backup_total_bytes(root_backup,
2119 btrfs_super_total_bytes(info->super_copy));
2120 btrfs_set_backup_bytes_used(root_backup,
2121 btrfs_super_bytes_used(info->super_copy));
2122 btrfs_set_backup_num_devices(root_backup,
2123 btrfs_super_num_devices(info->super_copy));
2124
2125 /*
2126 * if we don't copy this out to the super_copy, it won't get remembered
2127 * for the next commit
2128 */
2129 memcpy(&info->super_copy->super_roots,
2130 &info->super_for_commit->super_roots,
2131 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2132 }
2133
2134 /*
2135 * this copies info out of the root backup array and back into
2136 * the in-memory super block. It is meant to help iterate through
2137 * the array, so you send it the number of backups you've already
2138 * tried and the last backup index you used.
2139 *
2140 * this returns -1 when it has tried all the backups
2141 */
2142 static noinline int next_root_backup(struct btrfs_fs_info *info,
2143 struct btrfs_super_block *super,
2144 int *num_backups_tried, int *backup_index)
2145 {
2146 struct btrfs_root_backup *root_backup;
2147 int newest = *backup_index;
2148
2149 if (*num_backups_tried == 0) {
2150 u64 gen = btrfs_super_generation(super);
2151
2152 newest = find_newest_super_backup(info, gen);
2153 if (newest == -1)
2154 return -1;
2155
2156 *backup_index = newest;
2157 *num_backups_tried = 1;
2158 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2159 /* we've tried all the backups, all done */
2160 return -1;
2161 } else {
2162 /* jump to the next oldest backup */
2163 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2164 BTRFS_NUM_BACKUP_ROOTS;
2165 *backup_index = newest;
2166 *num_backups_tried += 1;
2167 }
2168 root_backup = super->super_roots + newest;
2169
2170 btrfs_set_super_generation(super,
2171 btrfs_backup_tree_root_gen(root_backup));
2172 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2173 btrfs_set_super_root_level(super,
2174 btrfs_backup_tree_root_level(root_backup));
2175 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2176
2177 /*
2178 * FIXME: the total bytes and num_devices need to match, or we should
2179 * require a fsck
2180 */
2181 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2182 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2183 return 0;
2184 }
2185
2186 /* helper to cleanup workers */
2187 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2188 {
2189 btrfs_destroy_workqueue(fs_info->fixup_workers);
2190 btrfs_destroy_workqueue(fs_info->delalloc_workers);
2191 btrfs_destroy_workqueue(fs_info->workers);
2192 btrfs_destroy_workqueue(fs_info->endio_workers);
2193 btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2194 btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2195 btrfs_destroy_workqueue(fs_info->rmw_workers);
2196 btrfs_destroy_workqueue(fs_info->endio_write_workers);
2197 btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2198 btrfs_destroy_workqueue(fs_info->submit_workers);
2199 btrfs_destroy_workqueue(fs_info->delayed_workers);
2200 btrfs_destroy_workqueue(fs_info->caching_workers);
2201 btrfs_destroy_workqueue(fs_info->readahead_workers);
2202 btrfs_destroy_workqueue(fs_info->flush_workers);
2203 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2204 btrfs_destroy_workqueue(fs_info->extent_workers);
2205 /*
2206 * Now that all other work queues are destroyed, we can safely destroy
2207 * the queues used for metadata I/O, since tasks from those other work
2208 * queues can do metadata I/O operations.
2209 */
2210 btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2211 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2212 }
2213
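/* helper to drop a root's references to its node and commit_root */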
2214 static void free_root_extent_buffers(struct btrfs_root *root)
2215 {
2216 if (root) {
2217 free_extent_buffer(root->node);
2218 free_extent_buffer(root->commit_root);
2219 root->node = NULL;
2220 root->commit_root = NULL;
2221 }
2222 }
2223
2224 /* helper to cleanup tree roots */
2225 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2226 {
2227 free_root_extent_buffers(info->tree_root);
2228
2229 free_root_extent_buffers(info->dev_root);
2230 free_root_extent_buffers(info->extent_root);
2231 free_root_extent_buffers(info->csum_root);
2232 free_root_extent_buffers(info->quota_root);
2233 free_root_extent_buffers(info->uuid_root);
2234 if (chunk_root)
2235 free_root_extent_buffers(info->chunk_root);
2236 free_root_extent_buffers(info->free_space_root);
2237 }
2238
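/*
 * Free all fs roots: first the ones queued on the dead_roots list, then
 * whatever is still in the radix tree, and finally (on an aborted
 * filesystem) the log root tree and pinned extents.
 */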
2239 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2240 {
2241 int ret;
2242 struct btrfs_root *gang[8];
2243 int i;
2244
2245 while (!list_empty(&fs_info->dead_roots)) {
2246 gang[0] = list_entry(fs_info->dead_roots.next,
2247 struct btrfs_root, root_list);
2248 list_del(&gang[0]->root_list);
2249
2250 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2251 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2252 } else {
2253 free_extent_buffer(gang[0]->node);
2254 free_extent_buffer(gang[0]->commit_root);
2255 btrfs_put_fs_root(gang[0]);
2256 }
2257 }
2258
2259 while (1) {
2260 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2261 (void **)gang, 0,
2262 ARRAY_SIZE(gang));
2263 if (!ret)
2264 break;
2265 for (i = 0; i < ret; i++)
2266 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2267 }
2268
2269 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2270 btrfs_free_log_root_tree(NULL, fs_info);
2271 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2272 }
2273 }
2274
2275 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2276 {
2277 mutex_init(&fs_info->scrub_lock);
2278 atomic_set(&fs_info->scrubs_running, 0);
2279 atomic_set(&fs_info->scrub_pause_req, 0);
2280 atomic_set(&fs_info->scrubs_paused, 0);
2281 atomic_set(&fs_info->scrub_cancel_req, 0);
2282 init_waitqueue_head(&fs_info->scrub_pause_wait);
2283 fs_info->scrub_workers_refcnt = 0;
2284 }
2285
2286 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2287 {
2288 spin_lock_init(&fs_info->balance_lock);
2289 mutex_init(&fs_info->balance_mutex);
2290 atomic_set(&fs_info->balance_running, 0);
2291 atomic_set(&fs_info->balance_pause_req, 0);
2292 atomic_set(&fs_info->balance_cancel_req, 0);
2293 fs_info->balance_ctl = NULL;
2294 init_waitqueue_head(&fs_info->balance_wait_q);
2295 }
2296
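/*
 * Set up the dummy in-core inode that backs the btree address space;
 * metadata pages live in its mapping so the regular page cache and
 * extent io machinery can be reused for tree blocks.
 */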
2297 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2298 {
2299 struct inode *inode = fs_info->btree_inode;
2300
2301 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2302 set_nlink(inode, 1);
2303 /*
2304 * we set the i_size on the btree inode to the maximum possible offset
2305 * (OFFSET_MAX); the real end of the address space is determined by all
2306 * of the devices in the system
2307 */
2308 inode->i_size = OFFSET_MAX;
2309 inode->i_mapping->a_ops = &btree_aops;
2310
2311 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2312 extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2313 BTRFS_I(inode)->io_tree.track_uptodate = 0;
2314 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2315
2316 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2317
2318 BTRFS_I(inode)->root = fs_info->tree_root;
2319 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2320 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2321 btrfs_insert_inode_hash(inode);
2322 }
2323
2324 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2325 {
2326 fs_info->dev_replace.lock_owner = 0;
2327 atomic_set(&fs_info->dev_replace.nesting_level, 0);
2328 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2329 rwlock_init(&fs_info->dev_replace.lock);
2330 atomic_set(&fs_info->dev_replace.read_locks, 0);
2331 atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2332 init_waitqueue_head(&fs_info->replace_wait);
2333 init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2334 }
2335
2336 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2337 {
2338 spin_lock_init(&fs_info->qgroup_lock);
2339 mutex_init(&fs_info->qgroup_ioctl_lock);
2340 fs_info->qgroup_tree = RB_ROOT;
2341 fs_info->qgroup_op_tree = RB_ROOT;
2342 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2343 fs_info->qgroup_seq = 1;
2344 fs_info->qgroup_ulist = NULL;
2345 fs_info->qgroup_rescan_running = false;
2346 mutex_init(&fs_info->qgroup_rescan_lock);
2347 }
2348
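/*
 * Allocate all the work queues used for async I/O, delalloc, endio
 * handling and friends. Returns -ENOMEM if any of the allocations
 * failed; the caller is expected to clean up via btrfs_stop_all_workers.
 */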
2349 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2350 struct btrfs_fs_devices *fs_devices)
2351 {
2352 int max_active = fs_info->thread_pool_size;
2353 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2354
2355 fs_info->workers =
2356 btrfs_alloc_workqueue(fs_info, "worker",
2357 flags | WQ_HIGHPRI, max_active, 16);
2358
2359 fs_info->delalloc_workers =
2360 btrfs_alloc_workqueue(fs_info, "delalloc",
2361 flags, max_active, 2);
2362
2363 fs_info->flush_workers =
2364 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2365 flags, max_active, 0);
2366
2367 fs_info->caching_workers =
2368 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2369
2370 /*
2371 * a higher idle thresh on the submit workers makes it much more
2372 * likely that bios will be sent down in a sane order to the
2373 * devices
2374 */
2375 fs_info->submit_workers =
2376 btrfs_alloc_workqueue(fs_info, "submit", flags,
2377 min_t(u64, fs_devices->num_devices,
2378 max_active), 64);
2379
2380 fs_info->fixup_workers =
2381 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2382
2383 /*
2384 * endios are largely parallel and should have a very
2385 * low idle thresh
2386 */
2387 fs_info->endio_workers =
2388 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2389 fs_info->endio_meta_workers =
2390 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2391 max_active, 4);
2392 fs_info->endio_meta_write_workers =
2393 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2394 max_active, 2);
2395 fs_info->endio_raid56_workers =
2396 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2397 max_active, 4);
2398 fs_info->endio_repair_workers =
2399 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2400 fs_info->rmw_workers =
2401 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2402 fs_info->endio_write_workers =
2403 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2404 max_active, 2);
2405 fs_info->endio_freespace_worker =
2406 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2407 max_active, 0);
2408 fs_info->delayed_workers =
2409 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2410 max_active, 0);
2411 fs_info->readahead_workers =
2412 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2413 max_active, 2);
2414 fs_info->qgroup_rescan_workers =
2415 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2416 fs_info->extent_workers =
2417 btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2418 min_t(u64, fs_devices->num_devices,
2419 max_active), 8);
2420
2421 if (!(fs_info->workers && fs_info->delalloc_workers &&
2422 fs_info->submit_workers && fs_info->flush_workers &&
2423 fs_info->endio_workers && fs_info->endio_meta_workers &&
2424 fs_info->endio_meta_write_workers &&
2425 fs_info->endio_repair_workers &&
2426 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2427 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2428 fs_info->caching_workers && fs_info->readahead_workers &&
2429 fs_info->fixup_workers && fs_info->delayed_workers &&
2430 fs_info->extent_workers &&
2431 fs_info->qgroup_rescan_workers)) {
2432 return -ENOMEM;
2433 }
2434
2435 return 0;
2436 }
2437
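/*
 * Replay the tree log found in the super block. Refuses to run if there
 * are no writeable devices, otherwise reads the log tree root and hands
 * it to btrfs_recover_log_trees(); on a read-only mount the result is
 * committed right away so the log can be dropped.
 */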
2438 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2439 struct btrfs_fs_devices *fs_devices)
2440 {
2441 int ret;
2442 struct btrfs_root *log_tree_root;
2443 struct btrfs_super_block *disk_super = fs_info->super_copy;
2444 u64 bytenr = btrfs_super_log_root(disk_super);
2445
2446 if (fs_devices->rw_devices == 0) {
2447 btrfs_warn(fs_info, "log replay required on RO media");
2448 return -EIO;
2449 }
2450
2451 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2452 if (!log_tree_root)
2453 return -ENOMEM;
2454
2455 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2456
2457 log_tree_root->node = read_tree_block(fs_info, bytenr,
2458 fs_info->generation + 1);
2459 if (IS_ERR(log_tree_root->node)) {
2460 btrfs_warn(fs_info, "failed to read log tree");
2461 ret = PTR_ERR(log_tree_root->node);
2462 kfree(log_tree_root);
2463 return ret;
2464 } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2465 btrfs_err(fs_info, "failed to read log tree");
2466 free_extent_buffer(log_tree_root->node);
2467 kfree(log_tree_root);
2468 return -EIO;
2469 }
2470 /* returns with log_tree_root freed on success */
2471 ret = btrfs_recover_log_trees(log_tree_root);
2472 if (ret) {
2473 btrfs_handle_fs_error(fs_info, ret,
2474 "Failed to recover log tree");
2475 free_extent_buffer(log_tree_root->node);
2476 kfree(log_tree_root);
2477 return ret;
2478 }
2479
2480 if (fs_info->sb->s_flags & MS_RDONLY) {
2481 ret = btrfs_commit_super(fs_info);
2482 if (ret)
2483 return ret;
2484 }
2485
2486 return 0;
2487 }
2488
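/*
 * Read the well-known tree roots (extent, dev, csum, quota, uuid and
 * free space trees) from the tree root. The quota and uuid trees are
 * optional; everything else is required for the mount to proceed.
 */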
2489 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2490 {
2491 struct btrfs_root *tree_root = fs_info->tree_root;
2492 struct btrfs_root *root;
2493 struct btrfs_key location;
2494 int ret;
2495
2496 BUG_ON(!fs_info->tree_root);
2497
2498 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2499 location.type = BTRFS_ROOT_ITEM_KEY;
2500 location.offset = 0;
2501
2502 root = btrfs_read_tree_root(tree_root, &location);
2503 if (IS_ERR(root))
2504 return PTR_ERR(root);
2505 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2506 fs_info->extent_root = root;
2507
2508 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2509 root = btrfs_read_tree_root(tree_root, &location);
2510 if (IS_ERR(root))
2511 return PTR_ERR(root);
2512 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2513 fs_info->dev_root = root;
2514 btrfs_init_devices_late(fs_info);
2515
2516 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2517 root = btrfs_read_tree_root(tree_root, &location);
2518 if (IS_ERR(root))
2519 return PTR_ERR(root);
2520 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2521 fs_info->csum_root = root;
2522
2523 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2524 root = btrfs_read_tree_root(tree_root, &location);
2525 if (!IS_ERR(root)) {
2526 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2527 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2528 fs_info->quota_root = root;
2529 }
2530
2531 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2532 root = btrfs_read_tree_root(tree_root, &location);
2533 if (IS_ERR(root)) {
2534 ret = PTR_ERR(root);
2535 if (ret != -ENOENT)
2536 return ret;
2537 } else {
2538 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2539 fs_info->uuid_root = root;
2540 }
2541
2542 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2543 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2544 root = btrfs_read_tree_root(tree_root, &location);
2545 if (IS_ERR(root))
2546 return PTR_ERR(root);
2547 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2548 fs_info->free_space_root = root;
2549 }
2550
2551 return 0;
2552 }
2553
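/*
 * Mount-time entry point: read and validate the super block, bring up
 * the work queues and helper kthreads, read the chunk and tree roots
 * (falling back to the backup roots if requested), replay the log if
 * needed and finish by resuming any interrupted balance, dev-replace or
 * qgroup rescan.
 */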
2554 int open_ctree(struct super_block *sb,
2555 struct btrfs_fs_devices *fs_devices,
2556 char *options)
2557 {
2558 u32 sectorsize;
2559 u32 nodesize;
2560 u32 stripesize;
2561 u64 generation;
2562 u64 features;
2563 struct btrfs_key location;
2564 struct buffer_head *bh;
2565 struct btrfs_super_block *disk_super;
2566 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2567 struct btrfs_root *tree_root;
2568 struct btrfs_root *chunk_root;
2569 int ret;
2570 int err = -EINVAL;
2571 int num_backups_tried = 0;
2572 int backup_index = 0;
2573 int max_active;
2574 int clear_free_space_tree = 0;
2575
2576 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2577 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2578 if (!tree_root || !chunk_root) {
2579 err = -ENOMEM;
2580 goto fail;
2581 }
2582
2583 ret = init_srcu_struct(&fs_info->subvol_srcu);
2584 if (ret) {
2585 err = ret;
2586 goto fail;
2587 }
2588
2589 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2590 if (ret) {
2591 err = ret;
2592 goto fail_srcu;
2593 }
2594 fs_info->dirty_metadata_batch = PAGE_SIZE *
2595 (1 + ilog2(nr_cpu_ids));
2596
2597 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2598 if (ret) {
2599 err = ret;
2600 goto fail_dirty_metadata_bytes;
2601 }
2602
2603 ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2604 if (ret) {
2605 err = ret;
2606 goto fail_delalloc_bytes;
2607 }
2608
2609 fs_info->btree_inode = new_inode(sb);
2610 if (!fs_info->btree_inode) {
2611 err = -ENOMEM;
2612 goto fail_bio_counter;
2613 }
2614
2615 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2616
2617 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2618 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2619 INIT_LIST_HEAD(&fs_info->trans_list);
2620 INIT_LIST_HEAD(&fs_info->dead_roots);
2621 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2622 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2623 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2624 spin_lock_init(&fs_info->delalloc_root_lock);
2625 spin_lock_init(&fs_info->trans_lock);
2626 spin_lock_init(&fs_info->fs_roots_radix_lock);
2627 spin_lock_init(&fs_info->delayed_iput_lock);
2628 spin_lock_init(&fs_info->defrag_inodes_lock);
2629 spin_lock_init(&fs_info->tree_mod_seq_lock);
2630 spin_lock_init(&fs_info->super_lock);
2631 spin_lock_init(&fs_info->qgroup_op_lock);
2632 spin_lock_init(&fs_info->buffer_lock);
2633 spin_lock_init(&fs_info->unused_bgs_lock);
2634 rwlock_init(&fs_info->tree_mod_log_lock);
2635 mutex_init(&fs_info->unused_bg_unpin_mutex);
2636 mutex_init(&fs_info->delete_unused_bgs_mutex);
2637 mutex_init(&fs_info->reloc_mutex);
2638 mutex_init(&fs_info->delalloc_root_mutex);
2639 mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2640 seqlock_init(&fs_info->profiles_lock);
2641
2642 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2643 INIT_LIST_HEAD(&fs_info->space_info);
2644 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2645 INIT_LIST_HEAD(&fs_info->unused_bgs);
2646 btrfs_mapping_init(&fs_info->mapping_tree);
2647 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2648 BTRFS_BLOCK_RSV_GLOBAL);
2649 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2650 BTRFS_BLOCK_RSV_DELALLOC);
2651 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2652 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2653 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2654 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2655 BTRFS_BLOCK_RSV_DELOPS);
2656 atomic_set(&fs_info->nr_async_submits, 0);
2657 atomic_set(&fs_info->async_delalloc_pages, 0);
2658 atomic_set(&fs_info->async_submit_draining, 0);
2659 atomic_set(&fs_info->nr_async_bios, 0);
2660 atomic_set(&fs_info->defrag_running, 0);
2661 atomic_set(&fs_info->qgroup_op_seq, 0);
2662 atomic_set(&fs_info->reada_works_cnt, 0);
2663 atomic64_set(&fs_info->tree_mod_seq, 0);
2664 fs_info->sb = sb;
2665 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2666 fs_info->metadata_ratio = 0;
2667 fs_info->defrag_inodes = RB_ROOT;
2668 atomic64_set(&fs_info->free_chunk_space, 0);
2669 fs_info->tree_mod_log = RB_ROOT;
2670 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2671 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2672 /* readahead state */
2673 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2674 spin_lock_init(&fs_info->reada_lock);
2675
2676 fs_info->thread_pool_size = min_t(unsigned long,
2677 num_online_cpus() + 2, 8);
2678
2679 INIT_LIST_HEAD(&fs_info->ordered_roots);
2680 spin_lock_init(&fs_info->ordered_root_lock);
2681 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2682 GFP_KERNEL);
2683 if (!fs_info->delayed_root) {
2684 err = -ENOMEM;
2685 goto fail_iput;
2686 }
2687 btrfs_init_delayed_root(fs_info->delayed_root);
2688
2689 btrfs_init_scrub(fs_info);
2690 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2691 fs_info->check_integrity_print_mask = 0;
2692 #endif
2693 btrfs_init_balance(fs_info);
2694 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2695
2696 sb->s_blocksize = 4096;
2697 sb->s_blocksize_bits = blksize_bits(4096);
2698
2699 btrfs_init_btree_inode(fs_info);
2700
2701 spin_lock_init(&fs_info->block_group_cache_lock);
2702 fs_info->block_group_cache_tree = RB_ROOT;
2703 fs_info->first_logical_byte = (u64)-1;
2704
2705 extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2706 extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2707 fs_info->pinned_extents = &fs_info->freed_extents[0];
2708 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2709
2710 mutex_init(&fs_info->ordered_operations_mutex);
2711 mutex_init(&fs_info->tree_log_mutex);
2712 mutex_init(&fs_info->chunk_mutex);
2713 mutex_init(&fs_info->transaction_kthread_mutex);
2714 mutex_init(&fs_info->cleaner_mutex);
2715 mutex_init(&fs_info->volume_mutex);
2716 mutex_init(&fs_info->ro_block_group_mutex);
2717 init_rwsem(&fs_info->commit_root_sem);
2718 init_rwsem(&fs_info->cleanup_work_sem);
2719 init_rwsem(&fs_info->subvol_sem);
2720 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2721
2722 btrfs_init_dev_replace_locks(fs_info);
2723 btrfs_init_qgroup(fs_info);
2724
2725 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2726 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2727
2728 init_waitqueue_head(&fs_info->transaction_throttle);
2729 init_waitqueue_head(&fs_info->transaction_wait);
2730 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2731 init_waitqueue_head(&fs_info->async_submit_wait);
2732
2733 INIT_LIST_HEAD(&fs_info->pinned_chunks);
2734
2735 /* Usable values until the real ones are cached from the superblock */
2736 fs_info->nodesize = 4096;
2737 fs_info->sectorsize = 4096;
2738 fs_info->stripesize = 4096;
2739
2740 ret = btrfs_alloc_stripe_hash_table(fs_info);
2741 if (ret) {
2742 err = ret;
2743 goto fail_alloc;
2744 }
2745
2746 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2747
2748 invalidate_bdev(fs_devices->latest_bdev);
2749
2750 /*
2751 * Read super block and check the signature bytes only
2752 */
2753 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2754 if (IS_ERR(bh)) {
2755 err = PTR_ERR(bh);
2756 goto fail_alloc;
2757 }
2758
2759 /*
2760 * We want to check superblock checksum, the type is stored inside.
2761 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2762 */
2763 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2764 btrfs_err(fs_info, "superblock checksum mismatch");
2765 err = -EINVAL;
2766 brelse(bh);
2767 goto fail_alloc;
2768 }
2769
2770 /*
2771 * super_copy is zeroed at allocation time and we never touch the
2772 * following bytes up to INFO_SIZE; the checksum is calculated from
2773 * the whole block of INFO_SIZE
2774 */
2775 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2776 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2777 sizeof(*fs_info->super_for_commit));
2778 brelse(bh);
2779
2780 memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2781
2782 ret = btrfs_check_super_valid(fs_info);
2783 if (ret) {
2784 btrfs_err(fs_info, "superblock contains fatal errors");
2785 err = -EINVAL;
2786 goto fail_alloc;
2787 }
2788
2789 disk_super = fs_info->super_copy;
2790 if (!btrfs_super_root(disk_super))
2791 goto fail_alloc;
2792
2793 /* check FS state, whether FS is broken. */
2794 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2795 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2796
2797 /*
2798 * run through our array of backup supers and set up
2799 * our ring pointer to the oldest one
2800 */
2801 generation = btrfs_super_generation(disk_super);
2802 find_oldest_super_backup(fs_info, generation);
2803
2804 /*
2805 * In the long term, we'll store the compression type in the super
2806 * block, and it'll be used for per file compression control.
2807 */
2808 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2809
2810 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2811 if (ret) {
2812 err = ret;
2813 goto fail_alloc;
2814 }
2815
2816 features = btrfs_super_incompat_flags(disk_super) &
2817 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2818 if (features) {
2819 btrfs_err(fs_info,
2820 "cannot mount because of unsupported optional features (%llx)",
2821 features);
2822 err = -EINVAL;
2823 goto fail_alloc;
2824 }
2825
2826 features = btrfs_super_incompat_flags(disk_super);
2827 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2828 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2829 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2830
2831 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2832 btrfs_info(fs_info, "has skinny extents");
2833
2834 /*
2835 * flag our filesystem as having big metadata blocks if
2836 * they are bigger than the page size
2837 */
2838 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2839 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2840 btrfs_info(fs_info,
2841 "flagging fs with big metadata feature");
2842 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2843 }
2844
2845 nodesize = btrfs_super_nodesize(disk_super);
2846 sectorsize = btrfs_super_sectorsize(disk_super);
2847 stripesize = sectorsize;
2848 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2849 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2850
2851 /* Cache block sizes */
2852 fs_info->nodesize = nodesize;
2853 fs_info->sectorsize = sectorsize;
2854 fs_info->stripesize = stripesize;
2855
2856 /*
2857 * mixed block groups end up with duplicate but slightly offset
2858 * extent buffers for the same range. This leads to corruption.
2859 */
2860 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2861 (sectorsize != nodesize)) {
2862 btrfs_err(fs_info,
2863 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2864 nodesize, sectorsize);
2865 goto fail_alloc;
2866 }
2867
2868 /*
2869 * No need to take the lock because there is no other task that will
2870 * update the flag.
2871 */
2872 btrfs_set_super_incompat_flags(disk_super, features);
2873
2874 features = btrfs_super_compat_ro_flags(disk_super) &
2875 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2876 if (!(sb->s_flags & MS_RDONLY) && features) {
2877 btrfs_err(fs_info,
2878 "cannot mount read-write because of unsupported optional features (%llx)",
2879 features);
2880 err = -EINVAL;
2881 goto fail_alloc;
2882 }
2883
2884 max_active = fs_info->thread_pool_size;
2885
2886 ret = btrfs_init_workqueues(fs_info, fs_devices);
2887 if (ret) {
2888 err = ret;
2889 goto fail_sb_buffer;
2890 }
2891
2892 sb->s_bdi->congested_fn = btrfs_congested_fn;
2893 sb->s_bdi->congested_data = fs_info;
2894 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2895 sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
2896 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2897 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2898
2899 sb->s_blocksize = sectorsize;
2900 sb->s_blocksize_bits = blksize_bits(sectorsize);
2901
2902 mutex_lock(&fs_info->chunk_mutex);
2903 ret = btrfs_read_sys_array(fs_info);
2904 mutex_unlock(&fs_info->chunk_mutex);
2905 if (ret) {
2906 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2907 goto fail_sb_buffer;
2908 }
2909
2910 generation = btrfs_super_chunk_root_generation(disk_super);
2911
2912 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2913
2914 chunk_root->node = read_tree_block(fs_info,
2915 btrfs_super_chunk_root(disk_super),
2916 generation);
2917 if (IS_ERR(chunk_root->node) ||
2918 !extent_buffer_uptodate(chunk_root->node)) {
2919 btrfs_err(fs_info, "failed to read chunk root");
2920 if (!IS_ERR(chunk_root->node))
2921 free_extent_buffer(chunk_root->node);
2922 chunk_root->node = NULL;
2923 goto fail_tree_roots;
2924 }
2925 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2926 chunk_root->commit_root = btrfs_root_node(chunk_root);
2927
2928 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2929 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2930
2931 ret = btrfs_read_chunk_tree(fs_info);
2932 if (ret) {
2933 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2934 goto fail_tree_roots;
2935 }
2936
2937 /*
2938 * keep the device that is marked to be the target device for the
2939 * dev_replace procedure
2940 */
2941 btrfs_close_extra_devices(fs_devices, 0);
2942
2943 if (!fs_devices->latest_bdev) {
2944 btrfs_err(fs_info, "failed to read devices");
2945 goto fail_tree_roots;
2946 }
2947
2948 retry_root_backup:
2949 generation = btrfs_super_generation(disk_super);
2950
2951 tree_root->node = read_tree_block(fs_info,
2952 btrfs_super_root(disk_super),
2953 generation);
2954 if (IS_ERR(tree_root->node) ||
2955 !extent_buffer_uptodate(tree_root->node)) {
2956 btrfs_warn(fs_info, "failed to read tree root");
2957 if (!IS_ERR(tree_root->node))
2958 free_extent_buffer(tree_root->node);
2959 tree_root->node = NULL;
2960 goto recovery_tree_root;
2961 }
2962
2963 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2964 tree_root->commit_root = btrfs_root_node(tree_root);
2965 btrfs_set_root_refs(&tree_root->root_item, 1);
2966
2967 mutex_lock(&tree_root->objectid_mutex);
2968 ret = btrfs_find_highest_objectid(tree_root,
2969 &tree_root->highest_objectid);
2970 if (ret) {
2971 mutex_unlock(&tree_root->objectid_mutex);
2972 goto recovery_tree_root;
2973 }
2974
2975 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2976
2977 mutex_unlock(&tree_root->objectid_mutex);
2978
2979 ret = btrfs_read_roots(fs_info);
2980 if (ret)
2981 goto recovery_tree_root;
2982
2983 fs_info->generation = generation;
2984 fs_info->last_trans_committed = generation;
2985
2986 ret = btrfs_recover_balance(fs_info);
2987 if (ret) {
2988 btrfs_err(fs_info, "failed to recover balance: %d", ret);
2989 goto fail_block_groups;
2990 }
2991
2992 ret = btrfs_init_dev_stats(fs_info);
2993 if (ret) {
2994 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2995 goto fail_block_groups;
2996 }
2997
2998 ret = btrfs_init_dev_replace(fs_info);
2999 if (ret) {
3000 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3001 goto fail_block_groups;
3002 }
3003
3004 btrfs_close_extra_devices(fs_devices, 1);
3005
3006 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3007 if (ret) {
3008 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3009 ret);
3010 goto fail_block_groups;
3011 }
3012
3013 ret = btrfs_sysfs_add_device(fs_devices);
3014 if (ret) {
3015 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3016 ret);
3017 goto fail_fsdev_sysfs;
3018 }
3019
3020 ret = btrfs_sysfs_add_mounted(fs_info);
3021 if (ret) {
3022 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3023 goto fail_fsdev_sysfs;
3024 }
3025
3026 ret = btrfs_init_space_info(fs_info);
3027 if (ret) {
3028 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3029 goto fail_sysfs;
3030 }
3031
3032 ret = btrfs_read_block_groups(fs_info);
3033 if (ret) {
3034 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3035 goto fail_sysfs;
3036 }
3037 fs_info->num_tolerated_disk_barrier_failures =
3038 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3039 if (fs_info->fs_devices->missing_devices >
3040 fs_info->num_tolerated_disk_barrier_failures &&
3041 !(sb->s_flags & MS_RDONLY)) {
3042 btrfs_warn(fs_info,
3043 "missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
3044 fs_info->fs_devices->missing_devices,
3045 fs_info->num_tolerated_disk_barrier_failures);
3046 goto fail_sysfs;
3047 }
3048
3049 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3050 "btrfs-cleaner");
3051 if (IS_ERR(fs_info->cleaner_kthread))
3052 goto fail_sysfs;
3053
3054 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3055 tree_root,
3056 "btrfs-transaction");
3057 if (IS_ERR(fs_info->transaction_kthread))
3058 goto fail_cleaner;
3059
3060 if (!btrfs_test_opt(fs_info, SSD) &&
3061 !btrfs_test_opt(fs_info, NOSSD) &&
3062 !fs_info->fs_devices->rotating) {
3063 btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
3064 btrfs_set_opt(fs_info->mount_opt, SSD);
3065 }
3066
3067 /*
3068 * Mount does not set all options immediately; we can do it now and do
3069 * not have to wait for transaction commit
3070 */
3071 btrfs_apply_pending_changes(fs_info);
3072
3073 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3074 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3075 ret = btrfsic_mount(fs_info, fs_devices,
3076 btrfs_test_opt(fs_info,
3077 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3078 1 : 0,
3079 fs_info->check_integrity_print_mask);
3080 if (ret)
3081 btrfs_warn(fs_info,
3082 "failed to initialize integrity check module: %d",
3083 ret);
3084 }
3085 #endif
3086 ret = btrfs_read_qgroup_config(fs_info);
3087 if (ret)
3088 goto fail_trans_kthread;
3089
3090 /* do not make disk changes in a broken FS, or if nologreplay is given */
3091 if (btrfs_super_log_root(disk_super) != 0 &&
3092 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3093 ret = btrfs_replay_log(fs_info, fs_devices);
3094 if (ret) {
3095 err = ret;
3096 goto fail_qgroup;
3097 }
3098 }
3099
3100 ret = btrfs_find_orphan_roots(fs_info);
3101 if (ret)
3102 goto fail_qgroup;
3103
3104 if (!(sb->s_flags & MS_RDONLY)) {
3105 ret = btrfs_cleanup_fs_roots(fs_info);
3106 if (ret)
3107 goto fail_qgroup;
3108
3109 mutex_lock(&fs_info->cleaner_mutex);
3110 ret = btrfs_recover_relocation(tree_root);
3111 mutex_unlock(&fs_info->cleaner_mutex);
3112 if (ret < 0) {
3113 btrfs_warn(fs_info, "failed to recover relocation: %d",
3114 ret);
3115 err = -EINVAL;
3116 goto fail_qgroup;
3117 }
3118 }
3119
3120 location.objectid = BTRFS_FS_TREE_OBJECTID;
3121 location.type = BTRFS_ROOT_ITEM_KEY;
3122 location.offset = 0;
3123
3124 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3125 if (IS_ERR(fs_info->fs_root)) {
3126 err = PTR_ERR(fs_info->fs_root);
3127 goto fail_qgroup;
3128 }
3129
3130 if (sb->s_flags & MS_RDONLY)
3131 return 0;
3132
3133 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3134 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3135 clear_free_space_tree = 1;
3136 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3137 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3138 btrfs_warn(fs_info, "free space tree is invalid");
3139 clear_free_space_tree = 1;
3140 }
3141
3142 if (clear_free_space_tree) {
3143 btrfs_info(fs_info, "clearing free space tree");
3144 ret = btrfs_clear_free_space_tree(fs_info);
3145 if (ret) {
3146 btrfs_warn(fs_info,
3147 "failed to clear free space tree: %d", ret);
3148 close_ctree(fs_info);
3149 return ret;
3150 }
3151 }
3152
3153 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3154 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3155 btrfs_info(fs_info, "creating free space tree");
3156 ret = btrfs_create_free_space_tree(fs_info);
3157 if (ret) {
3158 btrfs_warn(fs_info,
3159 "failed to create free space tree: %d", ret);
3160 close_ctree(fs_info);
3161 return ret;
3162 }
3163 }
3164
3165 down_read(&fs_info->cleanup_work_sem);
3166 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3167 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3168 up_read(&fs_info->cleanup_work_sem);
3169 close_ctree(fs_info);
3170 return ret;
3171 }
3172 up_read(&fs_info->cleanup_work_sem);
3173
3174 ret = btrfs_resume_balance_async(fs_info);
3175 if (ret) {
3176 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3177 close_ctree(fs_info);
3178 return ret;
3179 }
3180
3181 ret = btrfs_resume_dev_replace_async(fs_info);
3182 if (ret) {
3183 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3184 close_ctree(fs_info);
3185 return ret;
3186 }
3187
3188 btrfs_qgroup_rescan_resume(fs_info);
3189
3190 if (!fs_info->uuid_root) {
3191 btrfs_info(fs_info, "creating UUID tree");
3192 ret = btrfs_create_uuid_tree(fs_info);
3193 if (ret) {
3194 btrfs_warn(fs_info,
3195 "failed to create the UUID tree: %d", ret);
3196 close_ctree(fs_info);
3197 return ret;
3198 }
3199 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3200 fs_info->generation !=
3201 btrfs_super_uuid_tree_generation(disk_super)) {
3202 btrfs_info(fs_info, "checking UUID tree");
3203 ret = btrfs_check_uuid_tree(fs_info);
3204 if (ret) {
3205 btrfs_warn(fs_info,
3206 "failed to check the UUID tree: %d", ret);
3207 close_ctree(fs_info);
3208 return ret;
3209 }
3210 } else {
3211 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3212 }
3213 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3214
3215 /*
3216 * backuproot only affects mount behavior, and if open_ctree succeeded,
3217 * there is no need to keep the flag
3218 */
3219 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3220
3221 return 0;
3222
3223 fail_qgroup:
3224 btrfs_free_qgroup_config(fs_info);
3225 fail_trans_kthread:
3226 kthread_stop(fs_info->transaction_kthread);
3227 btrfs_cleanup_transaction(fs_info);
3228 btrfs_free_fs_roots(fs_info);
3229 fail_cleaner:
3230 kthread_stop(fs_info->cleaner_kthread);
3231
3232 /*
3233 * make sure we're done with the btree inode before we stop our
3234 * kthreads
3235 */
3236 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3237
3238 fail_sysfs:
3239 btrfs_sysfs_remove_mounted(fs_info);
3240
3241 fail_fsdev_sysfs:
3242 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3243
3244 fail_block_groups:
3245 btrfs_put_block_group_cache(fs_info);
3246
3247 fail_tree_roots:
3248 free_root_pointers(fs_info, 1);
3249 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3250
3251 fail_sb_buffer:
3252 btrfs_stop_all_workers(fs_info);
3253 btrfs_free_block_groups(fs_info);
3254 fail_alloc:
3255 fail_iput:
3256 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3257
3258 iput(fs_info->btree_inode);
3259 fail_bio_counter:
3260 percpu_counter_destroy(&fs_info->bio_counter);
3261 fail_delalloc_bytes:
3262 percpu_counter_destroy(&fs_info->delalloc_bytes);
3263 fail_dirty_metadata_bytes:
3264 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3265 fail_srcu:
3266 cleanup_srcu_struct(&fs_info->subvol_srcu);
3267 fail:
3268 btrfs_free_stripe_hash_table(fs_info);
3269 btrfs_close_devices(fs_info->fs_devices);
3270 return err;
3271
3272 recovery_tree_root:
3273 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3274 goto fail_tree_roots;
3275
3276 free_root_pointers(fs_info, 0);
3277
3278 /* don't use the log in recovery mode, it won't be valid */
3279 btrfs_set_super_log_root(disk_super, 0);
3280
3281 /* we can't trust the free space cache either */
3282 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3283
3284 ret = next_root_backup(fs_info, fs_info->super_copy,
3285 &num_backups_tried, &backup_index);
3286 if (ret == -1)
3287 goto fail_block_groups;
3288 goto retry_root_backup;
3289 }
3290
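/*
 * Completion handler for synchronous super block writes: on failure,
 * log the device and bump its write error counter instead of using the
 * generic buffer error bits.
 */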
3291 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3292 {
3293 if (uptodate) {
3294 set_buffer_uptodate(bh);
3295 } else {
3296 struct btrfs_device *device = (struct btrfs_device *)
3297 bh->b_private;
3298
3299 btrfs_warn_rl_in_rcu(device->fs_info,
3300 "lost page write due to IO error on %s",
3301 rcu_str_deref(device->name));
3302 /* note, we don't set_buffer_write_io_error because we have
3303 * our own ways of dealing with the IO errors
3304 */
3305 clear_buffer_uptodate(bh);
3306 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3307 }
3308 unlock_buffer(bh);
3309 put_bh(bh);
3310 }
3311
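/*
 * Read super block copy @copy_num from @bdev and do a minimal sanity
 * check (bytenr and magic); the caller takes ownership of the buffer
 * head on success.
 */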
3312 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3313 struct buffer_head **bh_ret)
3314 {
3315 struct buffer_head *bh;
3316 struct btrfs_super_block *super;
3317 u64 bytenr;
3318
3319 bytenr = btrfs_sb_offset(copy_num);
3320 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3321 return -EINVAL;
3322
3323 bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
3324 /*
3325 * If we fail to read from the underlying devices, as of now
3326 * the best option we have is to mark it EIO.
3327 */
3328 if (!bh)
3329 return -EIO;
3330
3331 super = (struct btrfs_super_block *)bh->b_data;
3332 if (btrfs_super_bytenr(super) != bytenr ||
3333 btrfs_super_magic(super) != BTRFS_MAGIC) {
3334 brelse(bh);
3335 return -EINVAL;
3336 }
3337
3338 *bh_ret = bh;
3339 return 0;
3340 }
3341
3342
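/*
 * Read the first super block copy of @bdev and return its buffer head,
 * or an ERR_PTR on failure. See the comment in the loop below on why
 * only copy 0 is examined.
 */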
3343 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3344 {
3345 struct buffer_head *bh;
3346 struct buffer_head *latest = NULL;
3347 struct btrfs_super_block *super;
3348 int i;
3349 u64 transid = 0;
3350 int ret = -EINVAL;
3351
3352 /* we would like to check all the supers, but that would make
3353 * a btrfs mount succeed after a mkfs from a different FS.
3354 * So, we need to add a special mount option to scan for
3355 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3356 */
3357 for (i = 0; i < 1; i++) {
3358 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3359 if (ret)
3360 continue;
3361
3362 super = (struct btrfs_super_block *)bh->b_data;
3363
3364 if (!latest || btrfs_super_generation(super) > transid) {
3365 brelse(latest);
3366 latest = bh;
3367 transid = btrfs_super_generation(super);
3368 } else {
3369 brelse(bh);
3370 }
3371 }
3372
3373 if (!latest)
3374 return ERR_PTR(ret);
3375
3376 return latest;
3377 }
3378
3379 /*
3380 * this should be called twice, once with wait == 0 and
3381 * once with wait == 1. When wait == 0 is done, all the buffer heads
3382 * we write are pinned.
3383 *
3384 * They are released when wait == 1 is done.
3385 * max_mirrors must be the same for both runs, and it indicates how
3386 * many supers on this one device should be written.
3387 *
3388 * max_mirrors == 0 means to write them all.
3389 */
3390 static int write_dev_supers(struct btrfs_device *device,
3391 struct btrfs_super_block *sb,
3392 int wait, int max_mirrors)
3393 {
3394 struct buffer_head *bh;
3395 int i;
3396 int ret;
3397 int errors = 0;
3398 u32 crc;
3399 u64 bytenr;
3400
3401 if (max_mirrors == 0)
3402 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3403
3404 for (i = 0; i < max_mirrors; i++) {
3405 bytenr = btrfs_sb_offset(i);
3406 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3407 device->commit_total_bytes)
3408 break;
3409
3410 if (wait) {
3411 bh = __find_get_block(device->bdev, bytenr / 4096,
3412 BTRFS_SUPER_INFO_SIZE);
3413 if (!bh) {
3414 errors++;
3415 continue;
3416 }
3417 wait_on_buffer(bh);
3418 if (!buffer_uptodate(bh))
3419 errors++;
3420
3421 /* drop our reference */
3422 brelse(bh);
3423
3424 /* drop the reference from the wait == 0 run */
3425 brelse(bh);
3426 continue;
3427 } else {
3428 btrfs_set_super_bytenr(sb, bytenr);
3429
3430 crc = ~(u32)0;
3431 crc = btrfs_csum_data((const char *)sb +
3432 BTRFS_CSUM_SIZE, crc,
3433 BTRFS_SUPER_INFO_SIZE -
3434 BTRFS_CSUM_SIZE);
3435 btrfs_csum_final(crc, sb->csum);
3436
3437 /*
3438 * one reference for us, and we leave it for the
3439 * caller
3440 */
3441 bh = __getblk(device->bdev, bytenr / 4096,
3442 BTRFS_SUPER_INFO_SIZE);
3443 if (!bh) {
3444 btrfs_err(device->fs_info,
3445 "couldn't get super buffer head for bytenr %llu",
3446 bytenr);
3447 errors++;
3448 continue;
3449 }
3450
3451 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3452
3453 /* one reference for submit_bh */
3454 get_bh(bh);
3455
3456 set_buffer_uptodate(bh);
3457 lock_buffer(bh);
3458 bh->b_end_io = btrfs_end_buffer_write_sync;
3459 bh->b_private = device;
3460 }
3461
3462 /*
3463 * we FUA the first super. The others we allow
3464 * to go down lazily.
3465 */
3466 if (i == 0) {
3467 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3468 REQ_SYNC | REQ_FUA, bh);
3469 } else {
3470 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
3471 }
3472 if (ret)
3473 errors++;
3474 }
3475 return errors < i ? 0 : -1;
3476 }
3477
3478 /*
3479 * endio for write_dev_flush; this will wake anyone waiting
3480 * for the barrier when it is done
3481 */
3482 static void btrfs_end_empty_barrier(struct bio *bio)
3483 {
3484 complete(bio->bi_private);
3485 }
3486
3487 /*
3488 * Submit a flush request to the device if it supports it. Error handling is
3489 * done in the waiting counterpart.
3490 */
3491 static void write_dev_flush(struct btrfs_device *device)
3492 {
3493 struct request_queue *q = bdev_get_queue(device->bdev);
3494 struct bio *bio = device->flush_bio;
3495
3496 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3497 return;
3498
3499 bio_reset(bio);
3500 bio->bi_end_io = btrfs_end_empty_barrier;
3501 bio->bi_bdev = device->bdev;
3502 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3503 init_completion(&device->flush_wait);
3504 bio->bi_private = &device->flush_wait;
3505
3506 submit_bio(bio);
3507 device->flush_bio_sent = 1;
3508 }
3509
3510 /*
3511 * If the flush bio has been submitted by write_dev_flush, wait for it.
3512 */
3513 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3514 {
3515 struct bio *bio = device->flush_bio;
3516
3517 if (!device->flush_bio_sent)
3518 return 0;
3519
3520 device->flush_bio_sent = 0;
3521 wait_for_completion_io(&device->flush_wait);
3522
3523 return bio->bi_status;
3524 }
3525
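/*
 * Count the devices whose flush failed (or that have no bdev at all)
 * and fail the barrier if more of them are gone than the filesystem
 * can tolerate.
 */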
3526 static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
3527 {
3528 int dev_flush_error = 0;
3529 struct btrfs_device *dev;
3530
3531 list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
3532 if (!dev->bdev || dev->last_flush_error)
3533 dev_flush_error++;
3534 }
3535
3536 if (dev_flush_error >
3537 fsdevs->fs_info->num_tolerated_disk_barrier_failures)
3538 return -EIO;
3539
3540 return 0;
3541 }
3542
3543 /*
3544 * send an empty flush down to each device in parallel,
3545 * then wait for them
3546 */
3547 static int barrier_all_devices(struct btrfs_fs_info *info)
3548 {
3549 struct list_head *head;
3550 struct btrfs_device *dev;
3551 int errors_wait = 0;
3552 blk_status_t ret;
3553
3554 /* send down all the barriers */
3555 head = &info->fs_devices->devices;
3556 list_for_each_entry_rcu(dev, head, dev_list) {
3557 if (dev->missing)
3558 continue;
3559 if (!dev->bdev)
3560 continue;
3561 if (!dev->in_fs_metadata || !dev->writeable)
3562 continue;
3563
3564 write_dev_flush(dev);
3565 dev->last_flush_error = 0;
3566 }
3567
3568 /* wait for all the barriers */
3569 list_for_each_entry_rcu(dev, head, dev_list) {
3570 if (dev->missing)
3571 continue;
3572 if (!dev->bdev) {
3573 errors_wait++;
3574 continue;
3575 }
3576 if (!dev->in_fs_metadata || !dev->writeable)
3577 continue;
3578
3579 ret = wait_dev_flush(dev);
3580 if (ret) {
3581 dev->last_flush_error = ret;
3582 btrfs_dev_stat_inc_and_print(dev,
3583 BTRFS_DEV_STAT_FLUSH_ERRS);
3584 errors_wait++;
3585 }
3586 }
3587
3588 if (errors_wait) {
3589 /*
3590 * We need the status of all disks
3591 * to arrive at the volume status, so error checking
3592 * is pushed to a separate loop.
3593 */
3594 return check_barrier_error(info->fs_devices);
3595 }
3596 return 0;
3597 }
3598
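/*
 * For a given block group profile mask, return how many devices may
 * fail while the data remains reconstructible, i.e. the minimum of the
 * tolerated_failures values of all raid types present in @flags.
 */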
3599 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3600 {
3601 int raid_type;
3602 int min_tolerated = INT_MAX;
3603
3604 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3605 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3606 min_tolerated = min(min_tolerated,
3607 btrfs_raid_array[BTRFS_RAID_SINGLE].
3608 tolerated_failures);
3609
3610 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3611 if (raid_type == BTRFS_RAID_SINGLE)
3612 continue;
3613 if (!(flags & btrfs_raid_group[raid_type]))
3614 continue;
3615 min_tolerated = min(min_tolerated,
3616 btrfs_raid_array[raid_type].
3617 tolerated_failures);
3618 }
3619
3620 if (min_tolerated == INT_MAX) {
3621 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3622 min_tolerated = 0;
3623 }
3624
3625 return min_tolerated;
3626 }
3627
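/*
 * Walk every space_info and its in-use block groups to find the
 * filesystem-wide minimum of the per-profile tolerated failures
 * computed above.
 */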
3628 int btrfs_calc_num_tolerated_disk_barrier_failures(
3629 struct btrfs_fs_info *fs_info)
3630 {
3631 struct btrfs_ioctl_space_info space;
3632 struct btrfs_space_info *sinfo;
3633 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3634 BTRFS_BLOCK_GROUP_SYSTEM,
3635 BTRFS_BLOCK_GROUP_METADATA,
3636 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3637 int i;
3638 int c;
3639 int num_tolerated_disk_barrier_failures =
3640 (int)fs_info->fs_devices->num_devices;
3641
3642 for (i = 0; i < ARRAY_SIZE(types); i++) {
3643 struct btrfs_space_info *tmp;
3644
3645 sinfo = NULL;
3646 rcu_read_lock();
3647 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3648 if (tmp->flags == types[i]) {
3649 sinfo = tmp;
3650 break;
3651 }
3652 }
3653 rcu_read_unlock();
3654
3655 if (!sinfo)
3656 continue;
3657
3658 down_read(&sinfo->groups_sem);
3659 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3660 u64 flags;
3661
3662 if (list_empty(&sinfo->block_groups[c]))
3663 continue;
3664
3665 btrfs_get_block_group_info(&sinfo->block_groups[c],
3666 &space);
3667 if (space.total_bytes == 0 || space.used_bytes == 0)
3668 continue;
3669 flags = space.flags;
3670
3671 num_tolerated_disk_barrier_failures = min(
3672 num_tolerated_disk_barrier_failures,
3673 btrfs_get_num_tolerated_disk_barrier_failures(
3674 flags));
3675 }
3676 up_read(&sinfo->groups_sem);
3677 }
3678
3679 return num_tolerated_disk_barrier_failures;
3680 }
3681
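/*
 * Write the super block to every writeable device: update the backup
 * root ring, send barriers if enabled, then do the two-phase (submit,
 * wait) super block write and bail out if more devices failed than the
 * redundancy allows.
 */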
3682 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3683 {
3684 struct list_head *head;
3685 struct btrfs_device *dev;
3686 struct btrfs_super_block *sb;
3687 struct btrfs_dev_item *dev_item;
3688 int ret;
3689 int do_barriers;
3690 int max_errors;
3691 int total_errors = 0;
3692 u64 flags;
3693
3694 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3695 backup_super_roots(fs_info);
3696
3697 sb = fs_info->super_for_commit;
3698 dev_item = &sb->dev_item;
3699
3700 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3701 head = &fs_info->fs_devices->devices;
3702 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3703
3704 if (do_barriers) {
3705 ret = barrier_all_devices(fs_info);
3706 if (ret) {
3707 mutex_unlock(
3708 &fs_info->fs_devices->device_list_mutex);
3709 btrfs_handle_fs_error(fs_info, ret,
3710 "errors while submitting device barriers.");
3711 return ret;
3712 }
3713 }
3714
3715 list_for_each_entry_rcu(dev, head, dev_list) {
3716 if (!dev->bdev) {
3717 total_errors++;
3718 continue;
3719 }
3720 if (!dev->in_fs_metadata || !dev->writeable)
3721 continue;
3722
3723 btrfs_set_stack_device_generation(dev_item, 0);
3724 btrfs_set_stack_device_type(dev_item, dev->type);
3725 btrfs_set_stack_device_id(dev_item, dev->devid);
3726 btrfs_set_stack_device_total_bytes(dev_item,
3727 dev->commit_total_bytes);
3728 btrfs_set_stack_device_bytes_used(dev_item,
3729 dev->commit_bytes_used);
3730 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3731 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3732 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3733 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3734 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3735
3736 flags = btrfs_super_flags(sb);
3737 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3738
3739 ret = write_dev_supers(dev, sb, 0, max_mirrors);
3740 if (ret)
3741 total_errors++;
3742 }
3743 if (total_errors > max_errors) {
3744 btrfs_err(fs_info, "%d errors while writing supers",
3745 total_errors);
3746 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3747
3748 /* FUA is masked off if unsupported and can't be the reason */
3749 btrfs_handle_fs_error(fs_info, -EIO,
3750 "%d errors while writing supers",
3751 total_errors);
3752 return -EIO;
3753 }
3754
3755 total_errors = 0;
3756 list_for_each_entry_rcu(dev, head, dev_list) {
3757 if (!dev->bdev)
3758 continue;
3759 if (!dev->in_fs_metadata || !dev->writeable)
3760 continue;
3761
3762 ret = write_dev_supers(dev, sb, 1, max_mirrors);
3763 if (ret)
3764 total_errors++;
3765 }
3766 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3767 if (total_errors > max_errors) {
3768 btrfs_handle_fs_error(fs_info, -EIO,
3769 "%d errors while writing supers",
3770 total_errors);
3771 return -EIO;
3772 }
3773 return 0;
3774 }
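/*
 * Error-budget illustration for write_all_supers(): max_errors is
 * num_devices - 1, so e.g. on a 3-device filesystem each pass absorbs
 * up to 2 failed super writes and only a 3rd failure aborts:
 *
 *	max_errors = btrfs_super_num_devices(sb) - 1;	(3 - 1 = 2)
 *	if (total_errors > max_errors)			(3 > 2 -> -EIO)
 */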
3775
3776 /* Drop a fs root from the radix tree and free it. */
3777 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3778 struct btrfs_root *root)
3779 {
3780 spin_lock(&fs_info->fs_roots_radix_lock);
3781 radix_tree_delete(&fs_info->fs_roots_radix,
3782 (unsigned long)root->root_key.objectid);
3783 spin_unlock(&fs_info->fs_roots_radix_lock);
3784
3785 if (btrfs_root_refs(&root->root_item) == 0)
3786 synchronize_srcu(&fs_info->subvol_srcu);
3787
3788 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3789 btrfs_free_log(NULL, root);
3790 if (root->reloc_root) {
3791 free_extent_buffer(root->reloc_root->node);
3792 free_extent_buffer(root->reloc_root->commit_root);
3793 btrfs_put_fs_root(root->reloc_root);
3794 root->reloc_root = NULL;
3795 }
3796 }
3797
3798 if (root->free_ino_pinned)
3799 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3800 if (root->free_ino_ctl)
3801 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3802 free_fs_root(root);
3803 }
3804
3805 static void free_fs_root(struct btrfs_root *root)
3806 {
3807 iput(root->ino_cache_inode);
3808 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3809 btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3810 root->orphan_block_rsv = NULL;
3811 if (root->anon_dev)
3812 free_anon_bdev(root->anon_dev);
3813 if (root->subv_writers)
3814 btrfs_free_subvolume_writers(root->subv_writers);
3815 free_extent_buffer(root->node);
3816 free_extent_buffer(root->commit_root);
3817 kfree(root->free_ino_ctl);
3818 kfree(root->free_ino_pinned);
3819 kfree(root->name);
3820 btrfs_put_fs_root(root);
3821 }
3822
3823 void btrfs_free_fs_root(struct btrfs_root *root)
3824 {
3825 free_fs_root(root);
3826 }
3827
3828 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3829 {
3830 u64 root_objectid = 0;
3831 struct btrfs_root *gang[8];
3832 int i = 0;
3833 int err = 0;
3834 unsigned int ret = 0;
3835 int index;
3836
3837 while (1) {
3838 index = srcu_read_lock(&fs_info->subvol_srcu);
3839 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3840 (void **)gang, root_objectid,
3841 ARRAY_SIZE(gang));
3842 if (!ret) {
3843 srcu_read_unlock(&fs_info->subvol_srcu, index);
3844 break;
3845 }
3846 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3847
3848 for (i = 0; i < ret; i++) {
3849 /* Avoid grabbing roots in dead_roots */
3850 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3851 gang[i] = NULL;
3852 continue;
3853 }
3854 /* grab all the search results for later use */
3855 gang[i] = btrfs_grab_fs_root(gang[i]);
3856 }
3857 srcu_read_unlock(&fs_info->subvol_srcu, index);
3858
3859 for (i = 0; i < ret; i++) {
3860 if (!gang[i])
3861 continue;
3862 root_objectid = gang[i]->root_key.objectid;
3863 err = btrfs_orphan_cleanup(gang[i]);
3864 if (err)
3865 break;
3866 btrfs_put_fs_root(gang[i]);
3867 }
3868 root_objectid++;
3869 }
3870
3871 /* release the uncleaned roots due to error */
3872 for (; i < ret; i++) {
3873 if (gang[i])
3874 btrfs_put_fs_root(gang[i]);
3875 }
3876 return err;
3877 }
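/*
 * btrfs_cleanup_fs_roots() walks the radix tree with the usual
 * pagination idiom: look up a gang of entries, advance the start key
 * past the last hit, repeat until the lookup comes back empty.
 * Minimal sketch of the same idiom; the tree and the visit() and
 * key_of() helpers are placeholders for illustration only:
 *
 *	unsigned long next = 0;
 *	void *gang[8];
 *	unsigned int nr, i;
 *
 *	while ((nr = radix_tree_gang_lookup(&tree, gang, next,
 *					    ARRAY_SIZE(gang))) > 0) {
 *		for (i = 0; i < nr; i++)
 *			visit(gang[i]);
 *		next = key_of(gang[nr - 1]) + 1;
 *	}
 */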
3878
3879 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3880 {
3881 struct btrfs_root *root = fs_info->tree_root;
3882 struct btrfs_trans_handle *trans;
3883
3884 mutex_lock(&fs_info->cleaner_mutex);
3885 btrfs_run_delayed_iputs(fs_info);
3886 mutex_unlock(&fs_info->cleaner_mutex);
3887 wake_up_process(fs_info->cleaner_kthread);
3888
3889 /* wait until ongoing cleanup work is done */
3890 down_write(&fs_info->cleanup_work_sem);
3891 up_write(&fs_info->cleanup_work_sem);
3892
3893 trans = btrfs_join_transaction(root);
3894 if (IS_ERR(trans))
3895 return PTR_ERR(trans);
3896 return btrfs_commit_transaction(trans);
3897 }
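/*
 * The down_write()/up_write() pair in btrfs_commit_super() is a
 * drain, not a critical section: cleanup workers hold
 * cleanup_work_sem for read while they run, so briefly taking it for
 * write waits for all of them to finish without keeping new ones out
 * afterwards.  Reader side, for illustration:
 *
 *	down_read(&fs_info->cleanup_work_sem);
 *	... do cleanup work ...
 *	up_read(&fs_info->cleanup_work_sem);
 */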
3898
3899 void close_ctree(struct btrfs_fs_info *fs_info)
3900 {
3901 struct btrfs_root *root = fs_info->tree_root;
3902 int ret;
3903
3904 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3905
3906 /* wait for the qgroup rescan worker to stop */
3907 btrfs_qgroup_wait_for_completion(fs_info, false);
3908
3909 /* wait for the uuid_scan task to finish */
3910 down(&fs_info->uuid_tree_rescan_sem);
3911 /* avoid complaints from lockdep et al.; set sem back to initial state */
3912 up(&fs_info->uuid_tree_rescan_sem);
3913
3914 /* pause restriper - we want to resume on mount */
3915 btrfs_pause_balance(fs_info);
3916
3917 btrfs_dev_replace_suspend_for_unmount(fs_info);
3918
3919 btrfs_scrub_cancel(fs_info);
3920
3921 /* wait for any defraggers to finish */
3922 wait_event(fs_info->transaction_wait,
3923 (atomic_read(&fs_info->defrag_running) == 0));
3924
3925 /* clear out the rbtree of defraggable inodes */
3926 btrfs_cleanup_defrag_inodes(fs_info);
3927
3928 cancel_work_sync(&fs_info->async_reclaim_work);
3929
3930 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3931 /*
3932 * Once the cleaner thread is stopped, block groups still
3933 * queued for removal would never be deleted, so delete
3934 * them here before the final commit.
3935 */
3936 btrfs_delete_unused_bgs(fs_info);
3937
3938 ret = btrfs_commit_super(fs_info);
3939 if (ret)
3940 btrfs_err(fs_info, "commit super ret %d", ret);
3941 }
3942
3943 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3944 btrfs_error_commit_super(fs_info);
3945
3946 kthread_stop(fs_info->transaction_kthread);
3947 kthread_stop(fs_info->cleaner_kthread);
3948
3949 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3950
3951 btrfs_free_qgroup_config(fs_info);
3952
3953 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3954 btrfs_info(fs_info, "at unmount delalloc count %lld",
3955 percpu_counter_sum(&fs_info->delalloc_bytes));
3956 }
3957
3958 btrfs_sysfs_remove_mounted(fs_info);
3959 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3960
3961 btrfs_free_fs_roots(fs_info);
3962
3963 btrfs_put_block_group_cache(fs_info);
3964
3965 /*
3966 * We must make sure no read requests are submitted
3967 * after we stop all workers.
3968 */
3969 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3970 btrfs_stop_all_workers(fs_info);
3971
3972 btrfs_free_block_groups(fs_info);
3973
3974 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3975 free_root_pointers(fs_info, 1);
3976
3977 iput(fs_info->btree_inode);
3978
3979 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3980 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3981 btrfsic_unmount(fs_info->fs_devices);
3982 #endif
3983
3984 btrfs_close_devices(fs_info->fs_devices);
3985 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3986
3987 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3988 percpu_counter_destroy(&fs_info->delalloc_bytes);
3989 percpu_counter_destroy(&fs_info->bio_counter);
3990 cleanup_srcu_struct(&fs_info->subvol_srcu);
3991
3992 btrfs_free_stripe_hash_table(fs_info);
3993
3994 __btrfs_free_block_rsv(root->orphan_block_rsv);
3995 root->orphan_block_rsv = NULL;
3996
3997 mutex_lock(&fs_info->chunk_mutex);
3998 while (!list_empty(&fs_info->pinned_chunks)) {
3999 struct extent_map *em;
4000
4001 em = list_first_entry(&fs_info->pinned_chunks,
4002 struct extent_map, list);
4003 list_del_init(&em->list);
4004 free_extent_map(em);
4005 }
4006 mutex_unlock(&fs_info->chunk_mutex);
4007 }
4008
4009 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4010 int atomic)
4011 {
4012 int ret;
4013 struct inode *btree_inode = buf->pages[0]->mapping->host;
4014
4015 ret = extent_buffer_uptodate(buf);
4016 if (!ret)
4017 return ret;
4018
4019 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4020 parent_transid, atomic);
4021 if (ret == -EAGAIN)
4022 return ret;
4023 return !ret;
4024 }
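/*
 * Return-value summary for btrfs_buffer_uptodate(): 1 when the buffer
 * contents and parent transid check out, 0 when they don't, and
 * -EAGAIN when @atomic is set and verifying would have had to block.
 * A hypothetical caller therefore handles three cases:
 *
 *	ret = btrfs_buffer_uptodate(buf, parent_transid, 1);
 *	if (ret == -EAGAIN)
 *		retry from a context that may block
 *	else if (!ret)
 *		reread or discard the buffer
 *	else
 *		use the buffer
 */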
4025
4026 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4027 {
4028 struct btrfs_fs_info *fs_info;
4029 struct btrfs_root *root;
4030 u64 transid = btrfs_header_generation(buf);
4031 int was_dirty;
4032
4033 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4034 /*
4035 * This is a fast path so only do this check if we have sanity tests
4036 * enabled. Normal people shouldn't be marking dummy buffers as dirty
4037 * outside of the sanity tests.
4038 */
4039 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
4040 return;
4041 #endif
4042 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4043 fs_info = root->fs_info;
4044 btrfs_assert_tree_locked(buf);
4045 if (transid != fs_info->generation)
4046 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4047 buf->start, transid, fs_info->generation);
4048 was_dirty = set_extent_buffer_dirty(buf);
4049 if (!was_dirty)
4050 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
4051 buf->len,
4052 fs_info->dirty_metadata_batch);
4053 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4054 if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
4055 btrfs_print_leaf(fs_info, buf);
4056 ASSERT(0);
4057 }
4058 #endif
4059 }
4060
4061 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4062 int flush_delayed)
4063 {
4064 /*
4065 * Looks as though older kernels can get into trouble with
4066 * this code; they end up stuck in balance_dirty_pages forever.
4067 */
4068 int ret;
4069
4070 if (current->flags & PF_MEMALLOC)
4071 return;
4072
4073 if (flush_delayed)
4074 btrfs_balance_delayed_items(fs_info);
4075
4076 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4077 BTRFS_DIRTY_METADATA_THRESH);
4078 if (ret > 0) {
4079 balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4080 }
4081 }
4082
4083 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4084 {
4085 __btrfs_btree_balance_dirty(fs_info, 1);
4086 }
4087
4088 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4089 {
4090 __btrfs_btree_balance_dirty(fs_info, 0);
4091 }
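/*
 * The two exported wrappers above only choose whether delayed items
 * are flushed before the dirty-page balancing, e.g. (illustrative):
 *
 *	btrfs_btree_balance_dirty(fs_info);		(flush + balance)
 *	btrfs_btree_balance_dirty_nodelay(fs_info);	(balance only)
 */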
4092
4093 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4094 {
4095 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4096 struct btrfs_fs_info *fs_info = root->fs_info;
4097
4098 return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
4099 }
4100
4101 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
4102 {
4103 struct btrfs_super_block *sb = fs_info->super_copy;
4104 u64 nodesize = btrfs_super_nodesize(sb);
4105 u64 sectorsize = btrfs_super_sectorsize(sb);
4106 int ret = 0;
4107
4108 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4109 btrfs_err(fs_info, "no valid FS found");
4110 ret = -EINVAL;
4111 }
4112 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4113 btrfs_warn(fs_info, "unrecognized super flag: %llu",
4114 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4115 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4116 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
4117 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4118 ret = -EINVAL;
4119 }
4120 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4121 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4122 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4123 ret = -EINVAL;
4124 }
4125 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4126 btrfs_err(fs_info, "log_root level too big: %d >= %d",
4127 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4128 ret = -EINVAL;
4129 }
4130
4131 /*
4132 * Check sectorsize and nodesize first; other checks will need them.
4133 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
4134 */
4135 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4136 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4137 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4138 ret = -EINVAL;
4139 }
4140 /* Only PAGE_SIZE is supported so far */
4141 if (sectorsize != PAGE_SIZE) {
4142 btrfs_err(fs_info,
4143 "sectorsize %llu not supported yet, only support %lu",
4144 sectorsize, PAGE_SIZE);
4145 ret = -EINVAL;
4146 }
4147 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4148 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4149 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4150 ret = -EINVAL;
4151 }
4152 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4153 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4154 le32_to_cpu(sb->__unused_leafsize), nodesize);
4155 ret = -EINVAL;
4156 }
4157
4158 /* Root alignment check */
4159 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4160 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4161 btrfs_super_root(sb));
4162 ret = -EINVAL;
4163 }
4164 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4165 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4166 btrfs_super_chunk_root(sb));
4167 ret = -EINVAL;
4168 }
4169 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4170 btrfs_warn(fs_info, "log_root block unaligned: %llu",
4171 btrfs_super_log_root(sb));
4172 ret = -EINVAL;
4173 }
4174
4175 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
4176 btrfs_err(fs_info,
4177 "dev_item UUID does not match fsid: %pU != %pU",
4178 fs_info->fsid, sb->dev_item.fsid);
4179 ret = -EINVAL;
4180 }
4181
4182 /*
4183 * Hint to catch really bogus numbers, bitflips or so; more exact
4184 * checks are done later.
4185 */
4186 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4187 btrfs_err(fs_info, "bytes_used is too small %llu",
4188 btrfs_super_bytes_used(sb));
4189 ret = -EINVAL;
4190 }
4191 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4192 btrfs_err(fs_info, "invalid stripesize %u",
4193 btrfs_super_stripesize(sb));
4194 ret = -EINVAL;
4195 }
4196 if (btrfs_super_num_devices(sb) > (1UL << 31))
4197 btrfs_warn(fs_info, "suspicious number of devices: %llu",
4198 btrfs_super_num_devices(sb));
4199 if (btrfs_super_num_devices(sb) == 0) {
4200 btrfs_err(fs_info, "number of devices is 0");
4201 ret = -EINVAL;
4202 }
4203
4204 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4205 btrfs_err(fs_info, "super offset mismatch %llu != %u",
4206 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4207 ret = -EINVAL;
4208 }
4209
4210 /*
4211 * Obvious sys_chunk_array corruptions: it must hold at least one key
4212 * and one chunk.
4213 */
4214 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4215 btrfs_err(fs_info, "system chunk array too big %u > %u",
4216 btrfs_super_sys_array_size(sb),
4217 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4218 ret = -EINVAL;
4219 }
4220 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4221 + sizeof(struct btrfs_chunk)) {
4222 btrfs_err(fs_info, "system chunk array too small %u < %zu",
4223 btrfs_super_sys_array_size(sb),
4224 sizeof(struct btrfs_disk_key)
4225 + sizeof(struct btrfs_chunk));
4226 ret = -EINVAL;
4227 }
4228
4229 /*
4230 * The generation is a global counter; we'll trust it more than the
4231 * others, but it's still possible that it's the one that's wrong.
4232 */
4233 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4234 btrfs_warn(fs_info,
4235 "suspicious: generation < chunk_root_generation: %llu < %llu",
4236 btrfs_super_generation(sb),
4237 btrfs_super_chunk_root_generation(sb));
4238 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4239 && btrfs_super_cache_generation(sb) != (u64)-1)
4240 btrfs_warn(fs_info,
4241 "suspicious: generation < cache_generation: %llu < %llu",
4242 btrfs_super_generation(sb),
4243 btrfs_super_cache_generation(sb));
4244
4245 return ret;
4246 }
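/*
 * Note the two severity levels used in btrfs_check_super_valid():
 * most corruptions set ret = -EINVAL and fail the mount, while merely
 * suspicious values, e.g. an enormous device count or generation
 * skew, only warn:
 *
 *	if (btrfs_super_num_devices(sb) > (1UL << 31))
 *		btrfs_warn(...);	(warning only, mount continues)
 *	if (btrfs_super_num_devices(sb) == 0)
 *		ret = -EINVAL;		(hard failure)
 */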
4247
4248 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4249 {
4250 mutex_lock(&fs_info->cleaner_mutex);
4251 btrfs_run_delayed_iputs(fs_info);
4252 mutex_unlock(&fs_info->cleaner_mutex);
4253
4254 down_write(&fs_info->cleanup_work_sem);
4255 up_write(&fs_info->cleanup_work_sem);
4256
4257 /* cleanup FS via transaction */
4258 btrfs_cleanup_transaction(fs_info);
4259 }
4260
4261 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4262 {
4263 struct btrfs_ordered_extent *ordered;
4264
4265 spin_lock(&root->ordered_extent_lock);
4266 /*
4267 * This will just short-circuit the ordered completion code, which
4268 * will make sure the ordered extent gets properly cleaned up.
4269 */
4270 list_for_each_entry(ordered, &root->ordered_extents,
4271 root_extent_list)
4272 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4273 spin_unlock(&root->ordered_extent_lock);
4274 }
4275
4276 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4277 {
4278 struct btrfs_root *root;
4279 struct list_head splice;
4280
4281 INIT_LIST_HEAD(&splice);
4282
4283 spin_lock(&fs_info->ordered_root_lock);
4284 list_splice_init(&fs_info->ordered_roots, &splice);
4285 while (!list_empty(&splice)) {
4286 root = list_first_entry(&splice, struct btrfs_root,
4287 ordered_root);
4288 list_move_tail(&root->ordered_root,
4289 &fs_info->ordered_roots);
4290
4291 spin_unlock(&fs_info->ordered_root_lock);
4292 btrfs_destroy_ordered_extents(root);
4293
4294 cond_resched();
4295 spin_lock(&fs_info->ordered_root_lock);
4296 }
4297 spin_unlock(&fs_info->ordered_root_lock);
4298 }
4299
4300 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4301 struct btrfs_fs_info *fs_info)
4302 {
4303 struct rb_node *node;
4304 struct btrfs_delayed_ref_root *delayed_refs;
4305 struct btrfs_delayed_ref_node *ref;
4306 int ret = 0;
4307
4308 delayed_refs = &trans->delayed_refs;
4309
4310 spin_lock(&delayed_refs->lock);
4311 if (atomic_read(&delayed_refs->num_entries) == 0) {
4312 spin_unlock(&delayed_refs->lock);
4313 btrfs_info(fs_info, "delayed_refs has no entries");
4314 return ret;
4315 }
4316
4317 while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4318 struct btrfs_delayed_ref_head *head;
4319 struct btrfs_delayed_ref_node *tmp;
4320 bool pin_bytes = false;
4321
4322 head = rb_entry(node, struct btrfs_delayed_ref_head,
4323 href_node);
4324 if (!mutex_trylock(&head->mutex)) {
4325 refcount_inc(&head->node.refs);
4326 spin_unlock(&delayed_refs->lock);
4327
4328 mutex_lock(&head->mutex);
4329 mutex_unlock(&head->mutex);
4330 btrfs_put_delayed_ref(&head->node);
4331 spin_lock(&delayed_refs->lock);
4332 continue;
4333 }
4334 spin_lock(&head->lock);
4335 list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4336 list) {
4337 ref->in_tree = 0;
4338 list_del(&ref->list);
4339 if (!list_empty(&ref->add_list))
4340 list_del(&ref->add_list);
4341 atomic_dec(&delayed_refs->num_entries);
4342 btrfs_put_delayed_ref(ref);
4343 }
4344 if (head->must_insert_reserved)
4345 pin_bytes = true;
4346 btrfs_free_delayed_extent_op(head->extent_op);
4347 delayed_refs->num_heads--;
4348 if (head->processing == 0)
4349 delayed_refs->num_heads_ready--;
4350 atomic_dec(&delayed_refs->num_entries);
4351 head->node.in_tree = 0;
4352 rb_erase(&head->href_node, &delayed_refs->href_root);
4353 spin_unlock(&head->lock);
4354 spin_unlock(&delayed_refs->lock);
4355 mutex_unlock(&head->mutex);
4356
4357 if (pin_bytes)
4358 btrfs_pin_extent(fs_info, head->node.bytenr,
4359 head->node.num_bytes, 1);
4360 btrfs_put_delayed_ref(&head->node);
4361 cond_resched();
4362 spin_lock(&delayed_refs->lock);
4363 }
4364
4365 spin_unlock(&delayed_refs->lock);
4366
4367 return ret;
4368 }
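/*
 * The mutex_trylock() dance above is the standard way to wait on a
 * per-entry mutex while holding a list spinlock: pin the entry with a
 * reference, drop the spinlock, block on the mutex until the current
 * holder is done, then drop everything and rescan.  Sketch with
 * placeholder grab_ref()/put_ref() helpers:
 *
 *	if (!mutex_trylock(&entry->mutex)) {
 *		grab_ref(entry);
 *		spin_unlock(&list_lock);
 *		mutex_lock(&entry->mutex);
 *		mutex_unlock(&entry->mutex);
 *		put_ref(entry);
 *		spin_lock(&list_lock);
 *		continue;
 *	}
 */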
4369
4370 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4371 {
4372 struct btrfs_inode *btrfs_inode;
4373 struct list_head splice;
4374
4375 INIT_LIST_HEAD(&splice);
4376
4377 spin_lock(&root->delalloc_lock);
4378 list_splice_init(&root->delalloc_inodes, &splice);
4379
4380 while (!list_empty(&splice)) {
4381 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4382 delalloc_inodes);
4383
4384 list_del_init(&btrfs_inode->delalloc_inodes);
4385 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4386 &btrfs_inode->runtime_flags);
4387 spin_unlock(&root->delalloc_lock);
4388
4389 btrfs_invalidate_inodes(btrfs_inode->root);
4390
4391 spin_lock(&root->delalloc_lock);
4392 }
4393
4394 spin_unlock(&root->delalloc_lock);
4395 }
4396
4397 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4398 {
4399 struct btrfs_root *root;
4400 struct list_head splice;
4401
4402 INIT_LIST_HEAD(&splice);
4403
4404 spin_lock(&fs_info->delalloc_root_lock);
4405 list_splice_init(&fs_info->delalloc_roots, &splice);
4406 while (!list_empty(&splice)) {
4407 root = list_first_entry(&splice, struct btrfs_root,
4408 delalloc_root);
4409 list_del_init(&root->delalloc_root);
4410 root = btrfs_grab_fs_root(root);
4411 BUG_ON(!root);
4412 spin_unlock(&fs_info->delalloc_root_lock);
4413
4414 btrfs_destroy_delalloc_inodes(root);
4415 btrfs_put_fs_root(root);
4416
4417 spin_lock(&fs_info->delalloc_root_lock);
4418 }
4419 spin_unlock(&fs_info->delalloc_root_lock);
4420 }
4421
4422 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4423 struct extent_io_tree *dirty_pages,
4424 int mark)
4425 {
4426 int ret;
4427 struct extent_buffer *eb;
4428 u64 start = 0;
4429 u64 end;
4430
4431 while (1) {
4432 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4433 mark, NULL);
4434 if (ret)
4435 break;
4436
4437 clear_extent_bits(dirty_pages, start, end, mark);
4438 while (start <= end) {
4439 eb = find_extent_buffer(fs_info, start);
4440 start += fs_info->nodesize;
4441 if (!eb)
4442 continue;
4443 wait_on_extent_buffer_writeback(eb);
4444
4445 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4446 &eb->bflags))
4447 clear_extent_buffer_dirty(eb);
4448 free_extent_buffer_stale(eb);
4449 }
4450 }
4451
4452 return ret;
4453 }
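/*
 * Iteration pattern used above: find_first_extent_bit() returns 0 as
 * long as some range at or after @start still has @mark set, so a
 * generic walk over an extent_io_tree looks like (illustrative only):
 *
 *	u64 start = 0, end;
 *
 *	while (!find_first_extent_bit(tree, start, &start, &end,
 *				      EXTENT_DIRTY, NULL)) {
 *		handle the range [start, end]
 *		start = end + 1;
 *	}
 */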
4454
4455 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4456 struct extent_io_tree *pinned_extents)
4457 {
4458 struct extent_io_tree *unpin;
4459 u64 start;
4460 u64 end;
4461 int ret;
4462 bool loop = true;
4463
4464 unpin = pinned_extents;
4465 again:
4466 while (1) {
4467 ret = find_first_extent_bit(unpin, 0, &start, &end,
4468 EXTENT_DIRTY, NULL);
4469 if (ret)
4470 break;
4471
4472 clear_extent_dirty(unpin, start, end);
4473 btrfs_error_unpin_extent_range(fs_info, start, end);
4474 cond_resched();
4475 }
4476
4477 if (loop) {
4478 if (unpin == &fs_info->freed_extents[0])
4479 unpin = &fs_info->freed_extents[1];
4480 else
4481 unpin = &fs_info->freed_extents[0];
4482 loop = false;
4483 goto again;
4484 }
4485
4486 return 0;
4487 }
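/*
 * fs_info->freed_extents[] is a pair of trees used as a flip buffer:
 * extents pinned by the running transaction accumulate in one tree
 * while the committing transaction unpins from the other, and
 * fs_info->pinned_extents points at the active one.  On a normal
 * commit the roles swap roughly like this (sketch, not the literal
 * commit code):
 *
 *	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
 *		fs_info->pinned_extents = &fs_info->freed_extents[1];
 *	else
 *		fs_info->pinned_extents = &fs_info->freed_extents[0];
 *
 * which is why the error path above has to scrub both trees.
 */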
4488
4489 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4490 {
4491 struct inode *inode;
4492
4493 inode = cache->io_ctl.inode;
4494 if (inode) {
4495 invalidate_inode_pages2(inode->i_mapping);
4496 BTRFS_I(inode)->generation = 0;
4497 cache->io_ctl.inode = NULL;
4498 iput(inode);
4499 }
4500 btrfs_put_block_group(cache);
4501 }
4502
4503 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4504 struct btrfs_fs_info *fs_info)
4505 {
4506 struct btrfs_block_group_cache *cache;
4507
4508 spin_lock(&cur_trans->dirty_bgs_lock);
4509 while (!list_empty(&cur_trans->dirty_bgs)) {
4510 cache = list_first_entry(&cur_trans->dirty_bgs,
4511 struct btrfs_block_group_cache,
4512 dirty_list);
4513 if (!cache) {
4514 btrfs_err(fs_info, "orphan block group dirty_bgs list");
4515 spin_unlock(&cur_trans->dirty_bgs_lock);
4516 return;
4517 }
4518
4519 if (!list_empty(&cache->io_list)) {
4520 spin_unlock(&cur_trans->dirty_bgs_lock);
4521 list_del_init(&cache->io_list);
4522 btrfs_cleanup_bg_io(cache);
4523 spin_lock(&cur_trans->dirty_bgs_lock);
4524 }
4525
4526 list_del_init(&cache->dirty_list);
4527 spin_lock(&cache->lock);
4528 cache->disk_cache_state = BTRFS_DC_ERROR;
4529 spin_unlock(&cache->lock);
4530
4531 spin_unlock(&cur_trans->dirty_bgs_lock);
4532 btrfs_put_block_group(cache);
4533 spin_lock(&cur_trans->dirty_bgs_lock);
4534 }
4535 spin_unlock(&cur_trans->dirty_bgs_lock);
4536
4537 while (!list_empty(&cur_trans->io_bgs)) {
4538 cache = list_first_entry(&cur_trans->io_bgs,
4539 struct btrfs_block_group_cache,
4540 io_list);
4541 if (!cache) {
4542 btrfs_err(fs_info, "orphan block group on io_bgs list");
4543 return;
4544 }
4545
4546 list_del_init(&cache->io_list);
4547 spin_lock(&cache->lock);
4548 cache->disk_cache_state = BTRFS_DC_ERROR;
4549 spin_unlock(&cache->lock);
4550 btrfs_cleanup_bg_io(cache);
4551 }
4552 }
4553
4554 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4555 struct btrfs_fs_info *fs_info)
4556 {
4557 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4558 ASSERT(list_empty(&cur_trans->dirty_bgs));
4559 ASSERT(list_empty(&cur_trans->io_bgs));
4560
4561 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4562
4563 cur_trans->state = TRANS_STATE_COMMIT_START;
4564 wake_up(&fs_info->transaction_blocked_wait);
4565
4566 cur_trans->state = TRANS_STATE_UNBLOCKED;
4567 wake_up(&fs_info->transaction_wait);
4568
4569 btrfs_destroy_delayed_inodes(fs_info);
4570 btrfs_assert_delayed_root_empty(fs_info);
4571
4572 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4573 EXTENT_DIRTY);
4574 btrfs_destroy_pinned_extent(fs_info,
4575 fs_info->pinned_extents);
4576
4577 cur_trans->state = TRANS_STATE_COMPLETED;
4578 wake_up(&cur_trans->commit_wait);
4579 }
4580
4581 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4582 {
4583 struct btrfs_transaction *t;
4584
4585 mutex_lock(&fs_info->transaction_kthread_mutex);
4586
4587 spin_lock(&fs_info->trans_lock);
4588 while (!list_empty(&fs_info->trans_list)) {
4589 t = list_first_entry(&fs_info->trans_list,
4590 struct btrfs_transaction, list);
4591 if (t->state >= TRANS_STATE_COMMIT_START) {
4592 refcount_inc(&t->use_count);
4593 spin_unlock(&fs_info->trans_lock);
4594 btrfs_wait_for_commit(fs_info, t->transid);
4595 btrfs_put_transaction(t);
4596 spin_lock(&fs_info->trans_lock);
4597 continue;
4598 }
4599 if (t == fs_info->running_transaction) {
4600 t->state = TRANS_STATE_COMMIT_DOING;
4601 spin_unlock(&fs_info->trans_lock);
4602 /*
4603 * We wait for num_writers to reach 0 since we don't currently
4604 * hold a trans handle open for this transaction.
4605 */
4606 wait_event(t->writer_wait,
4607 atomic_read(&t->num_writers) == 0);
4608 } else {
4609 spin_unlock(&fs_info->trans_lock);
4610 }
4611 btrfs_cleanup_one_transaction(t, fs_info);
4612
4613 spin_lock(&fs_info->trans_lock);
4614 if (t == fs_info->running_transaction)
4615 fs_info->running_transaction = NULL;
4616 list_del_init(&t->list);
4617 spin_unlock(&fs_info->trans_lock);
4618
4619 btrfs_put_transaction(t);
4620 trace_btrfs_transaction_commit(fs_info->tree_root);
4621 spin_lock(&fs_info->trans_lock);
4622 }
4623 spin_unlock(&fs_info->trans_lock);
4624 btrfs_destroy_all_ordered_extents(fs_info);
4625 btrfs_destroy_delayed_inodes(fs_info);
4626 btrfs_assert_delayed_root_empty(fs_info);
4627 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4628 btrfs_destroy_all_delalloc_inodes(fs_info);
4629 mutex_unlock(&fs_info->transaction_kthread_mutex);
4630
4631 return 0;
4632 }
4633
4634 static struct btrfs_fs_info *btree_fs_info(void *private_data)
4635 {
4636 struct inode *inode = private_data;
4637 return btrfs_sb(inode->i_sb);
4638 }
4639
4640 static const struct extent_io_ops btree_extent_io_ops = {
4641 /* mandatory callbacks */
4642 .submit_bio_hook = btree_submit_bio_hook,
4643 .readpage_end_io_hook = btree_readpage_end_io_hook,
4644 /* note we're sharing with inode.c for the merge bio hook */
4645 .merge_bio_hook = btrfs_merge_bio_hook,
4646 .readpage_io_failed_hook = btree_io_failed_hook,
4647 .set_range_writeback = btrfs_set_range_writeback,
4648 .tree_fs_info = btree_fs_info,
4649
4650 /* optional callbacks */
4651 };