]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - fs/btrfs/disk-io.c
btrfs: Make get_extent_t take btrfs_inode
[mirror_ubuntu-artful-kernel.git] / fs / btrfs / disk-io.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/slab.h>
29 #include <linux/migrate.h>
30 #include <linux/ratelimit.h>
31 #include <linux/uuid.h>
32 #include <linux/semaphore.h>
33 #include <asm/unaligned.h>
34 #include "ctree.h"
35 #include "disk-io.h"
36 #include "hash.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "locking.h"
42 #include "tree-log.h"
43 #include "free-space-cache.h"
44 #include "free-space-tree.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47 #include "rcu-string.h"
48 #include "dev-replace.h"
49 #include "raid56.h"
50 #include "sysfs.h"
51 #include "qgroup.h"
52 #include "compression.h"
53
54 #ifdef CONFIG_X86
55 #include <asm/cpufeature.h>
56 #endif
57
58 #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
59 BTRFS_HEADER_FLAG_RELOC |\
60 BTRFS_SUPER_FLAG_ERROR |\
61 BTRFS_SUPER_FLAG_SEEDING |\
62 BTRFS_SUPER_FLAG_METADUMP)
63
64 static const struct extent_io_ops btree_extent_io_ops;
65 static void end_workqueue_fn(struct btrfs_work *work);
66 static void free_fs_root(struct btrfs_root *root);
67 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
68 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
69 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
70 struct btrfs_fs_info *fs_info);
71 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
72 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
73 struct extent_io_tree *dirty_pages,
74 int mark);
75 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
76 struct extent_io_tree *pinned_extents);
77 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
78 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
79
80 /*
81 * btrfs_end_io_wq structs are used to do processing in task context when an IO
82 * is complete. This is used during reads to verify checksums, and it is used
83 * by writes to insert metadata for new file extents after IO is complete.
84 */
85 struct btrfs_end_io_wq {
86 struct bio *bio;
87 bio_end_io_t *end_io;
88 void *private;
89 struct btrfs_fs_info *info;
90 int error;
91 enum btrfs_wq_endio_type metadata;
92 struct list_head list;
93 struct btrfs_work work;
94 };
95
96 static struct kmem_cache *btrfs_end_io_wq_cache;
97
98 int __init btrfs_end_io_wq_init(void)
99 {
100 btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
101 sizeof(struct btrfs_end_io_wq),
102 0,
103 SLAB_MEM_SPREAD,
104 NULL);
105 if (!btrfs_end_io_wq_cache)
106 return -ENOMEM;
107 return 0;
108 }
109
110 void btrfs_end_io_wq_exit(void)
111 {
112 kmem_cache_destroy(btrfs_end_io_wq_cache);
113 }
114
115 /*
116 * async submit bios are used to offload expensive checksumming
117 * onto the worker threads. They checksum file and metadata bios
118 * just before they are sent down the IO stack.
119 */
120 struct async_submit_bio {
121 struct inode *inode;
122 struct bio *bio;
123 struct list_head list;
124 extent_submit_bio_hook_t *submit_bio_start;
125 extent_submit_bio_hook_t *submit_bio_done;
126 int mirror_num;
127 unsigned long bio_flags;
128 /*
129 * bio_offset is optional, can be used if the pages in the bio
130 * can't tell us where in the file the bio should go
131 */
132 u64 bio_offset;
133 struct btrfs_work work;
134 int error;
135 };
136
137 /*
138 * Lockdep class keys for extent_buffer->lock's in this root. For a given
139 * eb, the lockdep key is determined by the btrfs_root it belongs to and
140 * the level the eb occupies in the tree.
141 *
142 * Different roots are used for different purposes and may nest inside each
143 * other and they require separate keysets. As lockdep keys should be
144 * static, assign keysets according to the purpose of the root as indicated
145 * by btrfs_root->objectid. This ensures that all special purpose roots
146 * have separate keysets.
147 *
148 * Lock-nesting across peer nodes is always done with the immediate parent
149 * node locked thus preventing deadlock. As lockdep doesn't know this, use
150 * subclass to avoid triggering lockdep warning in such cases.
151 *
152 * The key is set by the readpage_end_io_hook after the buffer has passed
153 * csum validation but before the pages are unlocked. It is also set by
154 * btrfs_init_new_buffer on freshly allocated blocks.
155 *
156 * We also add a check to make sure the highest level of the tree is the
157 * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
158 * needs update as well.
159 */
160 #ifdef CONFIG_DEBUG_LOCK_ALLOC
161 # if BTRFS_MAX_LEVEL != 8
162 # error
163 # endif
164
165 static struct btrfs_lockdep_keyset {
166 u64 id; /* root objectid */
167 const char *name_stem; /* lock name stem */
168 char names[BTRFS_MAX_LEVEL + 1][20];
169 struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
170 } btrfs_lockdep_keysets[] = {
171 { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" },
172 { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" },
173 { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" },
174 { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" },
175 { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" },
176 { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" },
177 { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" },
178 { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
179 { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
180 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
181 { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" },
182 { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
183 { .id = 0, .name_stem = "tree" },
184 };
185
186 void __init btrfs_init_lockdep(void)
187 {
188 int i, j;
189
190 /* initialize lockdep class names */
191 for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
192 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
193
194 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
195 snprintf(ks->names[j], sizeof(ks->names[j]),
196 "btrfs-%s-%02d", ks->name_stem, j);
197 }
198 }
199
200 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
201 int level)
202 {
203 struct btrfs_lockdep_keyset *ks;
204
205 BUG_ON(level >= ARRAY_SIZE(ks->keys));
206
207 /* find the matching keyset, id 0 is the default entry */
208 for (ks = btrfs_lockdep_keysets; ks->id; ks++)
209 if (ks->id == objectid)
210 break;
211
212 lockdep_set_class_and_name(&eb->lock,
213 &ks->keys[level], ks->names[level]);
214 }
215
216 #endif
217
218 /*
219 * extents on the btree inode are pretty simple, there's one extent
220 * that covers the entire device
221 */
222 static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
223 struct page *page, size_t pg_offset, u64 start, u64 len,
224 int create)
225 {
226 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
227 struct extent_map_tree *em_tree = &inode->extent_tree;
228 struct extent_map *em;
229 int ret;
230
231 read_lock(&em_tree->lock);
232 em = lookup_extent_mapping(em_tree, start, len);
233 if (em) {
234 em->bdev = fs_info->fs_devices->latest_bdev;
235 read_unlock(&em_tree->lock);
236 goto out;
237 }
238 read_unlock(&em_tree->lock);
239
240 em = alloc_extent_map();
241 if (!em) {
242 em = ERR_PTR(-ENOMEM);
243 goto out;
244 }
245 em->start = 0;
246 em->len = (u64)-1;
247 em->block_len = (u64)-1;
248 em->block_start = 0;
249 em->bdev = fs_info->fs_devices->latest_bdev;
250
251 write_lock(&em_tree->lock);
252 ret = add_extent_mapping(em_tree, em, 0);
253 if (ret == -EEXIST) {
254 free_extent_map(em);
255 em = lookup_extent_mapping(em_tree, start, len);
256 if (!em)
257 em = ERR_PTR(-EIO);
258 } else if (ret) {
259 free_extent_map(em);
260 em = ERR_PTR(ret);
261 }
262 write_unlock(&em_tree->lock);
263
264 out:
265 return em;
266 }
267
268 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
269 {
270 return btrfs_crc32c(seed, data, len);
271 }
272
273 void btrfs_csum_final(u32 crc, u8 *result)
274 {
275 put_unaligned_le32(~crc, result);
276 }
277
278 /*
279 * compute the csum for a btree block, and either verify it or write it
280 * into the csum field of the block.
281 */
282 static int csum_tree_block(struct btrfs_fs_info *fs_info,
283 struct extent_buffer *buf,
284 int verify)
285 {
286 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
287 char *result = NULL;
288 unsigned long len;
289 unsigned long cur_len;
290 unsigned long offset = BTRFS_CSUM_SIZE;
291 char *kaddr;
292 unsigned long map_start;
293 unsigned long map_len;
294 int err;
295 u32 crc = ~(u32)0;
296 unsigned long inline_result;
297
298 len = buf->len - offset;
299 while (len > 0) {
300 err = map_private_extent_buffer(buf, offset, 32,
301 &kaddr, &map_start, &map_len);
302 if (err)
303 return err;
304 cur_len = min(len, map_len - (offset - map_start));
305 crc = btrfs_csum_data(kaddr + offset - map_start,
306 crc, cur_len);
307 len -= cur_len;
308 offset += cur_len;
309 }
310 if (csum_size > sizeof(inline_result)) {
311 result = kzalloc(csum_size, GFP_NOFS);
312 if (!result)
313 return -ENOMEM;
314 } else {
315 result = (char *)&inline_result;
316 }
317
318 btrfs_csum_final(crc, result);
319
320 if (verify) {
321 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
322 u32 val;
323 u32 found = 0;
324 memcpy(&found, result, csum_size);
325
326 read_extent_buffer(buf, &val, 0, csum_size);
327 btrfs_warn_rl(fs_info,
328 "%s checksum verify failed on %llu wanted %X found %X level %d",
329 fs_info->sb->s_id, buf->start,
330 val, found, btrfs_header_level(buf));
331 if (result != (char *)&inline_result)
332 kfree(result);
333 return -EUCLEAN;
334 }
335 } else {
336 write_extent_buffer(buf, result, 0, csum_size);
337 }
338 if (result != (char *)&inline_result)
339 kfree(result);
340 return 0;
341 }
342
343 /*
344 * we can't consider a given block up to date unless the transid of the
345 * block matches the transid in the parent node's pointer. This is how we
346 * detect blocks that either didn't get written at all or got written
347 * in the wrong place.
348 */
349 static int verify_parent_transid(struct extent_io_tree *io_tree,
350 struct extent_buffer *eb, u64 parent_transid,
351 int atomic)
352 {
353 struct extent_state *cached_state = NULL;
354 int ret;
355 bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
356
357 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
358 return 0;
359
360 if (atomic)
361 return -EAGAIN;
362
363 if (need_lock) {
364 btrfs_tree_read_lock(eb);
365 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
366 }
367
368 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
369 &cached_state);
370 if (extent_buffer_uptodate(eb) &&
371 btrfs_header_generation(eb) == parent_transid) {
372 ret = 0;
373 goto out;
374 }
375 btrfs_err_rl(eb->fs_info,
376 "parent transid verify failed on %llu wanted %llu found %llu",
377 eb->start,
378 parent_transid, btrfs_header_generation(eb));
379 ret = 1;
380
381 /*
382 * Things reading via commit roots that don't have normal protection,
383 * like send, can have a really old block in cache that may point at a
384 * block that has been freed and re-allocated. So don't clear uptodate
385 * if we find an eb that is under IO (dirty/writeback) because we could
386 * end up reading in the stale data and then writing it back out and
387 * making everybody very sad.
388 */
389 if (!extent_buffer_under_io(eb))
390 clear_extent_buffer_uptodate(eb);
391 out:
392 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
393 &cached_state, GFP_NOFS);
394 if (need_lock)
395 btrfs_tree_read_unlock_blocking(eb);
396 return ret;
397 }
398
399 /*
400 * Return 0 if the superblock checksum type matches the checksum value of that
401 * algorithm. Pass the raw disk superblock data.
402 */
403 static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
404 char *raw_disk_sb)
405 {
406 struct btrfs_super_block *disk_sb =
407 (struct btrfs_super_block *)raw_disk_sb;
408 u16 csum_type = btrfs_super_csum_type(disk_sb);
409 int ret = 0;
410
411 if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
412 u32 crc = ~(u32)0;
413 const int csum_size = sizeof(crc);
414 char result[csum_size];
415
416 /*
417 * The super_block structure does not span the whole
418 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
419 * is filled with zeros and is included in the checksum.
420 */
421 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
422 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
423 btrfs_csum_final(crc, result);
424
425 if (memcmp(raw_disk_sb, result, csum_size))
426 ret = 1;
427 }
428
429 if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
430 btrfs_err(fs_info, "unsupported checksum algorithm %u",
431 csum_type);
432 ret = 1;
433 }
434
435 return ret;
436 }
437
438 /*
439 * helper to read a given tree block, doing retries as required when
440 * the checksums don't match and we have alternate mirrors to try.
441 */
442 static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
443 struct extent_buffer *eb,
444 u64 parent_transid)
445 {
446 struct extent_io_tree *io_tree;
447 int failed = 0;
448 int ret;
449 int num_copies = 0;
450 int mirror_num = 0;
451 int failed_mirror = 0;
452
453 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
454 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
455 while (1) {
456 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
457 btree_get_extent, mirror_num);
458 if (!ret) {
459 if (!verify_parent_transid(io_tree, eb,
460 parent_transid, 0))
461 break;
462 else
463 ret = -EIO;
464 }
465
466 /*
467 * This buffer's crc is fine, but its contents are corrupted, so
468 * there is no reason to read the other copies, they won't be
469 * any less wrong.
470 */
471 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
472 break;
473
474 num_copies = btrfs_num_copies(fs_info,
475 eb->start, eb->len);
476 if (num_copies == 1)
477 break;
478
479 if (!failed_mirror) {
480 failed = 1;
481 failed_mirror = eb->read_mirror;
482 }
483
484 mirror_num++;
485 if (mirror_num == failed_mirror)
486 mirror_num++;
487
488 if (mirror_num > num_copies)
489 break;
490 }
491
492 if (failed && !ret && failed_mirror)
493 repair_eb_io_failure(fs_info, eb, failed_mirror);
494
495 return ret;
496 }
497
498 /*
499 * checksum a dirty tree block before IO. This has extra checks to make sure
500 * we only fill in the checksum field in the first page of a multi-page block
501 */
502
503 static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
504 {
505 u64 start = page_offset(page);
506 u64 found_start;
507 struct extent_buffer *eb;
508
509 eb = (struct extent_buffer *)page->private;
510 if (page != eb->pages[0])
511 return 0;
512
513 found_start = btrfs_header_bytenr(eb);
514 /*
515 * Please do not consolidate these warnings into a single if.
516 * It is useful to know what went wrong.
517 */
518 if (WARN_ON(found_start != start))
519 return -EUCLEAN;
520 if (WARN_ON(!PageUptodate(page)))
521 return -EUCLEAN;
522
523 ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
524 btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
525
526 return csum_tree_block(fs_info, eb, 0);
527 }
528
529 static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
530 struct extent_buffer *eb)
531 {
532 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
533 u8 fsid[BTRFS_UUID_SIZE];
534 int ret = 1;
535
536 read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
537 while (fs_devices) {
538 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
539 ret = 0;
540 break;
541 }
542 fs_devices = fs_devices->seed;
543 }
544 return ret;
545 }
546
547 #define CORRUPT(reason, eb, root, slot) \
548 btrfs_crit(root->fs_info, \
549 "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
550 btrfs_header_level(eb) == 0 ? "leaf" : "node", \
551 reason, btrfs_header_bytenr(eb), root->objectid, slot)
552
553 static noinline int check_leaf(struct btrfs_root *root,
554 struct extent_buffer *leaf)
555 {
556 struct btrfs_fs_info *fs_info = root->fs_info;
557 struct btrfs_key key;
558 struct btrfs_key leaf_key;
559 u32 nritems = btrfs_header_nritems(leaf);
560 int slot;
561
562 /*
563 * Extent buffers from a relocation tree have a owner field that
564 * corresponds to the subvolume tree they are based on. So just from an
565 * extent buffer alone we can not find out what is the id of the
566 * corresponding subvolume tree, so we can not figure out if the extent
567 * buffer corresponds to the root of the relocation tree or not. So skip
568 * this check for relocation trees.
569 */
570 if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
571 struct btrfs_root *check_root;
572
573 key.objectid = btrfs_header_owner(leaf);
574 key.type = BTRFS_ROOT_ITEM_KEY;
575 key.offset = (u64)-1;
576
577 check_root = btrfs_get_fs_root(fs_info, &key, false);
578 /*
579 * The only reason we also check NULL here is that during
580 * open_ctree() some roots has not yet been set up.
581 */
582 if (!IS_ERR_OR_NULL(check_root)) {
583 struct extent_buffer *eb;
584
585 eb = btrfs_root_node(check_root);
586 /* if leaf is the root, then it's fine */
587 if (leaf != eb) {
588 CORRUPT("non-root leaf's nritems is 0",
589 leaf, check_root, 0);
590 free_extent_buffer(eb);
591 return -EIO;
592 }
593 free_extent_buffer(eb);
594 }
595 return 0;
596 }
597
598 if (nritems == 0)
599 return 0;
600
601 /* Check the 0 item */
602 if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
603 BTRFS_LEAF_DATA_SIZE(fs_info)) {
604 CORRUPT("invalid item offset size pair", leaf, root, 0);
605 return -EIO;
606 }
607
608 /*
609 * Check to make sure each items keys are in the correct order and their
610 * offsets make sense. We only have to loop through nritems-1 because
611 * we check the current slot against the next slot, which verifies the
612 * next slot's offset+size makes sense and that the current's slot
613 * offset is correct.
614 */
615 for (slot = 0; slot < nritems - 1; slot++) {
616 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
617 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
618
619 /* Make sure the keys are in the right order */
620 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
621 CORRUPT("bad key order", leaf, root, slot);
622 return -EIO;
623 }
624
625 /*
626 * Make sure the offset and ends are right, remember that the
627 * item data starts at the end of the leaf and grows towards the
628 * front.
629 */
630 if (btrfs_item_offset_nr(leaf, slot) !=
631 btrfs_item_end_nr(leaf, slot + 1)) {
632 CORRUPT("slot offset bad", leaf, root, slot);
633 return -EIO;
634 }
635
636 /*
637 * Check to make sure that we don't point outside of the leaf,
638 * just in case all the items are consistent to each other, but
639 * all point outside of the leaf.
640 */
641 if (btrfs_item_end_nr(leaf, slot) >
642 BTRFS_LEAF_DATA_SIZE(fs_info)) {
643 CORRUPT("slot end outside of leaf", leaf, root, slot);
644 return -EIO;
645 }
646 }
647
648 return 0;
649 }
650
651 static int check_node(struct btrfs_root *root, struct extent_buffer *node)
652 {
653 unsigned long nr = btrfs_header_nritems(node);
654 struct btrfs_key key, next_key;
655 int slot;
656 u64 bytenr;
657 int ret = 0;
658
659 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
660 btrfs_crit(root->fs_info,
661 "corrupt node: block %llu root %llu nritems %lu",
662 node->start, root->objectid, nr);
663 return -EIO;
664 }
665
666 for (slot = 0; slot < nr - 1; slot++) {
667 bytenr = btrfs_node_blockptr(node, slot);
668 btrfs_node_key_to_cpu(node, &key, slot);
669 btrfs_node_key_to_cpu(node, &next_key, slot + 1);
670
671 if (!bytenr) {
672 CORRUPT("invalid item slot", node, root, slot);
673 ret = -EIO;
674 goto out;
675 }
676
677 if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
678 CORRUPT("bad key order", node, root, slot);
679 ret = -EIO;
680 goto out;
681 }
682 }
683 out:
684 return ret;
685 }
686
687 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
688 u64 phy_offset, struct page *page,
689 u64 start, u64 end, int mirror)
690 {
691 u64 found_start;
692 int found_level;
693 struct extent_buffer *eb;
694 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
695 struct btrfs_fs_info *fs_info = root->fs_info;
696 int ret = 0;
697 int reads_done;
698
699 if (!page->private)
700 goto out;
701
702 eb = (struct extent_buffer *)page->private;
703
704 /* the pending IO might have been the only thing that kept this buffer
705 * in memory. Make sure we have a ref for all this other checks
706 */
707 extent_buffer_get(eb);
708
709 reads_done = atomic_dec_and_test(&eb->io_pages);
710 if (!reads_done)
711 goto err;
712
713 eb->read_mirror = mirror;
714 if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
715 ret = -EIO;
716 goto err;
717 }
718
719 found_start = btrfs_header_bytenr(eb);
720 if (found_start != eb->start) {
721 btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
722 found_start, eb->start);
723 ret = -EIO;
724 goto err;
725 }
726 if (check_tree_block_fsid(fs_info, eb)) {
727 btrfs_err_rl(fs_info, "bad fsid on block %llu",
728 eb->start);
729 ret = -EIO;
730 goto err;
731 }
732 found_level = btrfs_header_level(eb);
733 if (found_level >= BTRFS_MAX_LEVEL) {
734 btrfs_err(fs_info, "bad tree block level %d",
735 (int)btrfs_header_level(eb));
736 ret = -EIO;
737 goto err;
738 }
739
740 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
741 eb, found_level);
742
743 ret = csum_tree_block(fs_info, eb, 1);
744 if (ret)
745 goto err;
746
747 /*
748 * If this is a leaf block and it is corrupt, set the corrupt bit so
749 * that we don't try and read the other copies of this block, just
750 * return -EIO.
751 */
752 if (found_level == 0 && check_leaf(root, eb)) {
753 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
754 ret = -EIO;
755 }
756
757 if (found_level > 0 && check_node(root, eb))
758 ret = -EIO;
759
760 if (!ret)
761 set_extent_buffer_uptodate(eb);
762 err:
763 if (reads_done &&
764 test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
765 btree_readahead_hook(fs_info, eb, ret);
766
767 if (ret) {
768 /*
769 * our io error hook is going to dec the io pages
770 * again, we have to make sure it has something
771 * to decrement
772 */
773 atomic_inc(&eb->io_pages);
774 clear_extent_buffer_uptodate(eb);
775 }
776 free_extent_buffer(eb);
777 out:
778 return ret;
779 }
780
781 static int btree_io_failed_hook(struct page *page, int failed_mirror)
782 {
783 struct extent_buffer *eb;
784
785 eb = (struct extent_buffer *)page->private;
786 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
787 eb->read_mirror = failed_mirror;
788 atomic_dec(&eb->io_pages);
789 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
790 btree_readahead_hook(eb->fs_info, eb, -EIO);
791 return -EIO; /* we fixed nothing */
792 }
793
794 static void end_workqueue_bio(struct bio *bio)
795 {
796 struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
797 struct btrfs_fs_info *fs_info;
798 struct btrfs_workqueue *wq;
799 btrfs_work_func_t func;
800
801 fs_info = end_io_wq->info;
802 end_io_wq->error = bio->bi_error;
803
804 if (bio_op(bio) == REQ_OP_WRITE) {
805 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
806 wq = fs_info->endio_meta_write_workers;
807 func = btrfs_endio_meta_write_helper;
808 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
809 wq = fs_info->endio_freespace_worker;
810 func = btrfs_freespace_write_helper;
811 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
812 wq = fs_info->endio_raid56_workers;
813 func = btrfs_endio_raid56_helper;
814 } else {
815 wq = fs_info->endio_write_workers;
816 func = btrfs_endio_write_helper;
817 }
818 } else {
819 if (unlikely(end_io_wq->metadata ==
820 BTRFS_WQ_ENDIO_DIO_REPAIR)) {
821 wq = fs_info->endio_repair_workers;
822 func = btrfs_endio_repair_helper;
823 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
824 wq = fs_info->endio_raid56_workers;
825 func = btrfs_endio_raid56_helper;
826 } else if (end_io_wq->metadata) {
827 wq = fs_info->endio_meta_workers;
828 func = btrfs_endio_meta_helper;
829 } else {
830 wq = fs_info->endio_workers;
831 func = btrfs_endio_helper;
832 }
833 }
834
835 btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
836 btrfs_queue_work(wq, &end_io_wq->work);
837 }
838
839 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
840 enum btrfs_wq_endio_type metadata)
841 {
842 struct btrfs_end_io_wq *end_io_wq;
843
844 end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
845 if (!end_io_wq)
846 return -ENOMEM;
847
848 end_io_wq->private = bio->bi_private;
849 end_io_wq->end_io = bio->bi_end_io;
850 end_io_wq->info = info;
851 end_io_wq->error = 0;
852 end_io_wq->bio = bio;
853 end_io_wq->metadata = metadata;
854
855 bio->bi_private = end_io_wq;
856 bio->bi_end_io = end_workqueue_bio;
857 return 0;
858 }
859
860 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
861 {
862 unsigned long limit = min_t(unsigned long,
863 info->thread_pool_size,
864 info->fs_devices->open_devices);
865 return 256 * limit;
866 }
867
868 static void run_one_async_start(struct btrfs_work *work)
869 {
870 struct async_submit_bio *async;
871 int ret;
872
873 async = container_of(work, struct async_submit_bio, work);
874 ret = async->submit_bio_start(async->inode, async->bio,
875 async->mirror_num, async->bio_flags,
876 async->bio_offset);
877 if (ret)
878 async->error = ret;
879 }
880
881 static void run_one_async_done(struct btrfs_work *work)
882 {
883 struct btrfs_fs_info *fs_info;
884 struct async_submit_bio *async;
885 int limit;
886
887 async = container_of(work, struct async_submit_bio, work);
888 fs_info = BTRFS_I(async->inode)->root->fs_info;
889
890 limit = btrfs_async_submit_limit(fs_info);
891 limit = limit * 2 / 3;
892
893 /*
894 * atomic_dec_return implies a barrier for waitqueue_active
895 */
896 if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
897 waitqueue_active(&fs_info->async_submit_wait))
898 wake_up(&fs_info->async_submit_wait);
899
900 /* If an error occurred we just want to clean up the bio and move on */
901 if (async->error) {
902 async->bio->bi_error = async->error;
903 bio_endio(async->bio);
904 return;
905 }
906
907 async->submit_bio_done(async->inode, async->bio, async->mirror_num,
908 async->bio_flags, async->bio_offset);
909 }
910
911 static void run_one_async_free(struct btrfs_work *work)
912 {
913 struct async_submit_bio *async;
914
915 async = container_of(work, struct async_submit_bio, work);
916 kfree(async);
917 }
918
919 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
920 struct bio *bio, int mirror_num,
921 unsigned long bio_flags,
922 u64 bio_offset,
923 extent_submit_bio_hook_t *submit_bio_start,
924 extent_submit_bio_hook_t *submit_bio_done)
925 {
926 struct async_submit_bio *async;
927
928 async = kmalloc(sizeof(*async), GFP_NOFS);
929 if (!async)
930 return -ENOMEM;
931
932 async->inode = inode;
933 async->bio = bio;
934 async->mirror_num = mirror_num;
935 async->submit_bio_start = submit_bio_start;
936 async->submit_bio_done = submit_bio_done;
937
938 btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
939 run_one_async_done, run_one_async_free);
940
941 async->bio_flags = bio_flags;
942 async->bio_offset = bio_offset;
943
944 async->error = 0;
945
946 atomic_inc(&fs_info->nr_async_submits);
947
948 if (op_is_sync(bio->bi_opf))
949 btrfs_set_work_high_priority(&async->work);
950
951 btrfs_queue_work(fs_info->workers, &async->work);
952
953 while (atomic_read(&fs_info->async_submit_draining) &&
954 atomic_read(&fs_info->nr_async_submits)) {
955 wait_event(fs_info->async_submit_wait,
956 (atomic_read(&fs_info->nr_async_submits) == 0));
957 }
958
959 return 0;
960 }
961
962 static int btree_csum_one_bio(struct bio *bio)
963 {
964 struct bio_vec *bvec;
965 struct btrfs_root *root;
966 int i, ret = 0;
967
968 bio_for_each_segment_all(bvec, bio, i) {
969 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
970 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
971 if (ret)
972 break;
973 }
974
975 return ret;
976 }
977
978 static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
979 int mirror_num, unsigned long bio_flags,
980 u64 bio_offset)
981 {
982 /*
983 * when we're called for a write, we're already in the async
984 * submission context. Just jump into btrfs_map_bio
985 */
986 return btree_csum_one_bio(bio);
987 }
988
989 static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
990 int mirror_num, unsigned long bio_flags,
991 u64 bio_offset)
992 {
993 int ret;
994
995 /*
996 * when we're called for a write, we're already in the async
997 * submission context. Just jump into btrfs_map_bio
998 */
999 ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
1000 if (ret) {
1001 bio->bi_error = ret;
1002 bio_endio(bio);
1003 }
1004 return ret;
1005 }
1006
1007 static int check_async_write(unsigned long bio_flags)
1008 {
1009 if (bio_flags & EXTENT_BIO_TREE_LOG)
1010 return 0;
1011 #ifdef CONFIG_X86
1012 if (static_cpu_has(X86_FEATURE_XMM4_2))
1013 return 0;
1014 #endif
1015 return 1;
1016 }
1017
1018 static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
1019 int mirror_num, unsigned long bio_flags,
1020 u64 bio_offset)
1021 {
1022 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1023 int async = check_async_write(bio_flags);
1024 int ret;
1025
1026 if (bio_op(bio) != REQ_OP_WRITE) {
1027 /*
1028 * called for a read, do the setup so that checksum validation
1029 * can happen in the async kernel threads
1030 */
1031 ret = btrfs_bio_wq_end_io(fs_info, bio,
1032 BTRFS_WQ_ENDIO_METADATA);
1033 if (ret)
1034 goto out_w_error;
1035 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
1036 } else if (!async) {
1037 ret = btree_csum_one_bio(bio);
1038 if (ret)
1039 goto out_w_error;
1040 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
1041 } else {
1042 /*
1043 * kthread helpers are used to submit writes so that
1044 * checksumming can happen in parallel across all CPUs
1045 */
1046 ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
1047 bio_offset,
1048 __btree_submit_bio_start,
1049 __btree_submit_bio_done);
1050 }
1051
1052 if (ret)
1053 goto out_w_error;
1054 return 0;
1055
1056 out_w_error:
1057 bio->bi_error = ret;
1058 bio_endio(bio);
1059 return ret;
1060 }
1061
1062 #ifdef CONFIG_MIGRATION
1063 static int btree_migratepage(struct address_space *mapping,
1064 struct page *newpage, struct page *page,
1065 enum migrate_mode mode)
1066 {
1067 /*
1068 * we can't safely write a btree page from here,
1069 * we haven't done the locking hook
1070 */
1071 if (PageDirty(page))
1072 return -EAGAIN;
1073 /*
1074 * Buffers may be managed in a filesystem specific way.
1075 * We must have no buffers or drop them.
1076 */
1077 if (page_has_private(page) &&
1078 !try_to_release_page(page, GFP_KERNEL))
1079 return -EAGAIN;
1080 return migrate_page(mapping, newpage, page, mode);
1081 }
1082 #endif
1083
1084
1085 static int btree_writepages(struct address_space *mapping,
1086 struct writeback_control *wbc)
1087 {
1088 struct btrfs_fs_info *fs_info;
1089 int ret;
1090
1091 if (wbc->sync_mode == WB_SYNC_NONE) {
1092
1093 if (wbc->for_kupdate)
1094 return 0;
1095
1096 fs_info = BTRFS_I(mapping->host)->root->fs_info;
1097 /* this is a bit racy, but that's ok */
1098 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
1099 BTRFS_DIRTY_METADATA_THRESH);
1100 if (ret < 0)
1101 return 0;
1102 }
1103 return btree_write_cache_pages(mapping, wbc);
1104 }
1105
1106 static int btree_readpage(struct file *file, struct page *page)
1107 {
1108 struct extent_io_tree *tree;
1109 tree = &BTRFS_I(page->mapping->host)->io_tree;
1110 return extent_read_full_page(tree, page, btree_get_extent, 0);
1111 }
1112
1113 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1114 {
1115 if (PageWriteback(page) || PageDirty(page))
1116 return 0;
1117
1118 return try_release_extent_buffer(page);
1119 }
1120
1121 static void btree_invalidatepage(struct page *page, unsigned int offset,
1122 unsigned int length)
1123 {
1124 struct extent_io_tree *tree;
1125 tree = &BTRFS_I(page->mapping->host)->io_tree;
1126 extent_invalidatepage(tree, page, offset);
1127 btree_releasepage(page, GFP_NOFS);
1128 if (PagePrivate(page)) {
1129 btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
1130 "page private not zero on page %llu",
1131 (unsigned long long)page_offset(page));
1132 ClearPagePrivate(page);
1133 set_page_private(page, 0);
1134 put_page(page);
1135 }
1136 }
1137
1138 static int btree_set_page_dirty(struct page *page)
1139 {
1140 #ifdef DEBUG
1141 struct extent_buffer *eb;
1142
1143 BUG_ON(!PagePrivate(page));
1144 eb = (struct extent_buffer *)page->private;
1145 BUG_ON(!eb);
1146 BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1147 BUG_ON(!atomic_read(&eb->refs));
1148 btrfs_assert_tree_locked(eb);
1149 #endif
1150 return __set_page_dirty_nobuffers(page);
1151 }
1152
1153 static const struct address_space_operations btree_aops = {
1154 .readpage = btree_readpage,
1155 .writepages = btree_writepages,
1156 .releasepage = btree_releasepage,
1157 .invalidatepage = btree_invalidatepage,
1158 #ifdef CONFIG_MIGRATION
1159 .migratepage = btree_migratepage,
1160 #endif
1161 .set_page_dirty = btree_set_page_dirty,
1162 };
1163
1164 void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
1165 {
1166 struct extent_buffer *buf = NULL;
1167 struct inode *btree_inode = fs_info->btree_inode;
1168
1169 buf = btrfs_find_create_tree_block(fs_info, bytenr);
1170 if (IS_ERR(buf))
1171 return;
1172 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1173 buf, WAIT_NONE, btree_get_extent, 0);
1174 free_extent_buffer(buf);
1175 }
1176
1177 int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
1178 int mirror_num, struct extent_buffer **eb)
1179 {
1180 struct extent_buffer *buf = NULL;
1181 struct inode *btree_inode = fs_info->btree_inode;
1182 struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1183 int ret;
1184
1185 buf = btrfs_find_create_tree_block(fs_info, bytenr);
1186 if (IS_ERR(buf))
1187 return 0;
1188
1189 set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1190
1191 ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
1192 btree_get_extent, mirror_num);
1193 if (ret) {
1194 free_extent_buffer(buf);
1195 return ret;
1196 }
1197
1198 if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1199 free_extent_buffer(buf);
1200 return -EIO;
1201 } else if (extent_buffer_uptodate(buf)) {
1202 *eb = buf;
1203 } else {
1204 free_extent_buffer(buf);
1205 }
1206 return 0;
1207 }
1208
1209 struct extent_buffer *btrfs_find_create_tree_block(
1210 struct btrfs_fs_info *fs_info,
1211 u64 bytenr)
1212 {
1213 if (btrfs_is_testing(fs_info))
1214 return alloc_test_extent_buffer(fs_info, bytenr);
1215 return alloc_extent_buffer(fs_info, bytenr);
1216 }
1217
1218
1219 int btrfs_write_tree_block(struct extent_buffer *buf)
1220 {
1221 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1222 buf->start + buf->len - 1);
1223 }
1224
1225 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1226 {
1227 return filemap_fdatawait_range(buf->pages[0]->mapping,
1228 buf->start, buf->start + buf->len - 1);
1229 }
1230
1231 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
1232 u64 parent_transid)
1233 {
1234 struct extent_buffer *buf = NULL;
1235 int ret;
1236
1237 buf = btrfs_find_create_tree_block(fs_info, bytenr);
1238 if (IS_ERR(buf))
1239 return buf;
1240
1241 ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
1242 if (ret) {
1243 free_extent_buffer(buf);
1244 return ERR_PTR(ret);
1245 }
1246 return buf;
1247
1248 }
1249
1250 void clean_tree_block(struct btrfs_fs_info *fs_info,
1251 struct extent_buffer *buf)
1252 {
1253 if (btrfs_header_generation(buf) ==
1254 fs_info->running_transaction->transid) {
1255 btrfs_assert_tree_locked(buf);
1256
1257 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1258 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1259 -buf->len,
1260 fs_info->dirty_metadata_batch);
1261 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1262 btrfs_set_lock_blocking(buf);
1263 clear_extent_buffer_dirty(buf);
1264 }
1265 }
1266 }
1267
1268 static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1269 {
1270 struct btrfs_subvolume_writers *writers;
1271 int ret;
1272
1273 writers = kmalloc(sizeof(*writers), GFP_NOFS);
1274 if (!writers)
1275 return ERR_PTR(-ENOMEM);
1276
1277 ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
1278 if (ret < 0) {
1279 kfree(writers);
1280 return ERR_PTR(ret);
1281 }
1282
1283 init_waitqueue_head(&writers->wait);
1284 return writers;
1285 }
1286
1287 static void
1288 btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
1289 {
1290 percpu_counter_destroy(&writers->counter);
1291 kfree(writers);
1292 }
1293
1294 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1295 u64 objectid)
1296 {
1297 bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1298 root->node = NULL;
1299 root->commit_root = NULL;
1300 root->state = 0;
1301 root->orphan_cleanup_state = 0;
1302
1303 root->objectid = objectid;
1304 root->last_trans = 0;
1305 root->highest_objectid = 0;
1306 root->nr_delalloc_inodes = 0;
1307 root->nr_ordered_extents = 0;
1308 root->name = NULL;
1309 root->inode_tree = RB_ROOT;
1310 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1311 root->block_rsv = NULL;
1312 root->orphan_block_rsv = NULL;
1313
1314 INIT_LIST_HEAD(&root->dirty_list);
1315 INIT_LIST_HEAD(&root->root_list);
1316 INIT_LIST_HEAD(&root->delalloc_inodes);
1317 INIT_LIST_HEAD(&root->delalloc_root);
1318 INIT_LIST_HEAD(&root->ordered_extents);
1319 INIT_LIST_HEAD(&root->ordered_root);
1320 INIT_LIST_HEAD(&root->logged_list[0]);
1321 INIT_LIST_HEAD(&root->logged_list[1]);
1322 spin_lock_init(&root->orphan_lock);
1323 spin_lock_init(&root->inode_lock);
1324 spin_lock_init(&root->delalloc_lock);
1325 spin_lock_init(&root->ordered_extent_lock);
1326 spin_lock_init(&root->accounting_lock);
1327 spin_lock_init(&root->log_extents_lock[0]);
1328 spin_lock_init(&root->log_extents_lock[1]);
1329 mutex_init(&root->objectid_mutex);
1330 mutex_init(&root->log_mutex);
1331 mutex_init(&root->ordered_extent_mutex);
1332 mutex_init(&root->delalloc_mutex);
1333 init_waitqueue_head(&root->log_writer_wait);
1334 init_waitqueue_head(&root->log_commit_wait[0]);
1335 init_waitqueue_head(&root->log_commit_wait[1]);
1336 INIT_LIST_HEAD(&root->log_ctxs[0]);
1337 INIT_LIST_HEAD(&root->log_ctxs[1]);
1338 atomic_set(&root->log_commit[0], 0);
1339 atomic_set(&root->log_commit[1], 0);
1340 atomic_set(&root->log_writers, 0);
1341 atomic_set(&root->log_batch, 0);
1342 atomic_set(&root->orphan_inodes, 0);
1343 atomic_set(&root->refs, 1);
1344 atomic_set(&root->will_be_snapshoted, 0);
1345 atomic_set(&root->qgroup_meta_rsv, 0);
1346 root->log_transid = 0;
1347 root->log_transid_committed = -1;
1348 root->last_log_commit = 0;
1349 if (!dummy)
1350 extent_io_tree_init(&root->dirty_log_pages,
1351 fs_info->btree_inode->i_mapping);
1352
1353 memset(&root->root_key, 0, sizeof(root->root_key));
1354 memset(&root->root_item, 0, sizeof(root->root_item));
1355 memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1356 if (!dummy)
1357 root->defrag_trans_start = fs_info->generation;
1358 else
1359 root->defrag_trans_start = 0;
1360 root->root_key.objectid = objectid;
1361 root->anon_dev = 0;
1362
1363 spin_lock_init(&root->root_item_lock);
1364 }
1365
1366 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1367 gfp_t flags)
1368 {
1369 struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1370 if (root)
1371 root->fs_info = fs_info;
1372 return root;
1373 }
1374
1375 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1376 /* Should only be used by the testing infrastructure */
1377 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
1378 {
1379 struct btrfs_root *root;
1380
1381 if (!fs_info)
1382 return ERR_PTR(-EINVAL);
1383
1384 root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1385 if (!root)
1386 return ERR_PTR(-ENOMEM);
1387
1388 /* We don't use the stripesize in selftest, set it as sectorsize */
1389 __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
1390 root->alloc_bytenr = 0;
1391
1392 return root;
1393 }
1394 #endif
1395
1396 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1397 struct btrfs_fs_info *fs_info,
1398 u64 objectid)
1399 {
1400 struct extent_buffer *leaf;
1401 struct btrfs_root *tree_root = fs_info->tree_root;
1402 struct btrfs_root *root;
1403 struct btrfs_key key;
1404 int ret = 0;
1405 uuid_le uuid;
1406
1407 root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1408 if (!root)
1409 return ERR_PTR(-ENOMEM);
1410
1411 __setup_root(root, fs_info, objectid);
1412 root->root_key.objectid = objectid;
1413 root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1414 root->root_key.offset = 0;
1415
1416 leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1417 if (IS_ERR(leaf)) {
1418 ret = PTR_ERR(leaf);
1419 leaf = NULL;
1420 goto fail;
1421 }
1422
1423 memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
1424 btrfs_set_header_bytenr(leaf, leaf->start);
1425 btrfs_set_header_generation(leaf, trans->transid);
1426 btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1427 btrfs_set_header_owner(leaf, objectid);
1428 root->node = leaf;
1429
1430 write_extent_buffer_fsid(leaf, fs_info->fsid);
1431 write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
1432 btrfs_mark_buffer_dirty(leaf);
1433
1434 root->commit_root = btrfs_root_node(root);
1435 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1436
1437 root->root_item.flags = 0;
1438 root->root_item.byte_limit = 0;
1439 btrfs_set_root_bytenr(&root->root_item, leaf->start);
1440 btrfs_set_root_generation(&root->root_item, trans->transid);
1441 btrfs_set_root_level(&root->root_item, 0);
1442 btrfs_set_root_refs(&root->root_item, 1);
1443 btrfs_set_root_used(&root->root_item, leaf->len);
1444 btrfs_set_root_last_snapshot(&root->root_item, 0);
1445 btrfs_set_root_dirid(&root->root_item, 0);
1446 uuid_le_gen(&uuid);
1447 memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1448 root->root_item.drop_level = 0;
1449
1450 key.objectid = objectid;
1451 key.type = BTRFS_ROOT_ITEM_KEY;
1452 key.offset = 0;
1453 ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1454 if (ret)
1455 goto fail;
1456
1457 btrfs_tree_unlock(leaf);
1458
1459 return root;
1460
1461 fail:
1462 if (leaf) {
1463 btrfs_tree_unlock(leaf);
1464 free_extent_buffer(root->commit_root);
1465 free_extent_buffer(leaf);
1466 }
1467 kfree(root);
1468
1469 return ERR_PTR(ret);
1470 }
1471
1472 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1473 struct btrfs_fs_info *fs_info)
1474 {
1475 struct btrfs_root *root;
1476 struct extent_buffer *leaf;
1477
1478 root = btrfs_alloc_root(fs_info, GFP_NOFS);
1479 if (!root)
1480 return ERR_PTR(-ENOMEM);
1481
1482 __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1483
1484 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1485 root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1486 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1487
1488 /*
1489 * DON'T set REF_COWS for log trees
1490 *
1491 * log trees do not get reference counted because they go away
1492 * before a real commit is actually done. They do store pointers
1493 * to file data extents, and those reference counts still get
1494 * updated (along with back refs to the log tree).
1495 */
1496
1497 leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1498 NULL, 0, 0, 0);
1499 if (IS_ERR(leaf)) {
1500 kfree(root);
1501 return ERR_CAST(leaf);
1502 }
1503
1504 memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
1505 btrfs_set_header_bytenr(leaf, leaf->start);
1506 btrfs_set_header_generation(leaf, trans->transid);
1507 btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1508 btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1509 root->node = leaf;
1510
1511 write_extent_buffer_fsid(root->node, fs_info->fsid);
1512 btrfs_mark_buffer_dirty(root->node);
1513 btrfs_tree_unlock(root->node);
1514 return root;
1515 }
1516
1517 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1518 struct btrfs_fs_info *fs_info)
1519 {
1520 struct btrfs_root *log_root;
1521
1522 log_root = alloc_log_tree(trans, fs_info);
1523 if (IS_ERR(log_root))
1524 return PTR_ERR(log_root);
1525 WARN_ON(fs_info->log_root_tree);
1526 fs_info->log_root_tree = log_root;
1527 return 0;
1528 }
1529
1530 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1531 struct btrfs_root *root)
1532 {
1533 struct btrfs_fs_info *fs_info = root->fs_info;
1534 struct btrfs_root *log_root;
1535 struct btrfs_inode_item *inode_item;
1536
1537 log_root = alloc_log_tree(trans, fs_info);
1538 if (IS_ERR(log_root))
1539 return PTR_ERR(log_root);
1540
1541 log_root->last_trans = trans->transid;
1542 log_root->root_key.offset = root->root_key.objectid;
1543
1544 inode_item = &log_root->root_item.inode;
1545 btrfs_set_stack_inode_generation(inode_item, 1);
1546 btrfs_set_stack_inode_size(inode_item, 3);
1547 btrfs_set_stack_inode_nlink(inode_item, 1);
1548 btrfs_set_stack_inode_nbytes(inode_item,
1549 fs_info->nodesize);
1550 btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1551
1552 btrfs_set_root_node(&log_root->root_item, log_root->node);
1553
1554 WARN_ON(root->log_root);
1555 root->log_root = log_root;
1556 root->log_transid = 0;
1557 root->log_transid_committed = -1;
1558 root->last_log_commit = 0;
1559 return 0;
1560 }
1561
1562 static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1563 struct btrfs_key *key)
1564 {
1565 struct btrfs_root *root;
1566 struct btrfs_fs_info *fs_info = tree_root->fs_info;
1567 struct btrfs_path *path;
1568 u64 generation;
1569 int ret;
1570
1571 path = btrfs_alloc_path();
1572 if (!path)
1573 return ERR_PTR(-ENOMEM);
1574
1575 root = btrfs_alloc_root(fs_info, GFP_NOFS);
1576 if (!root) {
1577 ret = -ENOMEM;
1578 goto alloc_fail;
1579 }
1580
1581 __setup_root(root, fs_info, key->objectid);
1582
1583 ret = btrfs_find_root(tree_root, key, path,
1584 &root->root_item, &root->root_key);
1585 if (ret) {
1586 if (ret > 0)
1587 ret = -ENOENT;
1588 goto find_fail;
1589 }
1590
1591 generation = btrfs_root_generation(&root->root_item);
1592 root->node = read_tree_block(fs_info,
1593 btrfs_root_bytenr(&root->root_item),
1594 generation);
1595 if (IS_ERR(root->node)) {
1596 ret = PTR_ERR(root->node);
1597 goto find_fail;
1598 } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1599 ret = -EIO;
1600 free_extent_buffer(root->node);
1601 goto find_fail;
1602 }
1603 root->commit_root = btrfs_root_node(root);
1604 out:
1605 btrfs_free_path(path);
1606 return root;
1607
1608 find_fail:
1609 kfree(root);
1610 alloc_fail:
1611 root = ERR_PTR(ret);
1612 goto out;
1613 }
1614
1615 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1616 struct btrfs_key *location)
1617 {
1618 struct btrfs_root *root;
1619
1620 root = btrfs_read_tree_root(tree_root, location);
1621 if (IS_ERR(root))
1622 return root;
1623
1624 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1625 set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1626 btrfs_check_and_init_root_item(&root->root_item);
1627 }
1628
1629 return root;
1630 }
1631
1632 int btrfs_init_fs_root(struct btrfs_root *root)
1633 {
1634 int ret;
1635 struct btrfs_subvolume_writers *writers;
1636
1637 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1638 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1639 GFP_NOFS);
1640 if (!root->free_ino_pinned || !root->free_ino_ctl) {
1641 ret = -ENOMEM;
1642 goto fail;
1643 }
1644
1645 writers = btrfs_alloc_subvolume_writers();
1646 if (IS_ERR(writers)) {
1647 ret = PTR_ERR(writers);
1648 goto fail;
1649 }
1650 root->subv_writers = writers;
1651
1652 btrfs_init_free_ino_ctl(root);
1653 spin_lock_init(&root->ino_cache_lock);
1654 init_waitqueue_head(&root->ino_cache_wait);
1655
1656 ret = get_anon_bdev(&root->anon_dev);
1657 if (ret)
1658 goto fail;
1659
1660 mutex_lock(&root->objectid_mutex);
1661 ret = btrfs_find_highest_objectid(root,
1662 &root->highest_objectid);
1663 if (ret) {
1664 mutex_unlock(&root->objectid_mutex);
1665 goto fail;
1666 }
1667
1668 ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1669
1670 mutex_unlock(&root->objectid_mutex);
1671
1672 return 0;
1673 fail:
1674 /* the caller is responsible to call free_fs_root */
1675 return ret;
1676 }
1677
1678 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1679 u64 root_id)
1680 {
1681 struct btrfs_root *root;
1682
1683 spin_lock(&fs_info->fs_roots_radix_lock);
1684 root = radix_tree_lookup(&fs_info->fs_roots_radix,
1685 (unsigned long)root_id);
1686 spin_unlock(&fs_info->fs_roots_radix_lock);
1687 return root;
1688 }
1689
1690 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1691 struct btrfs_root *root)
1692 {
1693 int ret;
1694
1695 ret = radix_tree_preload(GFP_NOFS);
1696 if (ret)
1697 return ret;
1698
1699 spin_lock(&fs_info->fs_roots_radix_lock);
1700 ret = radix_tree_insert(&fs_info->fs_roots_radix,
1701 (unsigned long)root->root_key.objectid,
1702 root);
1703 if (ret == 0)
1704 set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1705 spin_unlock(&fs_info->fs_roots_radix_lock);
1706 radix_tree_preload_end();
1707
1708 return ret;
1709 }
1710
1711 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1712 struct btrfs_key *location,
1713 bool check_ref)
1714 {
1715 struct btrfs_root *root;
1716 struct btrfs_path *path;
1717 struct btrfs_key key;
1718 int ret;
1719
1720 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1721 return fs_info->tree_root;
1722 if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1723 return fs_info->extent_root;
1724 if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1725 return fs_info->chunk_root;
1726 if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1727 return fs_info->dev_root;
1728 if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1729 return fs_info->csum_root;
1730 if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1731 return fs_info->quota_root ? fs_info->quota_root :
1732 ERR_PTR(-ENOENT);
1733 if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1734 return fs_info->uuid_root ? fs_info->uuid_root :
1735 ERR_PTR(-ENOENT);
1736 if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1737 return fs_info->free_space_root ? fs_info->free_space_root :
1738 ERR_PTR(-ENOENT);
1739 again:
1740 root = btrfs_lookup_fs_root(fs_info, location->objectid);
1741 if (root) {
1742 if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1743 return ERR_PTR(-ENOENT);
1744 return root;
1745 }
1746
1747 root = btrfs_read_fs_root(fs_info->tree_root, location);
1748 if (IS_ERR(root))
1749 return root;
1750
1751 if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1752 ret = -ENOENT;
1753 goto fail;
1754 }
1755
1756 ret = btrfs_init_fs_root(root);
1757 if (ret)
1758 goto fail;
1759
1760 path = btrfs_alloc_path();
1761 if (!path) {
1762 ret = -ENOMEM;
1763 goto fail;
1764 }
1765 key.objectid = BTRFS_ORPHAN_OBJECTID;
1766 key.type = BTRFS_ORPHAN_ITEM_KEY;
1767 key.offset = location->objectid;
1768
1769 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1770 btrfs_free_path(path);
1771 if (ret < 0)
1772 goto fail;
1773 if (ret == 0)
1774 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1775
1776 ret = btrfs_insert_fs_root(fs_info, root);
1777 if (ret) {
1778 if (ret == -EEXIST) {
1779 free_fs_root(root);
1780 goto again;
1781 }
1782 goto fail;
1783 }
1784 return root;
1785 fail:
1786 free_fs_root(root);
1787 return ERR_PTR(ret);
1788 }
1789
1790 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1791 {
1792 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1793 int ret = 0;
1794 struct btrfs_device *device;
1795 struct backing_dev_info *bdi;
1796
1797 rcu_read_lock();
1798 list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1799 if (!device->bdev)
1800 continue;
1801 bdi = blk_get_backing_dev_info(device->bdev);
1802 if (bdi_congested(bdi, bdi_bits)) {
1803 ret = 1;
1804 break;
1805 }
1806 }
1807 rcu_read_unlock();
1808 return ret;
1809 }
1810
1811 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1812 {
1813 int err;
1814
1815 err = bdi_setup_and_register(bdi, "btrfs");
1816 if (err)
1817 return err;
1818
1819 bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
1820 bdi->congested_fn = btrfs_congested_fn;
1821 bdi->congested_data = info;
1822 bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
1823 return 0;
1824 }
1825
1826 /*
1827 * called by the kthread helper functions to finally call the bio end_io
1828 * functions. This is where read checksum verification actually happens
1829 */
1830 static void end_workqueue_fn(struct btrfs_work *work)
1831 {
1832 struct bio *bio;
1833 struct btrfs_end_io_wq *end_io_wq;
1834
1835 end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1836 bio = end_io_wq->bio;
1837
1838 bio->bi_error = end_io_wq->error;
1839 bio->bi_private = end_io_wq->private;
1840 bio->bi_end_io = end_io_wq->end_io;
1841 kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1842 bio_endio(bio);
1843 }
1844
1845 static int cleaner_kthread(void *arg)
1846 {
1847 struct btrfs_root *root = arg;
1848 struct btrfs_fs_info *fs_info = root->fs_info;
1849 int again;
1850 struct btrfs_trans_handle *trans;
1851
1852 do {
1853 again = 0;
1854
1855 /* Make the cleaner go to sleep early. */
1856 if (btrfs_need_cleaner_sleep(fs_info))
1857 goto sleep;
1858
1859 /*
1860 * Do not do anything if we might cause open_ctree() to block
1861 * before we have finished mounting the filesystem.
1862 */
1863 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1864 goto sleep;
1865
1866 if (!mutex_trylock(&fs_info->cleaner_mutex))
1867 goto sleep;
1868
1869 /*
1870 * Avoid the problem that we change the status of the fs
1871 * during the above check and trylock.
1872 */
1873 if (btrfs_need_cleaner_sleep(fs_info)) {
1874 mutex_unlock(&fs_info->cleaner_mutex);
1875 goto sleep;
1876 }
1877
1878 mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
1879 btrfs_run_delayed_iputs(fs_info);
1880 mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
1881
1882 again = btrfs_clean_one_deleted_snapshot(root);
1883 mutex_unlock(&fs_info->cleaner_mutex);
1884
1885 /*
1886 * The defragger has dealt with the R/O remount and umount,
1887 * needn't do anything special here.
1888 */
1889 btrfs_run_defrag_inodes(fs_info);
1890
1891 /*
1892 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1893 * with relocation (btrfs_relocate_chunk) and relocation
1894 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1895 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1896 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1897 * unused block groups.
1898 */
1899 btrfs_delete_unused_bgs(fs_info);
1900 sleep:
1901 if (!again) {
1902 set_current_state(TASK_INTERRUPTIBLE);
1903 if (!kthread_should_stop())
1904 schedule();
1905 __set_current_state(TASK_RUNNING);
1906 }
1907 } while (!kthread_should_stop());
1908
1909 /*
1910 * Transaction kthread is stopped before us and wakes us up.
1911 * However we might have started a new transaction and COWed some
1912 * tree blocks when deleting unused block groups for example. So
1913 * make sure we commit the transaction we started to have a clean
1914 * shutdown when evicting the btree inode - if it has dirty pages
1915 * when we do the final iput() on it, eviction will trigger a
1916 * writeback for it which will fail with null pointer dereferences
1917 * since work queues and other resources were already released and
1918 * destroyed by the time the iput/eviction/writeback is made.
1919 */
1920 trans = btrfs_attach_transaction(root);
1921 if (IS_ERR(trans)) {
1922 if (PTR_ERR(trans) != -ENOENT)
1923 btrfs_err(fs_info,
1924 "cleaner transaction attach returned %ld",
1925 PTR_ERR(trans));
1926 } else {
1927 int ret;
1928
1929 ret = btrfs_commit_transaction(trans);
1930 if (ret)
1931 btrfs_err(fs_info,
1932 "cleaner open transaction commit returned %d",
1933 ret);
1934 }
1935
1936 return 0;
1937 }
1938
1939 static int transaction_kthread(void *arg)
1940 {
1941 struct btrfs_root *root = arg;
1942 struct btrfs_fs_info *fs_info = root->fs_info;
1943 struct btrfs_trans_handle *trans;
1944 struct btrfs_transaction *cur;
1945 u64 transid;
1946 unsigned long now;
1947 unsigned long delay;
1948 bool cannot_commit;
1949
1950 do {
1951 cannot_commit = false;
1952 delay = HZ * fs_info->commit_interval;
1953 mutex_lock(&fs_info->transaction_kthread_mutex);
1954
1955 spin_lock(&fs_info->trans_lock);
1956 cur = fs_info->running_transaction;
1957 if (!cur) {
1958 spin_unlock(&fs_info->trans_lock);
1959 goto sleep;
1960 }
1961
1962 now = get_seconds();
1963 if (cur->state < TRANS_STATE_BLOCKED &&
1964 (now < cur->start_time ||
1965 now - cur->start_time < fs_info->commit_interval)) {
1966 spin_unlock(&fs_info->trans_lock);
1967 delay = HZ * 5;
1968 goto sleep;
1969 }
1970 transid = cur->transid;
1971 spin_unlock(&fs_info->trans_lock);
1972
1973 /* If the file system is aborted, this will always fail. */
1974 trans = btrfs_attach_transaction(root);
1975 if (IS_ERR(trans)) {
1976 if (PTR_ERR(trans) != -ENOENT)
1977 cannot_commit = true;
1978 goto sleep;
1979 }
1980 if (transid == trans->transid) {
1981 btrfs_commit_transaction(trans);
1982 } else {
1983 btrfs_end_transaction(trans);
1984 }
1985 sleep:
1986 wake_up_process(fs_info->cleaner_kthread);
1987 mutex_unlock(&fs_info->transaction_kthread_mutex);
1988
1989 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1990 &fs_info->fs_state)))
1991 btrfs_cleanup_transaction(fs_info);
1992 set_current_state(TASK_INTERRUPTIBLE);
1993 if (!kthread_should_stop() &&
1994 (!btrfs_transaction_blocked(fs_info) ||
1995 cannot_commit))
1996 schedule_timeout(delay);
1997 __set_current_state(TASK_RUNNING);
1998 } while (!kthread_should_stop());
1999 return 0;
2000 }
2001
2002 /*
2003 * this will find the highest generation in the array of
2004 * root backups. The index of the highest array is returned,
2005 * or -1 if we can't find anything.
2006 *
2007 * We check to make sure the array is valid by comparing the
2008 * generation of the latest root in the array with the generation
2009 * in the super block. If they don't match we pitch it.
2010 */
2011 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
2012 {
2013 u64 cur;
2014 int newest_index = -1;
2015 struct btrfs_root_backup *root_backup;
2016 int i;
2017
2018 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2019 root_backup = info->super_copy->super_roots + i;
2020 cur = btrfs_backup_tree_root_gen(root_backup);
2021 if (cur == newest_gen)
2022 newest_index = i;
2023 }
2024
2025 /* check to see if we actually wrapped around */
2026 if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
2027 root_backup = info->super_copy->super_roots;
2028 cur = btrfs_backup_tree_root_gen(root_backup);
2029 if (cur == newest_gen)
2030 newest_index = 0;
2031 }
2032 return newest_index;
2033 }
2034
2035
2036 /*
2037 * find the oldest backup so we know where to store new entries
2038 * in the backup array. This will set the backup_root_index
2039 * field in the fs_info struct
2040 */
2041 static void find_oldest_super_backup(struct btrfs_fs_info *info,
2042 u64 newest_gen)
2043 {
2044 int newest_index = -1;
2045
2046 newest_index = find_newest_super_backup(info, newest_gen);
2047 /* if there was garbage in there, just move along */
2048 if (newest_index == -1) {
2049 info->backup_root_index = 0;
2050 } else {
2051 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
2052 }
2053 }
2054
2055 /*
2056 * copy all the root pointers into the super backup array.
2057 * this will bump the backup pointer by one when it is
2058 * done
2059 */
2060 static void backup_super_roots(struct btrfs_fs_info *info)
2061 {
2062 int next_backup;
2063 struct btrfs_root_backup *root_backup;
2064 int last_backup;
2065
2066 next_backup = info->backup_root_index;
2067 last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
2068 BTRFS_NUM_BACKUP_ROOTS;
2069
2070 /*
2071 * just overwrite the last backup if we're at the same generation
2072 * this happens only at umount
2073 */
2074 root_backup = info->super_for_commit->super_roots + last_backup;
2075 if (btrfs_backup_tree_root_gen(root_backup) ==
2076 btrfs_header_generation(info->tree_root->node))
2077 next_backup = last_backup;
2078
2079 root_backup = info->super_for_commit->super_roots + next_backup;
2080
2081 /*
2082 * make sure all of our padding and empty slots get zero filled
2083 * regardless of which ones we use today
2084 */
2085 memset(root_backup, 0, sizeof(*root_backup));
2086
2087 info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
2088
2089 btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
2090 btrfs_set_backup_tree_root_gen(root_backup,
2091 btrfs_header_generation(info->tree_root->node));
2092
2093 btrfs_set_backup_tree_root_level(root_backup,
2094 btrfs_header_level(info->tree_root->node));
2095
2096 btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
2097 btrfs_set_backup_chunk_root_gen(root_backup,
2098 btrfs_header_generation(info->chunk_root->node));
2099 btrfs_set_backup_chunk_root_level(root_backup,
2100 btrfs_header_level(info->chunk_root->node));
2101
2102 btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
2103 btrfs_set_backup_extent_root_gen(root_backup,
2104 btrfs_header_generation(info->extent_root->node));
2105 btrfs_set_backup_extent_root_level(root_backup,
2106 btrfs_header_level(info->extent_root->node));
2107
2108 /*
2109 * we might commit during log recovery, which happens before we set
2110 * the fs_root. Make sure it is valid before we fill it in.
2111 */
2112 if (info->fs_root && info->fs_root->node) {
2113 btrfs_set_backup_fs_root(root_backup,
2114 info->fs_root->node->start);
2115 btrfs_set_backup_fs_root_gen(root_backup,
2116 btrfs_header_generation(info->fs_root->node));
2117 btrfs_set_backup_fs_root_level(root_backup,
2118 btrfs_header_level(info->fs_root->node));
2119 }
2120
2121 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
2122 btrfs_set_backup_dev_root_gen(root_backup,
2123 btrfs_header_generation(info->dev_root->node));
2124 btrfs_set_backup_dev_root_level(root_backup,
2125 btrfs_header_level(info->dev_root->node));
2126
2127 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
2128 btrfs_set_backup_csum_root_gen(root_backup,
2129 btrfs_header_generation(info->csum_root->node));
2130 btrfs_set_backup_csum_root_level(root_backup,
2131 btrfs_header_level(info->csum_root->node));
2132
2133 btrfs_set_backup_total_bytes(root_backup,
2134 btrfs_super_total_bytes(info->super_copy));
2135 btrfs_set_backup_bytes_used(root_backup,
2136 btrfs_super_bytes_used(info->super_copy));
2137 btrfs_set_backup_num_devices(root_backup,
2138 btrfs_super_num_devices(info->super_copy));
2139
2140 /*
2141 * if we don't copy this out to the super_copy, it won't get remembered
2142 * for the next commit
2143 */
2144 memcpy(&info->super_copy->super_roots,
2145 &info->super_for_commit->super_roots,
2146 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2147 }
2148
2149 /*
2150 * this copies info out of the root backup array and back into
2151 * the in-memory super block. It is meant to help iterate through
2152 * the array, so you send it the number of backups you've already
2153 * tried and the last backup index you used.
2154 *
2155 * this returns -1 when it has tried all the backups
2156 */
2157 static noinline int next_root_backup(struct btrfs_fs_info *info,
2158 struct btrfs_super_block *super,
2159 int *num_backups_tried, int *backup_index)
2160 {
2161 struct btrfs_root_backup *root_backup;
2162 int newest = *backup_index;
2163
2164 if (*num_backups_tried == 0) {
2165 u64 gen = btrfs_super_generation(super);
2166
2167 newest = find_newest_super_backup(info, gen);
2168 if (newest == -1)
2169 return -1;
2170
2171 *backup_index = newest;
2172 *num_backups_tried = 1;
2173 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2174 /* we've tried all the backups, all done */
2175 return -1;
2176 } else {
2177 /* jump to the next oldest backup */
2178 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2179 BTRFS_NUM_BACKUP_ROOTS;
2180 *backup_index = newest;
2181 *num_backups_tried += 1;
2182 }
2183 root_backup = super->super_roots + newest;
2184
2185 btrfs_set_super_generation(super,
2186 btrfs_backup_tree_root_gen(root_backup));
2187 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2188 btrfs_set_super_root_level(super,
2189 btrfs_backup_tree_root_level(root_backup));
2190 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2191
2192 /*
2193 * fixme: the total bytes and num_devices need to match or we should
2194 * need a fsck
2195 */
2196 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2197 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2198 return 0;
2199 }
2200
2201 /* helper to cleanup workers */
2202 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2203 {
2204 btrfs_destroy_workqueue(fs_info->fixup_workers);
2205 btrfs_destroy_workqueue(fs_info->delalloc_workers);
2206 btrfs_destroy_workqueue(fs_info->workers);
2207 btrfs_destroy_workqueue(fs_info->endio_workers);
2208 btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2209 btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2210 btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2211 btrfs_destroy_workqueue(fs_info->rmw_workers);
2212 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2213 btrfs_destroy_workqueue(fs_info->endio_write_workers);
2214 btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2215 btrfs_destroy_workqueue(fs_info->submit_workers);
2216 btrfs_destroy_workqueue(fs_info->delayed_workers);
2217 btrfs_destroy_workqueue(fs_info->caching_workers);
2218 btrfs_destroy_workqueue(fs_info->readahead_workers);
2219 btrfs_destroy_workqueue(fs_info->flush_workers);
2220 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2221 btrfs_destroy_workqueue(fs_info->extent_workers);
2222 }
2223
2224 static void free_root_extent_buffers(struct btrfs_root *root)
2225 {
2226 if (root) {
2227 free_extent_buffer(root->node);
2228 free_extent_buffer(root->commit_root);
2229 root->node = NULL;
2230 root->commit_root = NULL;
2231 }
2232 }
2233
2234 /* helper to cleanup tree roots */
2235 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2236 {
2237 free_root_extent_buffers(info->tree_root);
2238
2239 free_root_extent_buffers(info->dev_root);
2240 free_root_extent_buffers(info->extent_root);
2241 free_root_extent_buffers(info->csum_root);
2242 free_root_extent_buffers(info->quota_root);
2243 free_root_extent_buffers(info->uuid_root);
2244 if (chunk_root)
2245 free_root_extent_buffers(info->chunk_root);
2246 free_root_extent_buffers(info->free_space_root);
2247 }
2248
2249 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2250 {
2251 int ret;
2252 struct btrfs_root *gang[8];
2253 int i;
2254
2255 while (!list_empty(&fs_info->dead_roots)) {
2256 gang[0] = list_entry(fs_info->dead_roots.next,
2257 struct btrfs_root, root_list);
2258 list_del(&gang[0]->root_list);
2259
2260 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2261 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2262 } else {
2263 free_extent_buffer(gang[0]->node);
2264 free_extent_buffer(gang[0]->commit_root);
2265 btrfs_put_fs_root(gang[0]);
2266 }
2267 }
2268
2269 while (1) {
2270 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2271 (void **)gang, 0,
2272 ARRAY_SIZE(gang));
2273 if (!ret)
2274 break;
2275 for (i = 0; i < ret; i++)
2276 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2277 }
2278
2279 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2280 btrfs_free_log_root_tree(NULL, fs_info);
2281 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2282 }
2283 }
2284
2285 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2286 {
2287 mutex_init(&fs_info->scrub_lock);
2288 atomic_set(&fs_info->scrubs_running, 0);
2289 atomic_set(&fs_info->scrub_pause_req, 0);
2290 atomic_set(&fs_info->scrubs_paused, 0);
2291 atomic_set(&fs_info->scrub_cancel_req, 0);
2292 init_waitqueue_head(&fs_info->scrub_pause_wait);
2293 fs_info->scrub_workers_refcnt = 0;
2294 }
2295
2296 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2297 {
2298 spin_lock_init(&fs_info->balance_lock);
2299 mutex_init(&fs_info->balance_mutex);
2300 atomic_set(&fs_info->balance_running, 0);
2301 atomic_set(&fs_info->balance_pause_req, 0);
2302 atomic_set(&fs_info->balance_cancel_req, 0);
2303 fs_info->balance_ctl = NULL;
2304 init_waitqueue_head(&fs_info->balance_wait_q);
2305 }
2306
2307 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2308 {
2309 struct inode *inode = fs_info->btree_inode;
2310
2311 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2312 set_nlink(inode, 1);
2313 /*
2314 * we set the i_size on the btree inode to the max possible int.
2315 * the real end of the address space is determined by all of
2316 * the devices in the system
2317 */
2318 inode->i_size = OFFSET_MAX;
2319 inode->i_mapping->a_ops = &btree_aops;
2320
2321 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2322 extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
2323 BTRFS_I(inode)->io_tree.track_uptodate = 0;
2324 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2325
2326 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2327
2328 BTRFS_I(inode)->root = fs_info->tree_root;
2329 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2330 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2331 btrfs_insert_inode_hash(inode);
2332 }
2333
2334 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2335 {
2336 fs_info->dev_replace.lock_owner = 0;
2337 atomic_set(&fs_info->dev_replace.nesting_level, 0);
2338 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2339 rwlock_init(&fs_info->dev_replace.lock);
2340 atomic_set(&fs_info->dev_replace.read_locks, 0);
2341 atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2342 init_waitqueue_head(&fs_info->replace_wait);
2343 init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2344 }
2345
2346 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2347 {
2348 spin_lock_init(&fs_info->qgroup_lock);
2349 mutex_init(&fs_info->qgroup_ioctl_lock);
2350 fs_info->qgroup_tree = RB_ROOT;
2351 fs_info->qgroup_op_tree = RB_ROOT;
2352 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2353 fs_info->qgroup_seq = 1;
2354 fs_info->qgroup_ulist = NULL;
2355 fs_info->qgroup_rescan_running = false;
2356 mutex_init(&fs_info->qgroup_rescan_lock);
2357 }
2358
2359 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2360 struct btrfs_fs_devices *fs_devices)
2361 {
2362 int max_active = fs_info->thread_pool_size;
2363 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2364
2365 fs_info->workers =
2366 btrfs_alloc_workqueue(fs_info, "worker",
2367 flags | WQ_HIGHPRI, max_active, 16);
2368
2369 fs_info->delalloc_workers =
2370 btrfs_alloc_workqueue(fs_info, "delalloc",
2371 flags, max_active, 2);
2372
2373 fs_info->flush_workers =
2374 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2375 flags, max_active, 0);
2376
2377 fs_info->caching_workers =
2378 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2379
2380 /*
2381 * a higher idle thresh on the submit workers makes it much more
2382 * likely that bios will be send down in a sane order to the
2383 * devices
2384 */
2385 fs_info->submit_workers =
2386 btrfs_alloc_workqueue(fs_info, "submit", flags,
2387 min_t(u64, fs_devices->num_devices,
2388 max_active), 64);
2389
2390 fs_info->fixup_workers =
2391 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2392
2393 /*
2394 * endios are largely parallel and should have a very
2395 * low idle thresh
2396 */
2397 fs_info->endio_workers =
2398 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2399 fs_info->endio_meta_workers =
2400 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2401 max_active, 4);
2402 fs_info->endio_meta_write_workers =
2403 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2404 max_active, 2);
2405 fs_info->endio_raid56_workers =
2406 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2407 max_active, 4);
2408 fs_info->endio_repair_workers =
2409 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2410 fs_info->rmw_workers =
2411 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2412 fs_info->endio_write_workers =
2413 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2414 max_active, 2);
2415 fs_info->endio_freespace_worker =
2416 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2417 max_active, 0);
2418 fs_info->delayed_workers =
2419 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2420 max_active, 0);
2421 fs_info->readahead_workers =
2422 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2423 max_active, 2);
2424 fs_info->qgroup_rescan_workers =
2425 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2426 fs_info->extent_workers =
2427 btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2428 min_t(u64, fs_devices->num_devices,
2429 max_active), 8);
2430
2431 if (!(fs_info->workers && fs_info->delalloc_workers &&
2432 fs_info->submit_workers && fs_info->flush_workers &&
2433 fs_info->endio_workers && fs_info->endio_meta_workers &&
2434 fs_info->endio_meta_write_workers &&
2435 fs_info->endio_repair_workers &&
2436 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2437 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2438 fs_info->caching_workers && fs_info->readahead_workers &&
2439 fs_info->fixup_workers && fs_info->delayed_workers &&
2440 fs_info->extent_workers &&
2441 fs_info->qgroup_rescan_workers)) {
2442 return -ENOMEM;
2443 }
2444
2445 return 0;
2446 }
2447
2448 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2449 struct btrfs_fs_devices *fs_devices)
2450 {
2451 int ret;
2452 struct btrfs_root *log_tree_root;
2453 struct btrfs_super_block *disk_super = fs_info->super_copy;
2454 u64 bytenr = btrfs_super_log_root(disk_super);
2455
2456 if (fs_devices->rw_devices == 0) {
2457 btrfs_warn(fs_info, "log replay required on RO media");
2458 return -EIO;
2459 }
2460
2461 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2462 if (!log_tree_root)
2463 return -ENOMEM;
2464
2465 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2466
2467 log_tree_root->node = read_tree_block(fs_info, bytenr,
2468 fs_info->generation + 1);
2469 if (IS_ERR(log_tree_root->node)) {
2470 btrfs_warn(fs_info, "failed to read log tree");
2471 ret = PTR_ERR(log_tree_root->node);
2472 kfree(log_tree_root);
2473 return ret;
2474 } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2475 btrfs_err(fs_info, "failed to read log tree");
2476 free_extent_buffer(log_tree_root->node);
2477 kfree(log_tree_root);
2478 return -EIO;
2479 }
2480 /* returns with log_tree_root freed on success */
2481 ret = btrfs_recover_log_trees(log_tree_root);
2482 if (ret) {
2483 btrfs_handle_fs_error(fs_info, ret,
2484 "Failed to recover log tree");
2485 free_extent_buffer(log_tree_root->node);
2486 kfree(log_tree_root);
2487 return ret;
2488 }
2489
2490 if (fs_info->sb->s_flags & MS_RDONLY) {
2491 ret = btrfs_commit_super(fs_info);
2492 if (ret)
2493 return ret;
2494 }
2495
2496 return 0;
2497 }
2498
2499 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2500 {
2501 struct btrfs_root *tree_root = fs_info->tree_root;
2502 struct btrfs_root *root;
2503 struct btrfs_key location;
2504 int ret;
2505
2506 BUG_ON(!fs_info->tree_root);
2507
2508 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2509 location.type = BTRFS_ROOT_ITEM_KEY;
2510 location.offset = 0;
2511
2512 root = btrfs_read_tree_root(tree_root, &location);
2513 if (IS_ERR(root))
2514 return PTR_ERR(root);
2515 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2516 fs_info->extent_root = root;
2517
2518 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2519 root = btrfs_read_tree_root(tree_root, &location);
2520 if (IS_ERR(root))
2521 return PTR_ERR(root);
2522 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2523 fs_info->dev_root = root;
2524 btrfs_init_devices_late(fs_info);
2525
2526 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2527 root = btrfs_read_tree_root(tree_root, &location);
2528 if (IS_ERR(root))
2529 return PTR_ERR(root);
2530 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2531 fs_info->csum_root = root;
2532
2533 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2534 root = btrfs_read_tree_root(tree_root, &location);
2535 if (!IS_ERR(root)) {
2536 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2537 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2538 fs_info->quota_root = root;
2539 }
2540
2541 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2542 root = btrfs_read_tree_root(tree_root, &location);
2543 if (IS_ERR(root)) {
2544 ret = PTR_ERR(root);
2545 if (ret != -ENOENT)
2546 return ret;
2547 } else {
2548 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2549 fs_info->uuid_root = root;
2550 }
2551
2552 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2553 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2554 root = btrfs_read_tree_root(tree_root, &location);
2555 if (IS_ERR(root))
2556 return PTR_ERR(root);
2557 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2558 fs_info->free_space_root = root;
2559 }
2560
2561 return 0;
2562 }
2563
2564 int open_ctree(struct super_block *sb,
2565 struct btrfs_fs_devices *fs_devices,
2566 char *options)
2567 {
2568 u32 sectorsize;
2569 u32 nodesize;
2570 u32 stripesize;
2571 u64 generation;
2572 u64 features;
2573 struct btrfs_key location;
2574 struct buffer_head *bh;
2575 struct btrfs_super_block *disk_super;
2576 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2577 struct btrfs_root *tree_root;
2578 struct btrfs_root *chunk_root;
2579 int ret;
2580 int err = -EINVAL;
2581 int num_backups_tried = 0;
2582 int backup_index = 0;
2583 int max_active;
2584 int clear_free_space_tree = 0;
2585
2586 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2587 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2588 if (!tree_root || !chunk_root) {
2589 err = -ENOMEM;
2590 goto fail;
2591 }
2592
2593 ret = init_srcu_struct(&fs_info->subvol_srcu);
2594 if (ret) {
2595 err = ret;
2596 goto fail;
2597 }
2598
2599 ret = setup_bdi(fs_info, &fs_info->bdi);
2600 if (ret) {
2601 err = ret;
2602 goto fail_srcu;
2603 }
2604
2605 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2606 if (ret) {
2607 err = ret;
2608 goto fail_bdi;
2609 }
2610 fs_info->dirty_metadata_batch = PAGE_SIZE *
2611 (1 + ilog2(nr_cpu_ids));
2612
2613 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2614 if (ret) {
2615 err = ret;
2616 goto fail_dirty_metadata_bytes;
2617 }
2618
2619 ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2620 if (ret) {
2621 err = ret;
2622 goto fail_delalloc_bytes;
2623 }
2624
2625 fs_info->btree_inode = new_inode(sb);
2626 if (!fs_info->btree_inode) {
2627 err = -ENOMEM;
2628 goto fail_bio_counter;
2629 }
2630
2631 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2632
2633 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2634 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2635 INIT_LIST_HEAD(&fs_info->trans_list);
2636 INIT_LIST_HEAD(&fs_info->dead_roots);
2637 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2638 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2639 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2640 spin_lock_init(&fs_info->delalloc_root_lock);
2641 spin_lock_init(&fs_info->trans_lock);
2642 spin_lock_init(&fs_info->fs_roots_radix_lock);
2643 spin_lock_init(&fs_info->delayed_iput_lock);
2644 spin_lock_init(&fs_info->defrag_inodes_lock);
2645 spin_lock_init(&fs_info->free_chunk_lock);
2646 spin_lock_init(&fs_info->tree_mod_seq_lock);
2647 spin_lock_init(&fs_info->super_lock);
2648 spin_lock_init(&fs_info->qgroup_op_lock);
2649 spin_lock_init(&fs_info->buffer_lock);
2650 spin_lock_init(&fs_info->unused_bgs_lock);
2651 rwlock_init(&fs_info->tree_mod_log_lock);
2652 mutex_init(&fs_info->unused_bg_unpin_mutex);
2653 mutex_init(&fs_info->delete_unused_bgs_mutex);
2654 mutex_init(&fs_info->reloc_mutex);
2655 mutex_init(&fs_info->delalloc_root_mutex);
2656 mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2657 seqlock_init(&fs_info->profiles_lock);
2658
2659 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2660 INIT_LIST_HEAD(&fs_info->space_info);
2661 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2662 INIT_LIST_HEAD(&fs_info->unused_bgs);
2663 btrfs_mapping_init(&fs_info->mapping_tree);
2664 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2665 BTRFS_BLOCK_RSV_GLOBAL);
2666 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2667 BTRFS_BLOCK_RSV_DELALLOC);
2668 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2669 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2670 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2671 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2672 BTRFS_BLOCK_RSV_DELOPS);
2673 atomic_set(&fs_info->nr_async_submits, 0);
2674 atomic_set(&fs_info->async_delalloc_pages, 0);
2675 atomic_set(&fs_info->async_submit_draining, 0);
2676 atomic_set(&fs_info->nr_async_bios, 0);
2677 atomic_set(&fs_info->defrag_running, 0);
2678 atomic_set(&fs_info->qgroup_op_seq, 0);
2679 atomic_set(&fs_info->reada_works_cnt, 0);
2680 atomic64_set(&fs_info->tree_mod_seq, 0);
2681 fs_info->fs_frozen = 0;
2682 fs_info->sb = sb;
2683 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2684 fs_info->metadata_ratio = 0;
2685 fs_info->defrag_inodes = RB_ROOT;
2686 fs_info->free_chunk_space = 0;
2687 fs_info->tree_mod_log = RB_ROOT;
2688 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2689 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2690 /* readahead state */
2691 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2692 spin_lock_init(&fs_info->reada_lock);
2693
2694 fs_info->thread_pool_size = min_t(unsigned long,
2695 num_online_cpus() + 2, 8);
2696
2697 INIT_LIST_HEAD(&fs_info->ordered_roots);
2698 spin_lock_init(&fs_info->ordered_root_lock);
2699 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2700 GFP_KERNEL);
2701 if (!fs_info->delayed_root) {
2702 err = -ENOMEM;
2703 goto fail_iput;
2704 }
2705 btrfs_init_delayed_root(fs_info->delayed_root);
2706
2707 btrfs_init_scrub(fs_info);
2708 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2709 fs_info->check_integrity_print_mask = 0;
2710 #endif
2711 btrfs_init_balance(fs_info);
2712 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2713
2714 sb->s_blocksize = 4096;
2715 sb->s_blocksize_bits = blksize_bits(4096);
2716 sb->s_bdi = &fs_info->bdi;
2717
2718 btrfs_init_btree_inode(fs_info);
2719
2720 spin_lock_init(&fs_info->block_group_cache_lock);
2721 fs_info->block_group_cache_tree = RB_ROOT;
2722 fs_info->first_logical_byte = (u64)-1;
2723
2724 extent_io_tree_init(&fs_info->freed_extents[0],
2725 fs_info->btree_inode->i_mapping);
2726 extent_io_tree_init(&fs_info->freed_extents[1],
2727 fs_info->btree_inode->i_mapping);
2728 fs_info->pinned_extents = &fs_info->freed_extents[0];
2729 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2730
2731 mutex_init(&fs_info->ordered_operations_mutex);
2732 mutex_init(&fs_info->tree_log_mutex);
2733 mutex_init(&fs_info->chunk_mutex);
2734 mutex_init(&fs_info->transaction_kthread_mutex);
2735 mutex_init(&fs_info->cleaner_mutex);
2736 mutex_init(&fs_info->volume_mutex);
2737 mutex_init(&fs_info->ro_block_group_mutex);
2738 init_rwsem(&fs_info->commit_root_sem);
2739 init_rwsem(&fs_info->cleanup_work_sem);
2740 init_rwsem(&fs_info->subvol_sem);
2741 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2742
2743 btrfs_init_dev_replace_locks(fs_info);
2744 btrfs_init_qgroup(fs_info);
2745
2746 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2747 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2748
2749 init_waitqueue_head(&fs_info->transaction_throttle);
2750 init_waitqueue_head(&fs_info->transaction_wait);
2751 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2752 init_waitqueue_head(&fs_info->async_submit_wait);
2753
2754 INIT_LIST_HEAD(&fs_info->pinned_chunks);
2755
2756 /* Usable values until the real ones are cached from the superblock */
2757 fs_info->nodesize = 4096;
2758 fs_info->sectorsize = 4096;
2759 fs_info->stripesize = 4096;
2760
2761 ret = btrfs_alloc_stripe_hash_table(fs_info);
2762 if (ret) {
2763 err = ret;
2764 goto fail_alloc;
2765 }
2766
2767 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2768
2769 invalidate_bdev(fs_devices->latest_bdev);
2770
2771 /*
2772 * Read super block and check the signature bytes only
2773 */
2774 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2775 if (IS_ERR(bh)) {
2776 err = PTR_ERR(bh);
2777 goto fail_alloc;
2778 }
2779
2780 /*
2781 * We want to check superblock checksum, the type is stored inside.
2782 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2783 */
2784 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2785 btrfs_err(fs_info, "superblock checksum mismatch");
2786 err = -EINVAL;
2787 brelse(bh);
2788 goto fail_alloc;
2789 }
2790
2791 /*
2792 * super_copy is zeroed at allocation time and we never touch the
2793 * following bytes up to INFO_SIZE, the checksum is calculated from
2794 * the whole block of INFO_SIZE
2795 */
2796 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2797 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2798 sizeof(*fs_info->super_for_commit));
2799 brelse(bh);
2800
2801 memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2802
2803 ret = btrfs_check_super_valid(fs_info);
2804 if (ret) {
2805 btrfs_err(fs_info, "superblock contains fatal errors");
2806 err = -EINVAL;
2807 goto fail_alloc;
2808 }
2809
2810 disk_super = fs_info->super_copy;
2811 if (!btrfs_super_root(disk_super))
2812 goto fail_alloc;
2813
2814 /* check FS state, whether FS is broken. */
2815 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2816 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2817
2818 /*
2819 * run through our array of backup supers and setup
2820 * our ring pointer to the oldest one
2821 */
2822 generation = btrfs_super_generation(disk_super);
2823 find_oldest_super_backup(fs_info, generation);
2824
2825 /*
2826 * In the long term, we'll store the compression type in the super
2827 * block, and it'll be used for per file compression control.
2828 */
2829 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2830
2831 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2832 if (ret) {
2833 err = ret;
2834 goto fail_alloc;
2835 }
2836
2837 features = btrfs_super_incompat_flags(disk_super) &
2838 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2839 if (features) {
2840 btrfs_err(fs_info,
2841 "cannot mount because of unsupported optional features (%llx)",
2842 features);
2843 err = -EINVAL;
2844 goto fail_alloc;
2845 }
2846
2847 features = btrfs_super_incompat_flags(disk_super);
2848 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2849 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2850 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2851
2852 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2853 btrfs_info(fs_info, "has skinny extents");
2854
2855 /*
2856 * flag our filesystem as having big metadata blocks if
2857 * they are bigger than the page size
2858 */
2859 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2860 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2861 btrfs_info(fs_info,
2862 "flagging fs with big metadata feature");
2863 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2864 }
2865
2866 nodesize = btrfs_super_nodesize(disk_super);
2867 sectorsize = btrfs_super_sectorsize(disk_super);
2868 stripesize = sectorsize;
2869 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2870 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2871
2872 /* Cache block sizes */
2873 fs_info->nodesize = nodesize;
2874 fs_info->sectorsize = sectorsize;
2875 fs_info->stripesize = stripesize;
2876
2877 /*
2878 * mixed block groups end up with duplicate but slightly offset
2879 * extent buffers for the same range. It leads to corruptions
2880 */
2881 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2882 (sectorsize != nodesize)) {
2883 btrfs_err(fs_info,
2884 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2885 nodesize, sectorsize);
2886 goto fail_alloc;
2887 }
2888
2889 /*
2890 * Needn't use the lock because there is no other task which will
2891 * update the flag.
2892 */
2893 btrfs_set_super_incompat_flags(disk_super, features);
2894
2895 features = btrfs_super_compat_ro_flags(disk_super) &
2896 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2897 if (!(sb->s_flags & MS_RDONLY) && features) {
2898 btrfs_err(fs_info,
2899 "cannot mount read-write because of unsupported optional features (%llx)",
2900 features);
2901 err = -EINVAL;
2902 goto fail_alloc;
2903 }
2904
2905 max_active = fs_info->thread_pool_size;
2906
2907 ret = btrfs_init_workqueues(fs_info, fs_devices);
2908 if (ret) {
2909 err = ret;
2910 goto fail_sb_buffer;
2911 }
2912
2913 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2914 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2915 SZ_4M / PAGE_SIZE);
2916
2917 sb->s_blocksize = sectorsize;
2918 sb->s_blocksize_bits = blksize_bits(sectorsize);
2919
2920 mutex_lock(&fs_info->chunk_mutex);
2921 ret = btrfs_read_sys_array(fs_info);
2922 mutex_unlock(&fs_info->chunk_mutex);
2923 if (ret) {
2924 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2925 goto fail_sb_buffer;
2926 }
2927
2928 generation = btrfs_super_chunk_root_generation(disk_super);
2929
2930 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2931
2932 chunk_root->node = read_tree_block(fs_info,
2933 btrfs_super_chunk_root(disk_super),
2934 generation);
2935 if (IS_ERR(chunk_root->node) ||
2936 !extent_buffer_uptodate(chunk_root->node)) {
2937 btrfs_err(fs_info, "failed to read chunk root");
2938 if (!IS_ERR(chunk_root->node))
2939 free_extent_buffer(chunk_root->node);
2940 chunk_root->node = NULL;
2941 goto fail_tree_roots;
2942 }
2943 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2944 chunk_root->commit_root = btrfs_root_node(chunk_root);
2945
2946 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2947 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2948
2949 ret = btrfs_read_chunk_tree(fs_info);
2950 if (ret) {
2951 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2952 goto fail_tree_roots;
2953 }
2954
2955 /*
2956 * keep the device that is marked to be the target device for the
2957 * dev_replace procedure
2958 */
2959 btrfs_close_extra_devices(fs_devices, 0);
2960
2961 if (!fs_devices->latest_bdev) {
2962 btrfs_err(fs_info, "failed to read devices");
2963 goto fail_tree_roots;
2964 }
2965
2966 retry_root_backup:
2967 generation = btrfs_super_generation(disk_super);
2968
2969 tree_root->node = read_tree_block(fs_info,
2970 btrfs_super_root(disk_super),
2971 generation);
2972 if (IS_ERR(tree_root->node) ||
2973 !extent_buffer_uptodate(tree_root->node)) {
2974 btrfs_warn(fs_info, "failed to read tree root");
2975 if (!IS_ERR(tree_root->node))
2976 free_extent_buffer(tree_root->node);
2977 tree_root->node = NULL;
2978 goto recovery_tree_root;
2979 }
2980
2981 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2982 tree_root->commit_root = btrfs_root_node(tree_root);
2983 btrfs_set_root_refs(&tree_root->root_item, 1);
2984
2985 mutex_lock(&tree_root->objectid_mutex);
2986 ret = btrfs_find_highest_objectid(tree_root,
2987 &tree_root->highest_objectid);
2988 if (ret) {
2989 mutex_unlock(&tree_root->objectid_mutex);
2990 goto recovery_tree_root;
2991 }
2992
2993 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2994
2995 mutex_unlock(&tree_root->objectid_mutex);
2996
2997 ret = btrfs_read_roots(fs_info);
2998 if (ret)
2999 goto recovery_tree_root;
3000
3001 fs_info->generation = generation;
3002 fs_info->last_trans_committed = generation;
3003
3004 ret = btrfs_recover_balance(fs_info);
3005 if (ret) {
3006 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3007 goto fail_block_groups;
3008 }
3009
3010 ret = btrfs_init_dev_stats(fs_info);
3011 if (ret) {
3012 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3013 goto fail_block_groups;
3014 }
3015
3016 ret = btrfs_init_dev_replace(fs_info);
3017 if (ret) {
3018 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3019 goto fail_block_groups;
3020 }
3021
3022 btrfs_close_extra_devices(fs_devices, 1);
3023
3024 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3025 if (ret) {
3026 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3027 ret);
3028 goto fail_block_groups;
3029 }
3030
3031 ret = btrfs_sysfs_add_device(fs_devices);
3032 if (ret) {
3033 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3034 ret);
3035 goto fail_fsdev_sysfs;
3036 }
3037
3038 ret = btrfs_sysfs_add_mounted(fs_info);
3039 if (ret) {
3040 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3041 goto fail_fsdev_sysfs;
3042 }
3043
3044 ret = btrfs_init_space_info(fs_info);
3045 if (ret) {
3046 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3047 goto fail_sysfs;
3048 }
3049
3050 ret = btrfs_read_block_groups(fs_info);
3051 if (ret) {
3052 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3053 goto fail_sysfs;
3054 }
3055 fs_info->num_tolerated_disk_barrier_failures =
3056 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3057 if (fs_info->fs_devices->missing_devices >
3058 fs_info->num_tolerated_disk_barrier_failures &&
3059 !(sb->s_flags & MS_RDONLY)) {
3060 btrfs_warn(fs_info,
3061 "missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
3062 fs_info->fs_devices->missing_devices,
3063 fs_info->num_tolerated_disk_barrier_failures);
3064 goto fail_sysfs;
3065 }
3066
3067 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3068 "btrfs-cleaner");
3069 if (IS_ERR(fs_info->cleaner_kthread))
3070 goto fail_sysfs;
3071
3072 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3073 tree_root,
3074 "btrfs-transaction");
3075 if (IS_ERR(fs_info->transaction_kthread))
3076 goto fail_cleaner;
3077
3078 if (!btrfs_test_opt(fs_info, SSD) &&
3079 !btrfs_test_opt(fs_info, NOSSD) &&
3080 !fs_info->fs_devices->rotating) {
3081 btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
3082 btrfs_set_opt(fs_info->mount_opt, SSD);
3083 }
3084
3085 /*
3086 * Mount does not set all options immediately, we can do it now and do
3087 * not have to wait for transaction commit
3088 */
3089 btrfs_apply_pending_changes(fs_info);
3090
3091 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3092 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3093 ret = btrfsic_mount(fs_info, fs_devices,
3094 btrfs_test_opt(fs_info,
3095 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3096 1 : 0,
3097 fs_info->check_integrity_print_mask);
3098 if (ret)
3099 btrfs_warn(fs_info,
3100 "failed to initialize integrity check module: %d",
3101 ret);
3102 }
3103 #endif
3104 ret = btrfs_read_qgroup_config(fs_info);
3105 if (ret)
3106 goto fail_trans_kthread;
3107
3108 /* do not make disk changes in broken FS or nologreplay is given */
3109 if (btrfs_super_log_root(disk_super) != 0 &&
3110 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3111 ret = btrfs_replay_log(fs_info, fs_devices);
3112 if (ret) {
3113 err = ret;
3114 goto fail_qgroup;
3115 }
3116 }
3117
3118 ret = btrfs_find_orphan_roots(fs_info);
3119 if (ret)
3120 goto fail_qgroup;
3121
3122 if (!(sb->s_flags & MS_RDONLY)) {
3123 ret = btrfs_cleanup_fs_roots(fs_info);
3124 if (ret)
3125 goto fail_qgroup;
3126
3127 mutex_lock(&fs_info->cleaner_mutex);
3128 ret = btrfs_recover_relocation(tree_root);
3129 mutex_unlock(&fs_info->cleaner_mutex);
3130 if (ret < 0) {
3131 btrfs_warn(fs_info, "failed to recover relocation: %d",
3132 ret);
3133 err = -EINVAL;
3134 goto fail_qgroup;
3135 }
3136 }
3137
3138 location.objectid = BTRFS_FS_TREE_OBJECTID;
3139 location.type = BTRFS_ROOT_ITEM_KEY;
3140 location.offset = 0;
3141
3142 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3143 if (IS_ERR(fs_info->fs_root)) {
3144 err = PTR_ERR(fs_info->fs_root);
3145 goto fail_qgroup;
3146 }
3147
3148 if (sb->s_flags & MS_RDONLY)
3149 return 0;
3150
3151 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3152 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3153 clear_free_space_tree = 1;
3154 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3155 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3156 btrfs_warn(fs_info, "free space tree is invalid");
3157 clear_free_space_tree = 1;
3158 }
3159
3160 if (clear_free_space_tree) {
3161 btrfs_info(fs_info, "clearing free space tree");
3162 ret = btrfs_clear_free_space_tree(fs_info);
3163 if (ret) {
3164 btrfs_warn(fs_info,
3165 "failed to clear free space tree: %d", ret);
3166 close_ctree(fs_info);
3167 return ret;
3168 }
3169 }
3170
3171 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3172 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3173 btrfs_info(fs_info, "creating free space tree");
3174 ret = btrfs_create_free_space_tree(fs_info);
3175 if (ret) {
3176 btrfs_warn(fs_info,
3177 "failed to create free space tree: %d", ret);
3178 close_ctree(fs_info);
3179 return ret;
3180 }
3181 }
3182
3183 down_read(&fs_info->cleanup_work_sem);
3184 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3185 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3186 up_read(&fs_info->cleanup_work_sem);
3187 close_ctree(fs_info);
3188 return ret;
3189 }
3190 up_read(&fs_info->cleanup_work_sem);
3191
3192 ret = btrfs_resume_balance_async(fs_info);
3193 if (ret) {
3194 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3195 close_ctree(fs_info);
3196 return ret;
3197 }
3198
3199 ret = btrfs_resume_dev_replace_async(fs_info);
3200 if (ret) {
3201 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3202 close_ctree(fs_info);
3203 return ret;
3204 }
3205
3206 btrfs_qgroup_rescan_resume(fs_info);
3207
3208 if (!fs_info->uuid_root) {
3209 btrfs_info(fs_info, "creating UUID tree");
3210 ret = btrfs_create_uuid_tree(fs_info);
3211 if (ret) {
3212 btrfs_warn(fs_info,
3213 "failed to create the UUID tree: %d", ret);
3214 close_ctree(fs_info);
3215 return ret;
3216 }
3217 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3218 fs_info->generation !=
3219 btrfs_super_uuid_tree_generation(disk_super)) {
3220 btrfs_info(fs_info, "checking UUID tree");
3221 ret = btrfs_check_uuid_tree(fs_info);
3222 if (ret) {
3223 btrfs_warn(fs_info,
3224 "failed to check the UUID tree: %d", ret);
3225 close_ctree(fs_info);
3226 return ret;
3227 }
3228 } else {
3229 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3230 }
3231 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3232
3233 /*
3234 * backuproot only affect mount behavior, and if open_ctree succeeded,
3235 * no need to keep the flag
3236 */
3237 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3238
3239 return 0;
3240
3241 fail_qgroup:
3242 btrfs_free_qgroup_config(fs_info);
3243 fail_trans_kthread:
3244 kthread_stop(fs_info->transaction_kthread);
3245 btrfs_cleanup_transaction(fs_info);
3246 btrfs_free_fs_roots(fs_info);
3247 fail_cleaner:
3248 kthread_stop(fs_info->cleaner_kthread);
3249
3250 /*
3251 * make sure we're done with the btree inode before we stop our
3252 * kthreads
3253 */
3254 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3255
3256 fail_sysfs:
3257 btrfs_sysfs_remove_mounted(fs_info);
3258
3259 fail_fsdev_sysfs:
3260 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3261
3262 fail_block_groups:
3263 btrfs_put_block_group_cache(fs_info);
3264 btrfs_free_block_groups(fs_info);
3265
3266 fail_tree_roots:
3267 free_root_pointers(fs_info, 1);
3268 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3269
3270 fail_sb_buffer:
3271 btrfs_stop_all_workers(fs_info);
3272 fail_alloc:
3273 fail_iput:
3274 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3275
3276 iput(fs_info->btree_inode);
3277 fail_bio_counter:
3278 percpu_counter_destroy(&fs_info->bio_counter);
3279 fail_delalloc_bytes:
3280 percpu_counter_destroy(&fs_info->delalloc_bytes);
3281 fail_dirty_metadata_bytes:
3282 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3283 fail_bdi:
3284 bdi_destroy(&fs_info->bdi);
3285 fail_srcu:
3286 cleanup_srcu_struct(&fs_info->subvol_srcu);
3287 fail:
3288 btrfs_free_stripe_hash_table(fs_info);
3289 btrfs_close_devices(fs_info->fs_devices);
3290 return err;
3291
3292 recovery_tree_root:
3293 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3294 goto fail_tree_roots;
3295
3296 free_root_pointers(fs_info, 0);
3297
3298 /* don't use the log in recovery mode, it won't be valid */
3299 btrfs_set_super_log_root(disk_super, 0);
3300
3301 /* we can't trust the free space cache either */
3302 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3303
3304 ret = next_root_backup(fs_info, fs_info->super_copy,
3305 &num_backups_tried, &backup_index);
3306 if (ret == -1)
3307 goto fail_block_groups;
3308 goto retry_root_backup;
3309 }
3310
3311 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3312 {
3313 if (uptodate) {
3314 set_buffer_uptodate(bh);
3315 } else {
3316 struct btrfs_device *device = (struct btrfs_device *)
3317 bh->b_private;
3318
3319 btrfs_warn_rl_in_rcu(device->fs_info,
3320 "lost page write due to IO error on %s",
3321 rcu_str_deref(device->name));
3322 /* note, we don't set_buffer_write_io_error because we have
3323 * our own ways of dealing with the IO errors
3324 */
3325 clear_buffer_uptodate(bh);
3326 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3327 }
3328 unlock_buffer(bh);
3329 put_bh(bh);
3330 }
3331
3332 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3333 struct buffer_head **bh_ret)
3334 {
3335 struct buffer_head *bh;
3336 struct btrfs_super_block *super;
3337 u64 bytenr;
3338
3339 bytenr = btrfs_sb_offset(copy_num);
3340 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3341 return -EINVAL;
3342
3343 bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
3344 /*
3345 * If we fail to read from the underlying devices, as of now
3346 * the best option we have is to mark it EIO.
3347 */
3348 if (!bh)
3349 return -EIO;
3350
3351 super = (struct btrfs_super_block *)bh->b_data;
3352 if (btrfs_super_bytenr(super) != bytenr ||
3353 btrfs_super_magic(super) != BTRFS_MAGIC) {
3354 brelse(bh);
3355 return -EINVAL;
3356 }
3357
3358 *bh_ret = bh;
3359 return 0;
3360 }
3361
3362
3363 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3364 {
3365 struct buffer_head *bh;
3366 struct buffer_head *latest = NULL;
3367 struct btrfs_super_block *super;
3368 int i;
3369 u64 transid = 0;
3370 int ret = -EINVAL;
3371
3372 /* we would like to check all the supers, but that would make
3373 * a btrfs mount succeed after a mkfs from a different FS.
3374 * So, we need to add a special mount option to scan for
3375 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3376 */
3377 for (i = 0; i < 1; i++) {
3378 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3379 if (ret)
3380 continue;
3381
3382 super = (struct btrfs_super_block *)bh->b_data;
3383
3384 if (!latest || btrfs_super_generation(super) > transid) {
3385 brelse(latest);
3386 latest = bh;
3387 transid = btrfs_super_generation(super);
3388 } else {
3389 brelse(bh);
3390 }
3391 }
3392
3393 if (!latest)
3394 return ERR_PTR(ret);
3395
3396 return latest;
3397 }
3398
3399 /*
3400 * this should be called twice, once with wait == 0 and
3401 * once with wait == 1. When wait == 0 is done, all the buffer heads
3402 * we write are pinned.
3403 *
3404 * They are released when wait == 1 is done.
3405 * max_mirrors must be the same for both runs, and it indicates how
3406 * many supers on this one device should be written.
3407 *
3408 * max_mirrors == 0 means to write them all.
3409 */
3410 static int write_dev_supers(struct btrfs_device *device,
3411 struct btrfs_super_block *sb,
3412 int wait, int max_mirrors)
3413 {
3414 struct buffer_head *bh;
3415 int i;
3416 int ret;
3417 int errors = 0;
3418 u32 crc;
3419 u64 bytenr;
3420
3421 if (max_mirrors == 0)
3422 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3423
3424 for (i = 0; i < max_mirrors; i++) {
3425 bytenr = btrfs_sb_offset(i);
3426 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3427 device->commit_total_bytes)
3428 break;
3429
3430 if (wait) {
3431 bh = __find_get_block(device->bdev, bytenr / 4096,
3432 BTRFS_SUPER_INFO_SIZE);
3433 if (!bh) {
3434 errors++;
3435 continue;
3436 }
3437 wait_on_buffer(bh);
3438 if (!buffer_uptodate(bh))
3439 errors++;
3440
3441 /* drop our reference */
3442 brelse(bh);
3443
3444 /* drop the reference from the wait == 0 run */
3445 brelse(bh);
3446 continue;
3447 } else {
3448 btrfs_set_super_bytenr(sb, bytenr);
3449
3450 crc = ~(u32)0;
3451 crc = btrfs_csum_data((char *)sb +
3452 BTRFS_CSUM_SIZE, crc,
3453 BTRFS_SUPER_INFO_SIZE -
3454 BTRFS_CSUM_SIZE);
3455 btrfs_csum_final(crc, sb->csum);
3456
3457 /*
3458 * one reference for us, and we leave it for the
3459 * caller
3460 */
3461 bh = __getblk(device->bdev, bytenr / 4096,
3462 BTRFS_SUPER_INFO_SIZE);
3463 if (!bh) {
3464 btrfs_err(device->fs_info,
3465 "couldn't get super buffer head for bytenr %llu",
3466 bytenr);
3467 errors++;
3468 continue;
3469 }
3470
3471 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3472
3473 /* one reference for submit_bh */
3474 get_bh(bh);
3475
3476 set_buffer_uptodate(bh);
3477 lock_buffer(bh);
3478 bh->b_end_io = btrfs_end_buffer_write_sync;
3479 bh->b_private = device;
3480 }
3481
3482 /*
3483 * we fua the first super. The others we allow
3484 * to go down lazy.
3485 */
3486 if (i == 0)
3487 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
3488 else
3489 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
3490 if (ret)
3491 errors++;
3492 }
3493 return errors < i ? 0 : -1;
3494 }
3495
3496 /*
3497 * endio for the write_dev_flush, this will wake anyone waiting
3498 * for the barrier when it is done
3499 */
3500 static void btrfs_end_empty_barrier(struct bio *bio)
3501 {
3502 if (bio->bi_private)
3503 complete(bio->bi_private);
3504 bio_put(bio);
3505 }
3506
3507 /*
3508 * trigger flushes for one the devices. If you pass wait == 0, the flushes are
3509 * sent down. With wait == 1, it waits for the previous flush.
3510 *
3511 * any device where the flush fails with eopnotsupp are flagged as not-barrier
3512 * capable
3513 */
3514 static int write_dev_flush(struct btrfs_device *device, int wait)
3515 {
3516 struct bio *bio;
3517 int ret = 0;
3518
3519 if (device->nobarriers)
3520 return 0;
3521
3522 if (wait) {
3523 bio = device->flush_bio;
3524 if (!bio)
3525 return 0;
3526
3527 wait_for_completion(&device->flush_wait);
3528
3529 if (bio->bi_error) {
3530 ret = bio->bi_error;
3531 btrfs_dev_stat_inc_and_print(device,
3532 BTRFS_DEV_STAT_FLUSH_ERRS);
3533 }
3534
3535 /* drop the reference from the wait == 0 run */
3536 bio_put(bio);
3537 device->flush_bio = NULL;
3538
3539 return ret;
3540 }
3541
3542 /*
3543 * one reference for us, and we leave it for the
3544 * caller
3545 */
3546 device->flush_bio = NULL;
3547 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3548 if (!bio)
3549 return -ENOMEM;
3550
3551 bio->bi_end_io = btrfs_end_empty_barrier;
3552 bio->bi_bdev = device->bdev;
3553 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
3554 init_completion(&device->flush_wait);
3555 bio->bi_private = &device->flush_wait;
3556 device->flush_bio = bio;
3557
3558 bio_get(bio);
3559 btrfsic_submit_bio(bio);
3560
3561 return 0;
3562 }
3563
3564 /*
3565 * send an empty flush down to each device in parallel,
3566 * then wait for them
3567 */
3568 static int barrier_all_devices(struct btrfs_fs_info *info)
3569 {
3570 struct list_head *head;
3571 struct btrfs_device *dev;
3572 int errors_send = 0;
3573 int errors_wait = 0;
3574 int ret;
3575
3576 /* send down all the barriers */
3577 head = &info->fs_devices->devices;
3578 list_for_each_entry_rcu(dev, head, dev_list) {
3579 if (dev->missing)
3580 continue;
3581 if (!dev->bdev) {
3582 errors_send++;
3583 continue;
3584 }
3585 if (!dev->in_fs_metadata || !dev->writeable)
3586 continue;
3587
3588 ret = write_dev_flush(dev, 0);
3589 if (ret)
3590 errors_send++;
3591 }
3592
3593 /* wait for all the barriers */
3594 list_for_each_entry_rcu(dev, head, dev_list) {
3595 if (dev->missing)
3596 continue;
3597 if (!dev->bdev) {
3598 errors_wait++;
3599 continue;
3600 }
3601 if (!dev->in_fs_metadata || !dev->writeable)
3602 continue;
3603
3604 ret = write_dev_flush(dev, 1);
3605 if (ret)
3606 errors_wait++;
3607 }
3608 if (errors_send > info->num_tolerated_disk_barrier_failures ||
3609 errors_wait > info->num_tolerated_disk_barrier_failures)
3610 return -EIO;
3611 return 0;
3612 }
3613
3614 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3615 {
3616 int raid_type;
3617 int min_tolerated = INT_MAX;
3618
3619 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3620 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3621 min_tolerated = min(min_tolerated,
3622 btrfs_raid_array[BTRFS_RAID_SINGLE].
3623 tolerated_failures);
3624
3625 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3626 if (raid_type == BTRFS_RAID_SINGLE)
3627 continue;
3628 if (!(flags & btrfs_raid_group[raid_type]))
3629 continue;
3630 min_tolerated = min(min_tolerated,
3631 btrfs_raid_array[raid_type].
3632 tolerated_failures);
3633 }
3634
3635 if (min_tolerated == INT_MAX) {
3636 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3637 min_tolerated = 0;
3638 }
3639
3640 return min_tolerated;
3641 }
3642
3643 int btrfs_calc_num_tolerated_disk_barrier_failures(
3644 struct btrfs_fs_info *fs_info)
3645 {
3646 struct btrfs_ioctl_space_info space;
3647 struct btrfs_space_info *sinfo;
3648 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3649 BTRFS_BLOCK_GROUP_SYSTEM,
3650 BTRFS_BLOCK_GROUP_METADATA,
3651 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3652 int i;
3653 int c;
3654 int num_tolerated_disk_barrier_failures =
3655 (int)fs_info->fs_devices->num_devices;
3656
3657 for (i = 0; i < ARRAY_SIZE(types); i++) {
3658 struct btrfs_space_info *tmp;
3659
3660 sinfo = NULL;
3661 rcu_read_lock();
3662 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3663 if (tmp->flags == types[i]) {
3664 sinfo = tmp;
3665 break;
3666 }
3667 }
3668 rcu_read_unlock();
3669
3670 if (!sinfo)
3671 continue;
3672
3673 down_read(&sinfo->groups_sem);
3674 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3675 u64 flags;
3676
3677 if (list_empty(&sinfo->block_groups[c]))
3678 continue;
3679
3680 btrfs_get_block_group_info(&sinfo->block_groups[c],
3681 &space);
3682 if (space.total_bytes == 0 || space.used_bytes == 0)
3683 continue;
3684 flags = space.flags;
3685
3686 num_tolerated_disk_barrier_failures = min(
3687 num_tolerated_disk_barrier_failures,
3688 btrfs_get_num_tolerated_disk_barrier_failures(
3689 flags));
3690 }
3691 up_read(&sinfo->groups_sem);
3692 }
3693
3694 return num_tolerated_disk_barrier_failures;
3695 }
3696
3697 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3698 {
3699 struct list_head *head;
3700 struct btrfs_device *dev;
3701 struct btrfs_super_block *sb;
3702 struct btrfs_dev_item *dev_item;
3703 int ret;
3704 int do_barriers;
3705 int max_errors;
3706 int total_errors = 0;
3707 u64 flags;
3708
3709 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3710 backup_super_roots(fs_info);
3711
3712 sb = fs_info->super_for_commit;
3713 dev_item = &sb->dev_item;
3714
3715 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3716 head = &fs_info->fs_devices->devices;
3717 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3718
3719 if (do_barriers) {
3720 ret = barrier_all_devices(fs_info);
3721 if (ret) {
3722 mutex_unlock(
3723 &fs_info->fs_devices->device_list_mutex);
3724 btrfs_handle_fs_error(fs_info, ret,
3725 "errors while submitting device barriers.");
3726 return ret;
3727 }
3728 }
3729
3730 list_for_each_entry_rcu(dev, head, dev_list) {
3731 if (!dev->bdev) {
3732 total_errors++;
3733 continue;
3734 }
3735 if (!dev->in_fs_metadata || !dev->writeable)
3736 continue;
3737
3738 btrfs_set_stack_device_generation(dev_item, 0);
3739 btrfs_set_stack_device_type(dev_item, dev->type);
3740 btrfs_set_stack_device_id(dev_item, dev->devid);
3741 btrfs_set_stack_device_total_bytes(dev_item,
3742 dev->commit_total_bytes);
3743 btrfs_set_stack_device_bytes_used(dev_item,
3744 dev->commit_bytes_used);
3745 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3746 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3747 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3748 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3749 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3750
3751 flags = btrfs_super_flags(sb);
3752 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3753
3754 ret = write_dev_supers(dev, sb, 0, max_mirrors);
3755 if (ret)
3756 total_errors++;
3757 }
3758 if (total_errors > max_errors) {
3759 btrfs_err(fs_info, "%d errors while writing supers",
3760 total_errors);
3761 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3762
3763 /* FUA is masked off if unsupported and can't be the reason */
3764 btrfs_handle_fs_error(fs_info, -EIO,
3765 "%d errors while writing supers",
3766 total_errors);
3767 return -EIO;
3768 }
3769
3770 total_errors = 0;
3771 list_for_each_entry_rcu(dev, head, dev_list) {
3772 if (!dev->bdev)
3773 continue;
3774 if (!dev->in_fs_metadata || !dev->writeable)
3775 continue;
3776
3777 ret = write_dev_supers(dev, sb, 1, max_mirrors);
3778 if (ret)
3779 total_errors++;
3780 }
3781 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3782 if (total_errors > max_errors) {
3783 btrfs_handle_fs_error(fs_info, -EIO,
3784 "%d errors while writing supers",
3785 total_errors);
3786 return -EIO;
3787 }
3788 return 0;
3789 }
3790
3791 /* Drop a fs root from the radix tree and free it. */
3792 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3793 struct btrfs_root *root)
3794 {
3795 spin_lock(&fs_info->fs_roots_radix_lock);
3796 radix_tree_delete(&fs_info->fs_roots_radix,
3797 (unsigned long)root->root_key.objectid);
3798 spin_unlock(&fs_info->fs_roots_radix_lock);
3799
3800 if (btrfs_root_refs(&root->root_item) == 0)
3801 synchronize_srcu(&fs_info->subvol_srcu);
3802
3803 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3804 btrfs_free_log(NULL, root);
3805 if (root->reloc_root) {
3806 free_extent_buffer(root->reloc_root->node);
3807 free_extent_buffer(root->reloc_root->commit_root);
3808 btrfs_put_fs_root(root->reloc_root);
3809 root->reloc_root = NULL;
3810 }
3811 }
3812
3813 if (root->free_ino_pinned)
3814 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3815 if (root->free_ino_ctl)
3816 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3817 free_fs_root(root);
3818 }
3819
3820 static void free_fs_root(struct btrfs_root *root)
3821 {
3822 iput(root->ino_cache_inode);
3823 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3824 btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3825 root->orphan_block_rsv = NULL;
3826 if (root->anon_dev)
3827 free_anon_bdev(root->anon_dev);
3828 if (root->subv_writers)
3829 btrfs_free_subvolume_writers(root->subv_writers);
3830 free_extent_buffer(root->node);
3831 free_extent_buffer(root->commit_root);
3832 kfree(root->free_ino_ctl);
3833 kfree(root->free_ino_pinned);
3834 kfree(root->name);
3835 btrfs_put_fs_root(root);
3836 }
3837
3838 void btrfs_free_fs_root(struct btrfs_root *root)
3839 {
3840 free_fs_root(root);
3841 }
3842
3843 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3844 {
3845 u64 root_objectid = 0;
3846 struct btrfs_root *gang[8];
3847 int i = 0;
3848 int err = 0;
3849 unsigned int ret = 0;
3850 int index;
3851
3852 while (1) {
3853 index = srcu_read_lock(&fs_info->subvol_srcu);
3854 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3855 (void **)gang, root_objectid,
3856 ARRAY_SIZE(gang));
3857 if (!ret) {
3858 srcu_read_unlock(&fs_info->subvol_srcu, index);
3859 break;
3860 }
3861 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3862
3863 for (i = 0; i < ret; i++) {
3864 /* Avoid to grab roots in dead_roots */
3865 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3866 gang[i] = NULL;
3867 continue;
3868 }
3869 /* grab all the search result for later use */
3870 gang[i] = btrfs_grab_fs_root(gang[i]);
3871 }
3872 srcu_read_unlock(&fs_info->subvol_srcu, index);
3873
3874 for (i = 0; i < ret; i++) {
3875 if (!gang[i])
3876 continue;
3877 root_objectid = gang[i]->root_key.objectid;
3878 err = btrfs_orphan_cleanup(gang[i]);
3879 if (err)
3880 break;
3881 btrfs_put_fs_root(gang[i]);
3882 }
3883 root_objectid++;
3884 }
3885
3886 /* release the uncleaned roots due to error */
3887 for (; i < ret; i++) {
3888 if (gang[i])
3889 btrfs_put_fs_root(gang[i]);
3890 }
3891 return err;
3892 }
3893
3894 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3895 {
3896 struct btrfs_root *root = fs_info->tree_root;
3897 struct btrfs_trans_handle *trans;
3898
3899 mutex_lock(&fs_info->cleaner_mutex);
3900 btrfs_run_delayed_iputs(fs_info);
3901 mutex_unlock(&fs_info->cleaner_mutex);
3902 wake_up_process(fs_info->cleaner_kthread);
3903
3904 /* wait until ongoing cleanup work done */
3905 down_write(&fs_info->cleanup_work_sem);
3906 up_write(&fs_info->cleanup_work_sem);
3907
3908 trans = btrfs_join_transaction(root);
3909 if (IS_ERR(trans))
3910 return PTR_ERR(trans);
3911 return btrfs_commit_transaction(trans);
3912 }
3913
3914 void close_ctree(struct btrfs_fs_info *fs_info)
3915 {
3916 struct btrfs_root *root = fs_info->tree_root;
3917 int ret;
3918
3919 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3920
3921 /* wait for the qgroup rescan worker to stop */
3922 btrfs_qgroup_wait_for_completion(fs_info, false);
3923
3924 /* wait for the uuid_scan task to finish */
3925 down(&fs_info->uuid_tree_rescan_sem);
3926 /* avoid complains from lockdep et al., set sem back to initial state */
3927 up(&fs_info->uuid_tree_rescan_sem);
3928
3929 /* pause restriper - we want to resume on mount */
3930 btrfs_pause_balance(fs_info);
3931
3932 btrfs_dev_replace_suspend_for_unmount(fs_info);
3933
3934 btrfs_scrub_cancel(fs_info);
3935
3936 /* wait for any defraggers to finish */
3937 wait_event(fs_info->transaction_wait,
3938 (atomic_read(&fs_info->defrag_running) == 0));
3939
3940 /* clear out the rbtree of defraggable inodes */
3941 btrfs_cleanup_defrag_inodes(fs_info);
3942
3943 cancel_work_sync(&fs_info->async_reclaim_work);
3944
3945 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3946 /*
3947 * If the cleaner thread is stopped and there are
3948 * block groups queued for removal, the deletion will be
3949 * skipped when we quit the cleaner thread.
3950 */
3951 btrfs_delete_unused_bgs(fs_info);
3952
3953 ret = btrfs_commit_super(fs_info);
3954 if (ret)
3955 btrfs_err(fs_info, "commit super ret %d", ret);
3956 }
3957
3958 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3959 btrfs_error_commit_super(fs_info);
3960
3961 kthread_stop(fs_info->transaction_kthread);
3962 kthread_stop(fs_info->cleaner_kthread);
3963
3964 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3965
3966 btrfs_free_qgroup_config(fs_info);
3967
3968 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3969 btrfs_info(fs_info, "at unmount delalloc count %lld",
3970 percpu_counter_sum(&fs_info->delalloc_bytes));
3971 }
3972
3973 btrfs_sysfs_remove_mounted(fs_info);
3974 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3975
3976 btrfs_free_fs_roots(fs_info);
3977
3978 btrfs_put_block_group_cache(fs_info);
3979
3980 btrfs_free_block_groups(fs_info);
3981
3982 /*
3983 * we must make sure there is not any read request to
3984 * submit after we stopping all workers.
3985 */
3986 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3987 btrfs_stop_all_workers(fs_info);
3988
3989 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3990 free_root_pointers(fs_info, 1);
3991
3992 iput(fs_info->btree_inode);
3993
3994 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3995 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3996 btrfsic_unmount(fs_info->fs_devices);
3997 #endif
3998
3999 btrfs_close_devices(fs_info->fs_devices);
4000 btrfs_mapping_tree_free(&fs_info->mapping_tree);
4001
4002 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
4003 percpu_counter_destroy(&fs_info->delalloc_bytes);
4004 percpu_counter_destroy(&fs_info->bio_counter);
4005 bdi_destroy(&fs_info->bdi);
4006 cleanup_srcu_struct(&fs_info->subvol_srcu);
4007
4008 btrfs_free_stripe_hash_table(fs_info);
4009
4010 __btrfs_free_block_rsv(root->orphan_block_rsv);
4011 root->orphan_block_rsv = NULL;
4012
4013 mutex_lock(&fs_info->chunk_mutex);
4014 while (!list_empty(&fs_info->pinned_chunks)) {
4015 struct extent_map *em;
4016
4017 em = list_first_entry(&fs_info->pinned_chunks,
4018 struct extent_map, list);
4019 list_del_init(&em->list);
4020 free_extent_map(em);
4021 }
4022 mutex_unlock(&fs_info->chunk_mutex);
4023 }
4024
4025 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4026 int atomic)
4027 {
4028 int ret;
4029 struct inode *btree_inode = buf->pages[0]->mapping->host;
4030
4031 ret = extent_buffer_uptodate(buf);
4032 if (!ret)
4033 return ret;
4034
4035 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4036 parent_transid, atomic);
4037 if (ret == -EAGAIN)
4038 return ret;
4039 return !ret;
4040 }
4041
4042 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4043 {
4044 struct btrfs_fs_info *fs_info;
4045 struct btrfs_root *root;
4046 u64 transid = btrfs_header_generation(buf);
4047 int was_dirty;
4048
4049 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4050 /*
4051 * This is a fast path so only do this check if we have sanity tests
4052 * enabled. Normal people shouldn't be marking dummy buffers as dirty
4053 * outside of the sanity tests.
4054 */
4055 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
4056 return;
4057 #endif
4058 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4059 fs_info = root->fs_info;
4060 btrfs_assert_tree_locked(buf);
4061 if (transid != fs_info->generation)
4062 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4063 buf->start, transid, fs_info->generation);
4064 was_dirty = set_extent_buffer_dirty(buf);
4065 if (!was_dirty)
4066 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
4067 buf->len,
4068 fs_info->dirty_metadata_batch);
4069 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4070 if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
4071 btrfs_print_leaf(fs_info, buf);
4072 ASSERT(0);
4073 }
4074 #endif
4075 }
4076
4077 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4078 int flush_delayed)
4079 {
4080 /*
4081 * looks as though older kernels can get into trouble with
4082 * this code, they end up stuck in balance_dirty_pages forever
4083 */
4084 int ret;
4085
4086 if (current->flags & PF_MEMALLOC)
4087 return;
4088
4089 if (flush_delayed)
4090 btrfs_balance_delayed_items(fs_info);
4091
4092 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4093 BTRFS_DIRTY_METADATA_THRESH);
4094 if (ret > 0) {
4095 balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4096 }
4097 }
4098
4099 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4100 {
4101 __btrfs_btree_balance_dirty(fs_info, 1);
4102 }
4103
4104 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4105 {
4106 __btrfs_btree_balance_dirty(fs_info, 0);
4107 }
4108
4109 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4110 {
4111 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4112 struct btrfs_fs_info *fs_info = root->fs_info;
4113
4114 return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
4115 }
4116
4117 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
4118 {
4119 struct btrfs_super_block *sb = fs_info->super_copy;
4120 u64 nodesize = btrfs_super_nodesize(sb);
4121 u64 sectorsize = btrfs_super_sectorsize(sb);
4122 int ret = 0;
4123
4124 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4125 btrfs_err(fs_info, "no valid FS found");
4126 ret = -EINVAL;
4127 }
4128 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4129 btrfs_warn(fs_info, "unrecognized super flag: %llu",
4130 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4131 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4132 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
4133 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4134 ret = -EINVAL;
4135 }
4136 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4137 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4138 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4139 ret = -EINVAL;
4140 }
4141 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4142 btrfs_err(fs_info, "log_root level too big: %d >= %d",
4143 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4144 ret = -EINVAL;
4145 }
4146
4147 /*
4148 * Check sectorsize and nodesize first, other check will need it.
4149 * Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here.
4150 */
4151 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4152 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4153 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4154 ret = -EINVAL;
4155 }
4156 /* Only PAGE SIZE is supported yet */
4157 if (sectorsize != PAGE_SIZE) {
4158 btrfs_err(fs_info,
4159 "sectorsize %llu not supported yet, only support %lu",
4160 sectorsize, PAGE_SIZE);
4161 ret = -EINVAL;
4162 }
4163 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4164 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4165 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4166 ret = -EINVAL;
4167 }
4168 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4169 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4170 le32_to_cpu(sb->__unused_leafsize), nodesize);
4171 ret = -EINVAL;
4172 }
4173
4174 /* Root alignment check */
4175 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4176 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4177 btrfs_super_root(sb));
4178 ret = -EINVAL;
4179 }
4180 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4181 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4182 btrfs_super_chunk_root(sb));
4183 ret = -EINVAL;
4184 }
4185 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4186 btrfs_warn(fs_info, "log_root block unaligned: %llu",
4187 btrfs_super_log_root(sb));
4188 ret = -EINVAL;
4189 }
4190
4191 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
4192 btrfs_err(fs_info,
4193 "dev_item UUID does not match fsid: %pU != %pU",
4194 fs_info->fsid, sb->dev_item.fsid);
4195 ret = -EINVAL;
4196 }
4197
4198 /*
4199 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
4200 * done later
4201 */
4202 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4203 btrfs_err(fs_info, "bytes_used is too small %llu",
4204 btrfs_super_bytes_used(sb));
4205 ret = -EINVAL;
4206 }
4207 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4208 btrfs_err(fs_info, "invalid stripesize %u",
4209 btrfs_super_stripesize(sb));
4210 ret = -EINVAL;
4211 }
4212 if (btrfs_super_num_devices(sb) > (1UL << 31))
4213 btrfs_warn(fs_info, "suspicious number of devices: %llu",
4214 btrfs_super_num_devices(sb));
4215 if (btrfs_super_num_devices(sb) == 0) {
4216 btrfs_err(fs_info, "number of devices is 0");
4217 ret = -EINVAL;
4218 }
4219
4220 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4221 btrfs_err(fs_info, "super offset mismatch %llu != %u",
4222 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4223 ret = -EINVAL;
4224 }
4225
4226 /*
4227 * Obvious sys_chunk_array corruptions, it must hold at least one key
4228 * and one chunk
4229 */
4230 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4231 btrfs_err(fs_info, "system chunk array too big %u > %u",
4232 btrfs_super_sys_array_size(sb),
4233 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4234 ret = -EINVAL;
4235 }
4236 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4237 + sizeof(struct btrfs_chunk)) {
4238 btrfs_err(fs_info, "system chunk array too small %u < %zu",
4239 btrfs_super_sys_array_size(sb),
4240 sizeof(struct btrfs_disk_key)
4241 + sizeof(struct btrfs_chunk));
4242 ret = -EINVAL;
4243 }
4244
4245 /*
4246 * The generation is a global counter, we'll trust it more than the others
4247 * but it's still possible that it's the one that's wrong.
4248 */
4249 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4250 btrfs_warn(fs_info,
4251 "suspicious: generation < chunk_root_generation: %llu < %llu",
4252 btrfs_super_generation(sb),
4253 btrfs_super_chunk_root_generation(sb));
4254 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4255 && btrfs_super_cache_generation(sb) != (u64)-1)
4256 btrfs_warn(fs_info,
4257 "suspicious: generation < cache_generation: %llu < %llu",
4258 btrfs_super_generation(sb),
4259 btrfs_super_cache_generation(sb));
4260
4261 return ret;
4262 }
4263
4264 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4265 {
4266 mutex_lock(&fs_info->cleaner_mutex);
4267 btrfs_run_delayed_iputs(fs_info);
4268 mutex_unlock(&fs_info->cleaner_mutex);
4269
4270 down_write(&fs_info->cleanup_work_sem);
4271 up_write(&fs_info->cleanup_work_sem);
4272
4273 /* cleanup FS via transaction */
4274 btrfs_cleanup_transaction(fs_info);
4275 }
4276
4277 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4278 {
4279 struct btrfs_ordered_extent *ordered;
4280
4281 spin_lock(&root->ordered_extent_lock);
4282 /*
4283 * This will just short circuit the ordered completion stuff which will
4284 * make sure the ordered extent gets properly cleaned up.
4285 */
4286 list_for_each_entry(ordered, &root->ordered_extents,
4287 root_extent_list)
4288 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4289 spin_unlock(&root->ordered_extent_lock);
4290 }
4291
4292 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4293 {
4294 struct btrfs_root *root;
4295 struct list_head splice;
4296
4297 INIT_LIST_HEAD(&splice);
4298
4299 spin_lock(&fs_info->ordered_root_lock);
4300 list_splice_init(&fs_info->ordered_roots, &splice);
4301 while (!list_empty(&splice)) {
4302 root = list_first_entry(&splice, struct btrfs_root,
4303 ordered_root);
4304 list_move_tail(&root->ordered_root,
4305 &fs_info->ordered_roots);
4306
4307 spin_unlock(&fs_info->ordered_root_lock);
4308 btrfs_destroy_ordered_extents(root);
4309
4310 cond_resched();
4311 spin_lock(&fs_info->ordered_root_lock);
4312 }
4313 spin_unlock(&fs_info->ordered_root_lock);
4314 }
4315
4316 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4317 struct btrfs_fs_info *fs_info)
4318 {
4319 struct rb_node *node;
4320 struct btrfs_delayed_ref_root *delayed_refs;
4321 struct btrfs_delayed_ref_node *ref;
4322 int ret = 0;
4323
4324 delayed_refs = &trans->delayed_refs;
4325
4326 spin_lock(&delayed_refs->lock);
4327 if (atomic_read(&delayed_refs->num_entries) == 0) {
4328 spin_unlock(&delayed_refs->lock);
4329 btrfs_info(fs_info, "delayed_refs has NO entry");
4330 return ret;
4331 }
4332
4333 while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4334 struct btrfs_delayed_ref_head *head;
4335 struct btrfs_delayed_ref_node *tmp;
4336 bool pin_bytes = false;
4337
4338 head = rb_entry(node, struct btrfs_delayed_ref_head,
4339 href_node);
4340 if (!mutex_trylock(&head->mutex)) {
4341 atomic_inc(&head->node.refs);
4342 spin_unlock(&delayed_refs->lock);
4343
4344 mutex_lock(&head->mutex);
4345 mutex_unlock(&head->mutex);
4346 btrfs_put_delayed_ref(&head->node);
4347 spin_lock(&delayed_refs->lock);
4348 continue;
4349 }
4350 spin_lock(&head->lock);
4351 list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4352 list) {
4353 ref->in_tree = 0;
4354 list_del(&ref->list);
4355 if (!list_empty(&ref->add_list))
4356 list_del(&ref->add_list);
4357 atomic_dec(&delayed_refs->num_entries);
4358 btrfs_put_delayed_ref(ref);
4359 }
4360 if (head->must_insert_reserved)
4361 pin_bytes = true;
4362 btrfs_free_delayed_extent_op(head->extent_op);
4363 delayed_refs->num_heads--;
4364 if (head->processing == 0)
4365 delayed_refs->num_heads_ready--;
4366 atomic_dec(&delayed_refs->num_entries);
4367 head->node.in_tree = 0;
4368 rb_erase(&head->href_node, &delayed_refs->href_root);
4369 spin_unlock(&head->lock);
4370 spin_unlock(&delayed_refs->lock);
4371 mutex_unlock(&head->mutex);
4372
4373 if (pin_bytes)
4374 btrfs_pin_extent(fs_info, head->node.bytenr,
4375 head->node.num_bytes, 1);
4376 btrfs_put_delayed_ref(&head->node);
4377 cond_resched();
4378 spin_lock(&delayed_refs->lock);
4379 }
4380
4381 spin_unlock(&delayed_refs->lock);
4382
4383 return ret;
4384 }
4385
4386 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4387 {
4388 struct btrfs_inode *btrfs_inode;
4389 struct list_head splice;
4390
4391 INIT_LIST_HEAD(&splice);
4392
4393 spin_lock(&root->delalloc_lock);
4394 list_splice_init(&root->delalloc_inodes, &splice);
4395
4396 while (!list_empty(&splice)) {
4397 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4398 delalloc_inodes);
4399
4400 list_del_init(&btrfs_inode->delalloc_inodes);
4401 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4402 &btrfs_inode->runtime_flags);
4403 spin_unlock(&root->delalloc_lock);
4404
4405 btrfs_invalidate_inodes(btrfs_inode->root);
4406
4407 spin_lock(&root->delalloc_lock);
4408 }
4409
4410 spin_unlock(&root->delalloc_lock);
4411 }
4412
4413 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4414 {
4415 struct btrfs_root *root;
4416 struct list_head splice;
4417
4418 INIT_LIST_HEAD(&splice);
4419
4420 spin_lock(&fs_info->delalloc_root_lock);
4421 list_splice_init(&fs_info->delalloc_roots, &splice);
4422 while (!list_empty(&splice)) {
4423 root = list_first_entry(&splice, struct btrfs_root,
4424 delalloc_root);
4425 list_del_init(&root->delalloc_root);
4426 root = btrfs_grab_fs_root(root);
4427 BUG_ON(!root);
4428 spin_unlock(&fs_info->delalloc_root_lock);
4429
4430 btrfs_destroy_delalloc_inodes(root);
4431 btrfs_put_fs_root(root);
4432
4433 spin_lock(&fs_info->delalloc_root_lock);
4434 }
4435 spin_unlock(&fs_info->delalloc_root_lock);
4436 }
4437
4438 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4439 struct extent_io_tree *dirty_pages,
4440 int mark)
4441 {
4442 int ret;
4443 struct extent_buffer *eb;
4444 u64 start = 0;
4445 u64 end;
4446
4447 while (1) {
4448 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4449 mark, NULL);
4450 if (ret)
4451 break;
4452
4453 clear_extent_bits(dirty_pages, start, end, mark);
4454 while (start <= end) {
4455 eb = find_extent_buffer(fs_info, start);
4456 start += fs_info->nodesize;
4457 if (!eb)
4458 continue;
4459 wait_on_extent_buffer_writeback(eb);
4460
4461 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4462 &eb->bflags))
4463 clear_extent_buffer_dirty(eb);
4464 free_extent_buffer_stale(eb);
4465 }
4466 }
4467
4468 return ret;
4469 }
4470
4471 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4472 struct extent_io_tree *pinned_extents)
4473 {
4474 struct extent_io_tree *unpin;
4475 u64 start;
4476 u64 end;
4477 int ret;
4478 bool loop = true;
4479
4480 unpin = pinned_extents;
4481 again:
4482 while (1) {
4483 ret = find_first_extent_bit(unpin, 0, &start, &end,
4484 EXTENT_DIRTY, NULL);
4485 if (ret)
4486 break;
4487
4488 clear_extent_dirty(unpin, start, end);
4489 btrfs_error_unpin_extent_range(fs_info, start, end);
4490 cond_resched();
4491 }
4492
4493 if (loop) {
4494 if (unpin == &fs_info->freed_extents[0])
4495 unpin = &fs_info->freed_extents[1];
4496 else
4497 unpin = &fs_info->freed_extents[0];
4498 loop = false;
4499 goto again;
4500 }
4501
4502 return 0;
4503 }
4504
4505 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4506 {
4507 struct inode *inode;
4508
4509 inode = cache->io_ctl.inode;
4510 if (inode) {
4511 invalidate_inode_pages2(inode->i_mapping);
4512 BTRFS_I(inode)->generation = 0;
4513 cache->io_ctl.inode = NULL;
4514 iput(inode);
4515 }
4516 btrfs_put_block_group(cache);
4517 }
4518
4519 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4520 struct btrfs_fs_info *fs_info)
4521 {
4522 struct btrfs_block_group_cache *cache;
4523
4524 spin_lock(&cur_trans->dirty_bgs_lock);
4525 while (!list_empty(&cur_trans->dirty_bgs)) {
4526 cache = list_first_entry(&cur_trans->dirty_bgs,
4527 struct btrfs_block_group_cache,
4528 dirty_list);
4529 if (!cache) {
4530 btrfs_err(fs_info, "orphan block group dirty_bgs list");
4531 spin_unlock(&cur_trans->dirty_bgs_lock);
4532 return;
4533 }
4534
4535 if (!list_empty(&cache->io_list)) {
4536 spin_unlock(&cur_trans->dirty_bgs_lock);
4537 list_del_init(&cache->io_list);
4538 btrfs_cleanup_bg_io(cache);
4539 spin_lock(&cur_trans->dirty_bgs_lock);
4540 }
4541
4542 list_del_init(&cache->dirty_list);
4543 spin_lock(&cache->lock);
4544 cache->disk_cache_state = BTRFS_DC_ERROR;
4545 spin_unlock(&cache->lock);
4546
4547 spin_unlock(&cur_trans->dirty_bgs_lock);
4548 btrfs_put_block_group(cache);
4549 spin_lock(&cur_trans->dirty_bgs_lock);
4550 }
4551 spin_unlock(&cur_trans->dirty_bgs_lock);
4552
4553 while (!list_empty(&cur_trans->io_bgs)) {
4554 cache = list_first_entry(&cur_trans->io_bgs,
4555 struct btrfs_block_group_cache,
4556 io_list);
4557 if (!cache) {
4558 btrfs_err(fs_info, "orphan block group on io_bgs list");
4559 return;
4560 }
4561
4562 list_del_init(&cache->io_list);
4563 spin_lock(&cache->lock);
4564 cache->disk_cache_state = BTRFS_DC_ERROR;
4565 spin_unlock(&cache->lock);
4566 btrfs_cleanup_bg_io(cache);
4567 }
4568 }
4569
4570 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4571 struct btrfs_fs_info *fs_info)
4572 {
4573 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4574 ASSERT(list_empty(&cur_trans->dirty_bgs));
4575 ASSERT(list_empty(&cur_trans->io_bgs));
4576
4577 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4578
4579 cur_trans->state = TRANS_STATE_COMMIT_START;
4580 wake_up(&fs_info->transaction_blocked_wait);
4581
4582 cur_trans->state = TRANS_STATE_UNBLOCKED;
4583 wake_up(&fs_info->transaction_wait);
4584
4585 btrfs_destroy_delayed_inodes(fs_info);
4586 btrfs_assert_delayed_root_empty(fs_info);
4587
4588 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4589 EXTENT_DIRTY);
4590 btrfs_destroy_pinned_extent(fs_info,
4591 fs_info->pinned_extents);
4592
4593 cur_trans->state =TRANS_STATE_COMPLETED;
4594 wake_up(&cur_trans->commit_wait);
4595
4596 /*
4597 memset(cur_trans, 0, sizeof(*cur_trans));
4598 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
4599 */
4600 }
4601
4602 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4603 {
4604 struct btrfs_transaction *t;
4605
4606 mutex_lock(&fs_info->transaction_kthread_mutex);
4607
4608 spin_lock(&fs_info->trans_lock);
4609 while (!list_empty(&fs_info->trans_list)) {
4610 t = list_first_entry(&fs_info->trans_list,
4611 struct btrfs_transaction, list);
4612 if (t->state >= TRANS_STATE_COMMIT_START) {
4613 atomic_inc(&t->use_count);
4614 spin_unlock(&fs_info->trans_lock);
4615 btrfs_wait_for_commit(fs_info, t->transid);
4616 btrfs_put_transaction(t);
4617 spin_lock(&fs_info->trans_lock);
4618 continue;
4619 }
4620 if (t == fs_info->running_transaction) {
4621 t->state = TRANS_STATE_COMMIT_DOING;
4622 spin_unlock(&fs_info->trans_lock);
4623 /*
4624 * We wait for 0 num_writers since we don't hold a trans
4625 * handle open currently for this transaction.
4626 */
4627 wait_event(t->writer_wait,
4628 atomic_read(&t->num_writers) == 0);
4629 } else {
4630 spin_unlock(&fs_info->trans_lock);
4631 }
4632 btrfs_cleanup_one_transaction(t, fs_info);
4633
4634 spin_lock(&fs_info->trans_lock);
4635 if (t == fs_info->running_transaction)
4636 fs_info->running_transaction = NULL;
4637 list_del_init(&t->list);
4638 spin_unlock(&fs_info->trans_lock);
4639
4640 btrfs_put_transaction(t);
4641 trace_btrfs_transaction_commit(fs_info->tree_root);
4642 spin_lock(&fs_info->trans_lock);
4643 }
4644 spin_unlock(&fs_info->trans_lock);
4645 btrfs_destroy_all_ordered_extents(fs_info);
4646 btrfs_destroy_delayed_inodes(fs_info);
4647 btrfs_assert_delayed_root_empty(fs_info);
4648 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4649 btrfs_destroy_all_delalloc_inodes(fs_info);
4650 mutex_unlock(&fs_info->transaction_kthread_mutex);
4651
4652 return 0;
4653 }
4654
4655 static const struct extent_io_ops btree_extent_io_ops = {
4656 .readpage_end_io_hook = btree_readpage_end_io_hook,
4657 .readpage_io_failed_hook = btree_io_failed_hook,
4658 .submit_bio_hook = btree_submit_bio_hook,
4659 /* note we're sharing with inode.c for the merge bio hook */
4660 .merge_bio_hook = btrfs_merge_bio_hook,
4661 };