fs/btrfs/scrub.c
1/*
2 * Copyright (C) 2011 STRATO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/blkdev.h>
20#include <linux/ratelimit.h>
21#include "ctree.h"
22#include "volumes.h"
23#include "disk-io.h"
24#include "ordered-data.h"
25#include "backref.h"
26
27/*
28 * This is only the first step towards a full-featured scrub. It reads all
29 * extents and super blocks and verifies the checksums. In case a bad checksum
30 * is found or the extent cannot be read, good data will be written back if
31 * any can be found.
32 *
33 * Future enhancements:
34 * - To enhance the performance, better read-ahead strategies for the
35 * extent-tree can be employed.
36 * - In case an unrepairable extent is encountered, track which files are
37 * affected and report them
38 * - In case of a read error on files with nodatasum, map the file and read
39 * the extent to trigger a writeback of the good copy
40 * - track and record media errors, throw out bad devices
41 * - add a mode to also read unallocated space
42 * - make the prefetch cancellable
43 */
44
45struct scrub_bio;
46struct scrub_page;
47struct scrub_dev;
48static void scrub_bio_end_io(struct bio *bio, int err);
49static void scrub_checksum(struct btrfs_work *work);
50static int scrub_checksum_data(struct scrub_dev *sdev,
51 struct scrub_page *spag, void *buffer);
52static int scrub_checksum_tree_block(struct scrub_dev *sdev,
53 struct scrub_page *spag, u64 logical,
54 void *buffer);
55static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
56static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
57static void scrub_fixup_end_io(struct bio *bio, int err);
58static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
59 struct page *page);
60static void scrub_fixup(struct scrub_bio *sbio, int ix);
61
62#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
63#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
64
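/*
 * one scrub_page describes one PAGE_SIZE unit queued in a scrub_bio and
 * carries the metadata needed to verify it later: the extent flags and
 * generation, which mirror it was read from, and the expected data csum
 * (if one exists)
 */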
65struct scrub_page {
66 u64 flags; /* extent flags */
67 u64 generation;
68 u64 mirror_num;
69 int have_csum;
70 u8 csum[BTRFS_CSUM_SIZE];
71};
72
73struct scrub_bio {
74 int index;
75 struct scrub_dev *sdev;
76 struct bio *bio;
77 int err;
78 u64 logical;
79 u64 physical;
80 struct scrub_page spag[SCRUB_PAGES_PER_BIO];
81 u64 count;
82 int next_free;
83 struct btrfs_work work;
84};
85
86struct scrub_dev {
87 struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
88 struct btrfs_device *dev;
89 int first_free;
90 int curr;
91 atomic_t in_flight;
92 spinlock_t list_lock;
93 wait_queue_head_t list_wait;
94 u16 csum_size;
95 struct list_head csum_list;
96 atomic_t cancel_req;
97 int readonly;
98 /*
99 * statistics
100 */
101 struct btrfs_scrub_progress stat;
102 spinlock_t stat_lock;
103};
104
105struct scrub_warning {
106 struct btrfs_path *path;
107 u64 extent_item_size;
108 char *scratch_buf;
109 char *msg_buf;
110 const char *errstr;
111 sector_t sector;
112 u64 logical;
113 struct btrfs_device *dev;
114 int msg_bufsize;
115 int scratch_bufsize;
116};
117
118static void scrub_free_csums(struct scrub_dev *sdev)
119{
120 while (!list_empty(&sdev->csum_list)) {
121 struct btrfs_ordered_sum *sum;
122 sum = list_first_entry(&sdev->csum_list,
123 struct btrfs_ordered_sum, list);
124 list_del(&sum->list);
125 kfree(sum);
126 }
127}
128
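/*
 * free all pages attached to a bio and then the bio itself; consecutive
 * bio_vecs may point to the same page, so free each page only once
 */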
129static void scrub_free_bio(struct bio *bio)
130{
131 int i;
132 struct page *last_page = NULL;
133
134 if (!bio)
135 return;
136
137 for (i = 0; i < bio->bi_vcnt; ++i) {
138 if (bio->bi_io_vec[i].bv_page == last_page)
139 continue;
140 last_page = bio->bi_io_vec[i].bv_page;
141 __free_page(last_page);
142 }
143 bio_put(bio);
144}
145
146static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
147{
148 int i;
149
150 if (!sdev)
151 return;
152
153 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
154 struct scrub_bio *sbio = sdev->bios[i];
155
156 if (!sbio)
157 break;
158
159 scrub_free_bio(sbio->bio);
160 kfree(sbio);
161 }
162
163 scrub_free_csums(sdev);
164 kfree(sdev);
165}
166
167static noinline_for_stack
168struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
169{
170 struct scrub_dev *sdev;
171 int i;
172 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
173
174 sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
175 if (!sdev)
176 goto nomem;
177 sdev->dev = dev;
178 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
179 struct scrub_bio *sbio;
180
181 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
182 if (!sbio)
183 goto nomem;
184 sdev->bios[i] = sbio;
185
186 sbio->index = i;
187 sbio->sdev = sdev;
188 sbio->count = 0;
189 sbio->work.func = scrub_checksum;
190
191 if (i != SCRUB_BIOS_PER_DEV-1)
192 sdev->bios[i]->next_free = i + 1;
193 else
194 sdev->bios[i]->next_free = -1;
195 }
196 sdev->first_free = 0;
197 sdev->curr = -1;
198 atomic_set(&sdev->in_flight, 0);
199 atomic_set(&sdev->cancel_req, 0);
200 sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
201 INIT_LIST_HEAD(&sdev->csum_list);
202
203 spin_lock_init(&sdev->list_lock);
204 spin_lock_init(&sdev->stat_lock);
205 init_waitqueue_head(&sdev->list_wait);
206 return sdev;
207
208nomem:
209 scrub_free_dev(sdev);
210 return ERR_PTR(-ENOMEM);
211}
212
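/*
 * callback for iterate_extent_inodes(): resolve one inode that references
 * the corrupted extent to its file paths and print a warning for each
 */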
213static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
214{
215 u64 isize;
216 u32 nlink;
217 int ret;
218 int i;
219 struct extent_buffer *eb;
220 struct btrfs_inode_item *inode_item;
221 struct scrub_warning *swarn = ctx;
222 struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
223 struct inode_fs_paths *ipath = NULL;
224 struct btrfs_root *local_root;
225 struct btrfs_key root_key;
226
227 root_key.objectid = root;
228 root_key.type = BTRFS_ROOT_ITEM_KEY;
229 root_key.offset = (u64)-1;
230 local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
231 if (IS_ERR(local_root)) {
232 ret = PTR_ERR(local_root);
233 goto err;
234 }
235
236 ret = inode_item_info(inum, 0, local_root, swarn->path);
237 if (ret) {
238 btrfs_release_path(swarn->path);
239 goto err;
240 }
241
242 eb = swarn->path->nodes[0];
243 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
244 struct btrfs_inode_item);
245 isize = btrfs_inode_size(eb, inode_item);
246 nlink = btrfs_inode_nlink(eb, inode_item);
247 btrfs_release_path(swarn->path);
248
249 ipath = init_ipath(4096, local_root, swarn->path);
250 ret = paths_from_inode(inum, ipath);
251
252 if (ret < 0)
253 goto err;
254
255 /*
256 * we deliberately ignore the fact that ipath might have been too
257 * small to hold all of the paths here
258 */
259 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
260 printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
261 "%s, sector %llu, root %llu, inode %llu, offset %llu, "
262 "length %llu, links %u (path: %s)\n", swarn->errstr,
263 swarn->logical, swarn->dev->name,
264 (unsigned long long)swarn->sector, root, inum, offset,
265 min(isize - offset, (u64)PAGE_SIZE), nlink,
266 ipath->fspath->str[i]);
267
268 free_ipath(ipath);
269 return 0;
270
271err:
272 printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
273 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
274 "resolving failed with ret=%d\n", swarn->errstr,
275 swarn->logical, swarn->dev->name,
276 (unsigned long long)swarn->sector, root, inum, offset, ret);
277
278 free_ipath(ipath);
279 return 0;
280}
281
282static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
283 int ix)
284{
285 struct btrfs_device *dev = sbio->sdev->dev;
286 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
287 struct btrfs_path *path;
288 struct btrfs_key found_key;
289 struct extent_buffer *eb;
290 struct btrfs_extent_item *ei;
291 struct scrub_warning swarn;
292 u32 item_size;
293 int ret;
294 u64 ref_root;
295 u8 ref_level;
296 unsigned long ptr = 0;
297 const int bufsize = 4096;
298 u64 extent_offset;
299
300 path = btrfs_alloc_path();
301
302 swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
303 swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
304 swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
305 swarn.logical = sbio->logical + ix * PAGE_SIZE;
306 swarn.errstr = errstr;
307 swarn.dev = dev;
308 swarn.msg_bufsize = bufsize;
309 swarn.scratch_bufsize = bufsize;
310
311 if (!path || !swarn.scratch_buf || !swarn.msg_buf)
312 goto out;
313
314 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
315 if (ret < 0)
316 goto out;
317
318 extent_offset = swarn.logical - found_key.objectid;
319 swarn.extent_item_size = found_key.offset;
320
321 eb = path->nodes[0];
322 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
323 item_size = btrfs_item_size_nr(eb, path->slots[0]);
324
325 if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
326 do {
327 ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
328 &ref_root, &ref_level);
329 printk(KERN_WARNING "%s at logical %llu on dev %s, "
330 "sector %llu: metadata %s (level %d) in tree "
331 "%llu\n", errstr, swarn.logical, dev->name,
332 (unsigned long long)swarn.sector,
333 ref_level ? "node" : "leaf",
334 ret < 0 ? -1 : ref_level,
335 ret < 0 ? -1 : ref_root);
336 } while (ret != 1);
337 } else {
338 swarn.path = path;
339 iterate_extent_inodes(fs_info, path, found_key.objectid,
340 extent_offset,
341 scrub_print_warning_inode, &swarn);
342 }
343
344out:
345 btrfs_free_path(path);
346 kfree(swarn.scratch_buf);
347 kfree(swarn.msg_buf);
348}
349
350/*
351 * scrub_recheck_error gets called when either verification of the page
352 * failed or the bio failed to read, e.g. with EIO. In the latter case,
353 * recheck_error gets called for every page in the bio, even though only
354 * one may be bad
355 */
356static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
357{
358 struct scrub_dev *sdev = sbio->sdev;
359 u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
360 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
361 DEFAULT_RATELIMIT_BURST);
362
363 if (sbio->err) {
364 if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
365 sbio->bio->bi_io_vec[ix].bv_page) == 0) {
366 if (scrub_fixup_check(sbio, ix) == 0)
367 return 0;
368 }
369 if (__ratelimit(&_rs))
370 scrub_print_warning("i/o error", sbio, ix);
371 } else {
372 if (__ratelimit(&_rs))
373 scrub_print_warning("checksum error", sbio, ix);
374 }
375
376 spin_lock(&sdev->stat_lock);
377 ++sdev->stat.read_errors;
378 spin_unlock(&sdev->stat_lock);
379
380 scrub_fixup(sbio, ix);
381 return 1;
382}
383
384static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
385{
386 int ret = 1;
387 struct page *page;
388 void *buffer;
389 u64 flags = sbio->spag[ix].flags;
390
391 page = sbio->bio->bi_io_vec[ix].bv_page;
392 buffer = kmap_atomic(page, KM_USER0);
393 if (flags & BTRFS_EXTENT_FLAG_DATA) {
394 ret = scrub_checksum_data(sbio->sdev,
395 sbio->spag + ix, buffer);
396 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
397 ret = scrub_checksum_tree_block(sbio->sdev,
398 sbio->spag + ix,
399 sbio->logical + ix * PAGE_SIZE,
400 buffer);
401 } else {
402 WARN_ON(1);
403 }
404 kunmap_atomic(buffer, KM_USER0);
405
406 return ret;
407}
408
409static void scrub_fixup_end_io(struct bio *bio, int err)
410{
411 complete((struct completion *)bio->bi_private);
412}
413
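/*
 * a page failed verification: look for a good copy on another mirror and,
 * unless the scrub is read-only, write it back over the bad copy
 */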
414static void scrub_fixup(struct scrub_bio *sbio, int ix)
415{
416 struct scrub_dev *sdev = sbio->sdev;
417 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
418 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
419 struct btrfs_multi_bio *multi = NULL;
420 u64 logical = sbio->logical + ix * PAGE_SIZE;
421 u64 length;
422 int i;
423 int ret;
424 DECLARE_COMPLETION_ONSTACK(complete);
425
426 if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
427 (sbio->spag[ix].have_csum == 0)) {
428 /*
429 * nodatasum, don't try to fix anything
430 * FIXME: we can do better, open the inode and trigger a
431 * writeback
432 */
433 goto uncorrectable;
434 }
435
436 length = PAGE_SIZE;
437 ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
438 &multi, 0);
439 if (ret || !multi || length < PAGE_SIZE) {
440 printk(KERN_ERR
441 "scrub_fixup: btrfs_map_block failed us for %llu\n",
442 (unsigned long long)logical);
443 WARN_ON(1);
444 return;
445 }
446
447 if (multi->num_stripes == 1)
448 /* there aren't any replicas */
449 goto uncorrectable;
450
451 /*
452 * first find a good copy
453 */
454 for (i = 0; i < multi->num_stripes; ++i) {
455 if (i + 1 == sbio->spag[ix].mirror_num)
456 continue;
457
458 if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
459 multi->stripes[i].physical >> 9,
460 sbio->bio->bi_io_vec[ix].bv_page)) {
461 /* I/O-error, this is not a good copy */
462 continue;
463 }
464
465 if (scrub_fixup_check(sbio, ix) == 0)
466 break;
467 }
468 if (i == multi->num_stripes)
469 goto uncorrectable;
470
471 if (!sdev->readonly) {
472 /*
473 * bi_io_vec[ix].bv_page now contains good data, write it back
474 */
475 if (scrub_fixup_io(WRITE, sdev->dev->bdev,
476 (sbio->physical + ix * PAGE_SIZE) >> 9,
477 sbio->bio->bi_io_vec[ix].bv_page)) {
478 /* I/O-error, writeback failed, give up */
479 goto uncorrectable;
480 }
481 }
482
483 kfree(multi);
484 spin_lock(&sdev->stat_lock);
485 ++sdev->stat.corrected_errors;
486 spin_unlock(&sdev->stat_lock);
487
488 printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
489 (unsigned long long)logical);
490 return;
491
492uncorrectable:
493 kfree(multi);
494 spin_lock(&sdev->stat_lock);
495 ++sdev->stat.uncorrectable_errors;
496 spin_unlock(&sdev->stat_lock);
497
498 printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
499 "logical %llu\n", (unsigned long long)logical);
500}
501
502static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
503 struct page *page)
504{
505 struct bio *bio = NULL;
506 int ret;
507 DECLARE_COMPLETION_ONSTACK(complete);
508
509 bio = bio_alloc(GFP_NOFS, 1);
510 bio->bi_bdev = bdev;
511 bio->bi_sector = sector;
512 bio_add_page(bio, page, PAGE_SIZE, 0);
513 bio->bi_end_io = scrub_fixup_end_io;
514 bio->bi_private = &complete;
515 submit_bio(rw, bio);
516
517 /* this will also unplug the queue */
518 wait_for_completion(&complete);
519
520 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
521 bio_put(bio);
522 return ret;
523}
524
525static void scrub_bio_end_io(struct bio *bio, int err)
526{
527 struct scrub_bio *sbio = bio->bi_private;
528 struct scrub_dev *sdev = sbio->sdev;
529 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
530
531 sbio->err = err;
532 sbio->bio = bio;
533
534 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
535}
536
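/*
 * worker function: verify all pages of a completed bio; pages that fail
 * are handed to scrub_recheck_error() for rereading/repair, afterwards
 * the sbio is put back on the free list
 */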
537static void scrub_checksum(struct btrfs_work *work)
538{
539 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
540 struct scrub_dev *sdev = sbio->sdev;
541 struct page *page;
542 void *buffer;
543 int i;
544 u64 flags;
545 u64 logical;
546 int ret;
547
548 if (sbio->err) {
549 ret = 0;
550 for (i = 0; i < sbio->count; ++i)
551 ret |= scrub_recheck_error(sbio, i);
552 if (!ret) {
553 spin_lock(&sdev->stat_lock);
554 ++sdev->stat.unverified_errors;
555 spin_unlock(&sdev->stat_lock);
556 }
557
558 sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
559 sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
560 sbio->bio->bi_phys_segments = 0;
561 sbio->bio->bi_idx = 0;
562
563 for (i = 0; i < sbio->count; i++) {
564 struct bio_vec *bi;
565 bi = &sbio->bio->bi_io_vec[i];
566 bi->bv_offset = 0;
567 bi->bv_len = PAGE_SIZE;
568 }
569 goto out;
570 }
571 for (i = 0; i < sbio->count; ++i) {
572 page = sbio->bio->bi_io_vec[i].bv_page;
573 buffer = kmap_atomic(page, KM_USER0);
574 flags = sbio->spag[i].flags;
575 logical = sbio->logical + i * PAGE_SIZE;
576 ret = 0;
577 if (flags & BTRFS_EXTENT_FLAG_DATA) {
578 ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
579 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
580 ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
581 logical, buffer);
582 } else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
583 BUG_ON(i);
584 (void)scrub_checksum_super(sbio, buffer);
585 } else {
586 WARN_ON(1);
587 }
588 kunmap_atomic(buffer, KM_USER0);
589 if (ret) {
590 ret = scrub_recheck_error(sbio, i);
591 if (!ret) {
592 spin_lock(&sdev->stat_lock);
593 ++sdev->stat.unverified_errors;
594 spin_unlock(&sdev->stat_lock);
595 }
596 }
597 }
598
599out:
600 scrub_free_bio(sbio->bio);
601 sbio->bio = NULL;
602 spin_lock(&sdev->list_lock);
603 sbio->next_free = sdev->first_free;
604 sdev->first_free = sbio->index;
605 spin_unlock(&sdev->list_lock);
606 atomic_dec(&sdev->in_flight);
607 wake_up(&sdev->list_wait);
608}
609
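/*
 * verify the checksum of one data page against the csum collected from the
 * csum tree; returns 1 on mismatch, 0 otherwise (also when no csum exists)
 */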
610static int scrub_checksum_data(struct scrub_dev *sdev,
611 struct scrub_page *spag, void *buffer)
612{
613 u8 csum[BTRFS_CSUM_SIZE];
614 u32 crc = ~(u32)0;
615 int fail = 0;
616 struct btrfs_root *root = sdev->dev->dev_root;
617
618 if (!spag->have_csum)
619 return 0;
620
621 crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
622 btrfs_csum_final(crc, csum);
623 if (memcmp(csum, spag->csum, sdev->csum_size))
624 fail = 1;
625
626 spin_lock(&sdev->stat_lock);
627 ++sdev->stat.data_extents_scrubbed;
628 sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
629 if (fail)
630 ++sdev->stat.csum_errors;
631 spin_unlock(&sdev->stat_lock);
632
633 return fail;
634}
635
636static int scrub_checksum_tree_block(struct scrub_dev *sdev,
637 struct scrub_page *spag, u64 logical,
638 void *buffer)
639{
640 struct btrfs_header *h;
641 struct btrfs_root *root = sdev->dev->dev_root;
642 struct btrfs_fs_info *fs_info = root->fs_info;
643 u8 csum[BTRFS_CSUM_SIZE];
644 u32 crc = ~(u32)0;
645 int fail = 0;
646 int crc_fail = 0;
647
648 /*
649 * we don't use the getter functions here, as we
650 * a) don't have an extent buffer and
651 * b) the page is already kmapped
652 */
653 h = (struct btrfs_header *)buffer;
654
655 if (logical != le64_to_cpu(h->bytenr))
656 ++fail;
657
658 if (spag->generation != le64_to_cpu(h->generation))
659 ++fail;
660
661 if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
662 ++fail;
663
664 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
665 BTRFS_UUID_SIZE))
666 ++fail;
667
668 crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
669 PAGE_SIZE - BTRFS_CSUM_SIZE);
670 btrfs_csum_final(crc, csum);
671 if (memcmp(csum, h->csum, sdev->csum_size))
672 ++crc_fail;
673
674 spin_lock(&sdev->stat_lock);
675 ++sdev->stat.tree_extents_scrubbed;
676 sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
677 if (crc_fail)
678 ++sdev->stat.csum_errors;
679 if (fail)
680 ++sdev->stat.verify_errors;
681 spin_unlock(&sdev->stat_lock);
682
683 return fail || crc_fail;
684}
685
686static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
687{
688 struct btrfs_super_block *s;
689 u64 logical;
690 struct scrub_dev *sdev = sbio->sdev;
691 struct btrfs_root *root = sdev->dev->dev_root;
692 struct btrfs_fs_info *fs_info = root->fs_info;
693 u8 csum[BTRFS_CSUM_SIZE];
694 u32 crc = ~(u32)0;
695 int fail = 0;
696
697 s = (struct btrfs_super_block *)buffer;
698 logical = sbio->logical;
699
700 if (logical != le64_to_cpu(s->bytenr))
701 ++fail;
702
703 if (sbio->spag[0].generation != le64_to_cpu(s->generation))
704 ++fail;
705
706 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
707 ++fail;
708
709 crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
710 PAGE_SIZE - BTRFS_CSUM_SIZE);
711 btrfs_csum_final(crc, csum);
712 if (memcmp(csum, s->csum, sbio->sdev->csum_size))
713 ++fail;
714
715 if (fail) {
716 /*
717 * if we find an error in a super block, we just report it.
718 * They will get written with the next transaction commit
719 * anyway
720 */
721 spin_lock(&sdev->stat_lock);
722 ++sdev->stat.super_errors;
723 spin_unlock(&sdev->stat_lock);
724 }
725
726 return fail;
727}
728
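/*
 * allocate a bio and backing pages for the currently filled sbio and
 * submit it for reading; completion is handled in scrub_bio_end_io
 */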
729static int scrub_submit(struct scrub_dev *sdev)
730{
731 struct scrub_bio *sbio;
732 struct bio *bio;
733 int i;
734
735 if (sdev->curr == -1)
736 return 0;
737
738 sbio = sdev->bios[sdev->curr];
739
740 bio = bio_alloc(GFP_NOFS, sbio->count);
741 if (!bio)
742 goto nomem;
743
744 bio->bi_private = sbio;
745 bio->bi_end_io = scrub_bio_end_io;
746 bio->bi_bdev = sdev->dev->bdev;
747 bio->bi_sector = sbio->physical >> 9;
748
749 for (i = 0; i < sbio->count; ++i) {
750 struct page *page;
751 int ret;
752
753 page = alloc_page(GFP_NOFS);
754 if (!page)
755 goto nomem;
756
757 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
758 if (!ret) {
759 __free_page(page);
760 goto nomem;
761 }
762 }
763
764 sbio->err = 0;
765 sdev->curr = -1;
766 atomic_inc(&sdev->in_flight);
767
768 submit_bio(READ, bio);
769
770 return 0;
771
772nomem:
773 scrub_free_bio(bio);
774
775 return -ENOMEM;
776}
777
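/*
 * queue one page of an extent into the current sbio; the sbio is submitted
 * when it is full, when the next page is not physically or logically
 * contiguous, or when @force is set
 */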
778static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
779 u64 physical, u64 flags, u64 gen, u64 mirror_num,
780 u8 *csum, int force)
781{
782 struct scrub_bio *sbio;
783
784again:
785 /*
786 * grab a fresh bio or wait for one to become available
787 */
788 while (sdev->curr == -1) {
789 spin_lock(&sdev->list_lock);
790 sdev->curr = sdev->first_free;
791 if (sdev->curr != -1) {
792 sdev->first_free = sdev->bios[sdev->curr]->next_free;
793 sdev->bios[sdev->curr]->next_free = -1;
794 sdev->bios[sdev->curr]->count = 0;
795 spin_unlock(&sdev->list_lock);
796 } else {
797 spin_unlock(&sdev->list_lock);
798 wait_event(sdev->list_wait, sdev->first_free != -1);
799 }
800 }
801 sbio = sdev->bios[sdev->curr];
802 if (sbio->count == 0) {
803 sbio->physical = physical;
804 sbio->logical = logical;
805 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
806 sbio->logical + sbio->count * PAGE_SIZE != logical) {
807 int ret;
808
809 ret = scrub_submit(sdev);
810 if (ret)
811 return ret;
812 goto again;
813 }
814 sbio->spag[sbio->count].flags = flags;
815 sbio->spag[sbio->count].generation = gen;
816 sbio->spag[sbio->count].have_csum = 0;
817 sbio->spag[sbio->count].mirror_num = mirror_num;
818 if (csum) {
819 sbio->spag[sbio->count].have_csum = 1;
820 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
821 }
822 ++sbio->count;
823 if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
824 int ret;
825
826 ret = scrub_submit(sdev);
827 if (ret)
828 return ret;
829 }
830
831 return 0;
832}
833
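/*
 * find the data checksum for @logical in the list collected by
 * btrfs_lookup_csums_range(); sums that lie entirely before @logical are
 * dropped. returns 1 if a csum was copied into @csum, 0 otherwise
 */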
834static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
835 u8 *csum)
836{
837 struct btrfs_ordered_sum *sum = NULL;
838 int ret = 0;
839 unsigned long i;
840 unsigned long num_sectors;
841 u32 sectorsize = sdev->dev->dev_root->sectorsize;
842
843 while (!list_empty(&sdev->csum_list)) {
844 sum = list_first_entry(&sdev->csum_list,
845 struct btrfs_ordered_sum, list);
846 if (sum->bytenr > logical)
847 return 0;
848 if (sum->bytenr + sum->len > logical)
849 break;
850
851 ++sdev->stat.csum_discards;
852 list_del(&sum->list);
853 kfree(sum);
854 sum = NULL;
855 }
856 if (!sum)
857 return 0;
858
859 num_sectors = sum->len / sectorsize;
860 for (i = 0; i < num_sectors; ++i) {
861 if (sum->sums[i].bytenr == logical) {
862 memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
863 ret = 1;
864 break;
865 }
866 }
867 if (ret && i == num_sectors - 1) {
868 list_del(&sum->list);
869 kfree(sum);
870 }
871 return ret;
872}
873
874/* scrub extent tries to collect up to 64 kB for each bio */
875static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
876 u64 physical, u64 flags, u64 gen, u64 mirror_num)
877{
878 int ret;
879 u8 csum[BTRFS_CSUM_SIZE];
880
881 while (len) {
882 u64 l = min_t(u64, len, PAGE_SIZE);
883 int have_csum = 0;
884
885 if (flags & BTRFS_EXTENT_FLAG_DATA) {
886 /* push csums to sbio */
887 have_csum = scrub_find_csum(sdev, logical, l, csum);
888 if (have_csum == 0)
889 ++sdev->stat.no_csum;
890 }
891 ret = scrub_page(sdev, logical, l, physical, flags, gen,
892 mirror_num, have_csum ? csum : NULL, 0);
893 if (ret)
894 return ret;
895 len -= l;
896 logical += l;
897 physical += l;
898 }
899 return 0;
900}
901
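/*
 * scrub one stripe of a chunk: prefetch the extent tree entries, collect
 * the data checksums for the stripe, then walk all extent items within it
 * and hand them to scrub_extent(). also honors pause and cancel requests.
 */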
902static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
903 struct map_lookup *map, int num, u64 base, u64 length)
904{
905 struct btrfs_path *path;
906 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
907 struct btrfs_root *root = fs_info->extent_root;
908 struct btrfs_root *csum_root = fs_info->csum_root;
909 struct btrfs_extent_item *extent;
910 struct blk_plug plug;
911 u64 flags;
912 int ret;
913 int slot;
914 int i;
915 u64 nstripes;
916 int start_stripe;
917 struct extent_buffer *l;
918 struct btrfs_key key;
919 u64 physical;
920 u64 logical;
921 u64 generation;
922 u64 mirror_num;
923
924 u64 increment = map->stripe_len;
925 u64 offset;
926
927 nstripes = length;
928 offset = 0;
929 do_div(nstripes, map->stripe_len);
930 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
931 offset = map->stripe_len * num;
932 increment = map->stripe_len * map->num_stripes;
933 mirror_num = 1;
934 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
935 int factor = map->num_stripes / map->sub_stripes;
936 offset = map->stripe_len * (num / map->sub_stripes);
937 increment = map->stripe_len * factor;
938 mirror_num = num % map->sub_stripes + 1;
939 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
940 increment = map->stripe_len;
941 mirror_num = num % map->num_stripes + 1;
942 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
943 increment = map->stripe_len;
944 mirror_num = num % map->num_stripes + 1;
945 } else {
946 increment = map->stripe_len;
947 mirror_num = 1;
948 }
949
950 path = btrfs_alloc_path();
951 if (!path)
952 return -ENOMEM;
953
954 path->reada = 2;
955 path->search_commit_root = 1;
956 path->skip_locking = 1;
957
958 /*
959 * find all extents for each stripe and just read them to get
960 * them into the page cache
961 * FIXME: we can do better. build a more intelligent prefetching
962 */
963 logical = base + offset;
964 physical = map->stripes[num].physical;
965 ret = 0;
966 for (i = 0; i < nstripes; ++i) {
967 key.objectid = logical;
968 key.type = BTRFS_EXTENT_ITEM_KEY;
969 key.offset = (u64)0;
970
971 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
972 if (ret < 0)
973 goto out_noplug;
974
975 /*
976 * we might miss half an extent here, but that doesn't matter,
977 * as it's only the prefetch
978 */
979 while (1) {
980 l = path->nodes[0];
981 slot = path->slots[0];
982 if (slot >= btrfs_header_nritems(l)) {
983 ret = btrfs_next_leaf(root, path);
984 if (ret == 0)
985 continue;
986 if (ret < 0)
987 goto out_noplug;
988
989 break;
990 }
991 btrfs_item_key_to_cpu(l, &key, slot);
992
993 if (key.objectid >= logical + map->stripe_len)
994 break;
995
996 path->slots[0]++;
997 }
998 btrfs_release_path(path);
999 logical += increment;
1000 physical += map->stripe_len;
1001 cond_resched();
1002 }
1003
1004 /*
1005 * collect all data csums for the stripe to avoid seeking during
1006 * the scrub. This might currently (crc32) end up to be about 1MB
1007 */
1008 start_stripe = 0;
1009 blk_start_plug(&plug);
1010again:
1011 logical = base + offset + start_stripe * increment;
1012 for (i = start_stripe; i < nstripes; ++i) {
1013 ret = btrfs_lookup_csums_range(csum_root, logical,
1014 logical + map->stripe_len - 1,
1015 &sdev->csum_list, 1);
1016 if (ret)
1017 goto out;
1018
1019 logical += increment;
1020 cond_resched();
1021 }
1022 /*
1023 * now find all extents for each stripe and scrub them
1024 */
1025 logical = base + offset + start_stripe * increment;
1026 physical = map->stripes[num].physical + start_stripe * map->stripe_len;
1027 ret = 0;
1028 for (i = start_stripe; i < nstripes; ++i) {
1029 /*
1030 * canceled?
1031 */
1032 if (atomic_read(&fs_info->scrub_cancel_req) ||
1033 atomic_read(&sdev->cancel_req)) {
1034 ret = -ECANCELED;
1035 goto out;
1036 }
1037 /*
1038 * check to see if we have to pause
1039 */
1040 if (atomic_read(&fs_info->scrub_pause_req)) {
1041 /* push queued extents */
1042 scrub_submit(sdev);
1043 wait_event(sdev->list_wait,
1044 atomic_read(&sdev->in_flight) == 0);
1045 atomic_inc(&fs_info->scrubs_paused);
1046 wake_up(&fs_info->scrub_pause_wait);
1047 mutex_lock(&fs_info->scrub_lock);
1048 while (atomic_read(&fs_info->scrub_pause_req)) {
1049 mutex_unlock(&fs_info->scrub_lock);
1050 wait_event(fs_info->scrub_pause_wait,
1051 atomic_read(&fs_info->scrub_pause_req) == 0);
1052 mutex_lock(&fs_info->scrub_lock);
1053 }
1054 atomic_dec(&fs_info->scrubs_paused);
1055 mutex_unlock(&fs_info->scrub_lock);
1056 wake_up(&fs_info->scrub_pause_wait);
1057 scrub_free_csums(sdev);
1058 start_stripe = i;
1059 goto again;
1060 }
1061
1062 key.objectid = logical;
1063 key.type = BTRFS_EXTENT_ITEM_KEY;
1064 key.offset = (u64)0;
1065
1066 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1067 if (ret < 0)
1068 goto out;
1069 if (ret > 0) {
1070 ret = btrfs_previous_item(root, path, 0,
1071 BTRFS_EXTENT_ITEM_KEY);
1072 if (ret < 0)
1073 goto out;
1074 if (ret > 0) {
1075 /* there's no smaller item, so stick with the
1076 * larger one */
1077 btrfs_release_path(path);
1078 ret = btrfs_search_slot(NULL, root, &key,
1079 path, 0, 0);
1080 if (ret < 0)
1081 goto out;
1082 }
1083 }
1084
1085 while (1) {
1086 l = path->nodes[0];
1087 slot = path->slots[0];
1088 if (slot >= btrfs_header_nritems(l)) {
1089 ret = btrfs_next_leaf(root, path);
1090 if (ret == 0)
1091 continue;
1092 if (ret < 0)
1093 goto out;
1094
1095 break;
1096 }
1097 btrfs_item_key_to_cpu(l, &key, slot);
1098
1099 if (key.objectid + key.offset <= logical)
1100 goto next;
1101
1102 if (key.objectid >= logical + map->stripe_len)
1103 break;
1104
1105 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1106 goto next;
1107
1108 extent = btrfs_item_ptr(l, slot,
1109 struct btrfs_extent_item);
1110 flags = btrfs_extent_flags(l, extent);
1111 generation = btrfs_extent_generation(l, extent);
1112
1113 if (key.objectid < logical &&
1114 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
1115 printk(KERN_ERR
1116 "btrfs scrub: tree block %llu spanning "
1117 "stripes, ignored. logical=%llu\n",
1118 (unsigned long long)key.objectid,
1119 (unsigned long long)logical);
1120 goto next;
1121 }
1122
1123 /*
1124 * trim extent to this stripe
1125 */
1126 if (key.objectid < logical) {
1127 key.offset -= logical - key.objectid;
1128 key.objectid = logical;
1129 }
1130 if (key.objectid + key.offset >
1131 logical + map->stripe_len) {
1132 key.offset = logical + map->stripe_len -
1133 key.objectid;
1134 }
1135
1136 ret = scrub_extent(sdev, key.objectid, key.offset,
1137 key.objectid - logical + physical,
1138 flags, generation, mirror_num);
1139 if (ret)
1140 goto out;
1141
1142next:
1143 path->slots[0]++;
1144 }
1145 btrfs_release_path(path);
1146 logical += increment;
1147 physical += map->stripe_len;
1148 spin_lock(&sdev->stat_lock);
1149 sdev->stat.last_physical = physical;
1150 spin_unlock(&sdev->stat_lock);
1151 }
1152 /* push queued extents */
1153 scrub_submit(sdev);
1154
1155out:
1156 blk_finish_plug(&plug);
1157out_noplug:
1158 btrfs_free_path(path);
1159 return ret < 0 ? ret : 0;
1160}
1161
1162static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
1163 u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
1164{
1165 struct btrfs_mapping_tree *map_tree =
1166 &sdev->dev->dev_root->fs_info->mapping_tree;
1167 struct map_lookup *map;
1168 struct extent_map *em;
1169 int i;
1170 int ret = -EINVAL;
1171
1172 read_lock(&map_tree->map_tree.lock);
1173 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
1174 read_unlock(&map_tree->map_tree.lock);
1175
1176 if (!em)
1177 return -EINVAL;
1178
1179 map = (struct map_lookup *)em->bdev;
1180 if (em->start != chunk_offset)
1181 goto out;
1182
1183 if (em->len < length)
1184 goto out;
1185
1186 for (i = 0; i < map->num_stripes; ++i) {
1187 if (map->stripes[i].dev == sdev->dev) {
1188 ret = scrub_stripe(sdev, map, i, chunk_offset, length);
1189 if (ret)
1190 goto out;
1191 }
1192 }
1193out:
1194 free_extent_map(em);
1195
1196 return ret;
1197}
1198
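/*
 * walk all dev extents of the device between @start and @end and scrub
 * the corresponding chunks, taking a block group reference so the chunk
 * cannot go away while it is being scrubbed
 */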
1199static noinline_for_stack
1200int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1201{
1202 struct btrfs_dev_extent *dev_extent = NULL;
1203 struct btrfs_path *path;
1204 struct btrfs_root *root = sdev->dev->dev_root;
1205 struct btrfs_fs_info *fs_info = root->fs_info;
1206 u64 length;
1207 u64 chunk_tree;
1208 u64 chunk_objectid;
1209 u64 chunk_offset;
1210 int ret;
1211 int slot;
1212 struct extent_buffer *l;
1213 struct btrfs_key key;
1214 struct btrfs_key found_key;
1215 struct btrfs_block_group_cache *cache;
1216
1217 path = btrfs_alloc_path();
1218 if (!path)
1219 return -ENOMEM;
1220
1221 path->reada = 2;
1222 path->search_commit_root = 1;
1223 path->skip_locking = 1;
1224
1225 key.objectid = sdev->dev->devid;
1226 key.offset = 0ull;
1227 key.type = BTRFS_DEV_EXTENT_KEY;
1228
1229
1230 while (1) {
1231 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1232 if (ret < 0)
1233 break;
1234 if (ret > 0) {
1235 if (path->slots[0] >=
1236 btrfs_header_nritems(path->nodes[0])) {
1237 ret = btrfs_next_leaf(root, path);
1238 if (ret)
1239 break;
1240 }
1241 }
1242
1243 l = path->nodes[0];
1244 slot = path->slots[0];
1245
1246 btrfs_item_key_to_cpu(l, &found_key, slot);
1247
1248 if (found_key.objectid != sdev->dev->devid)
1249 break;
1250
1251 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
1252 break;
1253
1254 if (found_key.offset >= end)
1255 break;
1256
1257 if (found_key.offset < key.offset)
1258 break;
1259
1260 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1261 length = btrfs_dev_extent_length(l, dev_extent);
1262
1263 if (found_key.offset + length <= start) {
1264 key.offset = found_key.offset + length;
1265 btrfs_release_path(path);
1266 continue;
1267 }
1268
1269 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1270 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1271 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1272
1273 /*
1274 * get a reference on the corresponding block group to prevent
1275 * the chunk from going away while we scrub it
1276 */
1277 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
1278 if (!cache) {
1279 ret = -ENOENT;
1280 break;
1281 }
1282 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
1283 chunk_offset, length);
1284 btrfs_put_block_group(cache);
1285 if (ret)
1286 break;
1287
1288 key.offset = found_key.offset + length;
1289 btrfs_release_path(path);
1290 }
1291
1292 btrfs_free_path(path);
1293
1294 /*
1295 * ret can still be 1 from search_slot or next_leaf,
1296 * that's not an error
1297 */
1298 return ret < 0 ? ret : 0;
1299}
1300
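/*
 * read and verify all super block copies of the device. errors are only
 * counted here; super blocks get rewritten by the next transaction commit
 * anyway
 */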
1301static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
1302{
1303 int i;
1304 u64 bytenr;
1305 u64 gen;
1306 int ret;
1307 struct btrfs_device *device = sdev->dev;
1308 struct btrfs_root *root = device->dev_root;
1309
1310 gen = root->fs_info->last_trans_committed;
1311
1312 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1313 bytenr = btrfs_sb_offset(i);
1314 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
1315 break;
1316
1317 ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
1318 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
1319 if (ret)
1320 return ret;
1321 }
1322 wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
1323
1324 return 0;
1325}
1326
1327/*
1328 * get a reference count on fs_info->scrub_workers. start worker if necessary
1329 */
1330static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
1331{
1332 struct btrfs_fs_info *fs_info = root->fs_info;
1333
1334 mutex_lock(&fs_info->scrub_lock);
1335 if (fs_info->scrub_workers_refcnt == 0) {
1336 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1337 fs_info->thread_pool_size, &fs_info->generic_worker);
1338 fs_info->scrub_workers.idle_thresh = 4;
1339 btrfs_start_workers(&fs_info->scrub_workers, 1);
1340 }
1341 ++fs_info->scrub_workers_refcnt;
1342 mutex_unlock(&fs_info->scrub_lock);
1343
1344 return 0;
1345}
1346
1347static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
1348{
1349 struct btrfs_fs_info *fs_info = root->fs_info;
1350
1351 mutex_lock(&fs_info->scrub_lock);
1352 if (--fs_info->scrub_workers_refcnt == 0)
1353 btrfs_stop_workers(&fs_info->scrub_workers);
1354 WARN_ON(fs_info->scrub_workers_refcnt < 0);
1355 mutex_unlock(&fs_info->scrub_lock);
1356}
1357
1358
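/*
 * entry point for scrubbing a single device: sets up the per-device state,
 * scrubs the super blocks and all chunks between @start and @end, and
 * tears everything down again
 */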
1359int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1360 struct btrfs_scrub_progress *progress, int readonly)
1361{
1362 struct scrub_dev *sdev;
1363 struct btrfs_fs_info *fs_info = root->fs_info;
1364 int ret;
1365 struct btrfs_device *dev;
1366
1367 if (btrfs_fs_closing(root->fs_info))
1368 return -EINVAL;
1369
1370 /*
1371 * check some assumptions
1372 */
1373 if (root->sectorsize != PAGE_SIZE ||
1374 root->sectorsize != root->leafsize ||
1375 root->sectorsize != root->nodesize) {
1376 printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
1377 return -EINVAL;
1378 }
1379
1380 ret = scrub_workers_get(root);
1381 if (ret)
1382 return ret;
1383
1384 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1385 dev = btrfs_find_device(root, devid, NULL, NULL);
1386 if (!dev || dev->missing) {
1387 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1388 scrub_workers_put(root);
1389 return -ENODEV;
1390 }
1391 mutex_lock(&fs_info->scrub_lock);
1392
1393 if (!dev->in_fs_metadata) {
1394 mutex_unlock(&fs_info->scrub_lock);
1395 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1396 scrub_workers_put(root);
1397 return -ENODEV;
1398 }
1399
1400 if (dev->scrub_device) {
1401 mutex_unlock(&fs_info->scrub_lock);
1402 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1403 scrub_workers_put(root);
1404 return -EINPROGRESS;
1405 }
1406 sdev = scrub_setup_dev(dev);
1407 if (IS_ERR(sdev)) {
1408 mutex_unlock(&fs_info->scrub_lock);
1409 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1410 scrub_workers_put(root);
1411 return PTR_ERR(sdev);
1412 }
1413 sdev->readonly = readonly;
1414 dev->scrub_device = sdev;
1415
1416 atomic_inc(&fs_info->scrubs_running);
1417 mutex_unlock(&fs_info->scrub_lock);
1418 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1419
1420 down_read(&fs_info->scrub_super_lock);
1421 ret = scrub_supers(sdev);
1422 up_read(&fs_info->scrub_super_lock);
1423
1424 if (!ret)
1425 ret = scrub_enumerate_chunks(sdev, start, end);
1426
1427 wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
1428
1429 atomic_dec(&fs_info->scrubs_running);
1430 wake_up(&fs_info->scrub_pause_wait);
1431
1432 if (progress)
1433 memcpy(progress, &sdev->stat, sizeof(*progress));
1434
1435 mutex_lock(&fs_info->scrub_lock);
1436 dev->scrub_device = NULL;
1437 mutex_unlock(&fs_info->scrub_lock);
1438
1439 scrub_free_dev(sdev);
1440 scrub_workers_put(root);
1441
1442 return ret;
1443}
1444
1445int btrfs_scrub_pause(struct btrfs_root *root)
1446{
1447 struct btrfs_fs_info *fs_info = root->fs_info;
1448
1449 mutex_lock(&fs_info->scrub_lock);
1450 atomic_inc(&fs_info->scrub_pause_req);
1451 while (atomic_read(&fs_info->scrubs_paused) !=
1452 atomic_read(&fs_info->scrubs_running)) {
1453 mutex_unlock(&fs_info->scrub_lock);
1454 wait_event(fs_info->scrub_pause_wait,
1455 atomic_read(&fs_info->scrubs_paused) ==
1456 atomic_read(&fs_info->scrubs_running));
1457 mutex_lock(&fs_info->scrub_lock);
1458 }
1459 mutex_unlock(&fs_info->scrub_lock);
1460
1461 return 0;
1462}
1463
1464int btrfs_scrub_continue(struct btrfs_root *root)
1465{
1466 struct btrfs_fs_info *fs_info = root->fs_info;
1467
1468 atomic_dec(&fs_info->scrub_pause_req);
1469 wake_up(&fs_info->scrub_pause_wait);
1470 return 0;
1471}
1472
1473int btrfs_scrub_pause_super(struct btrfs_root *root)
1474{
1475 down_write(&root->fs_info->scrub_super_lock);
1476 return 0;
1477}
1478
1479int btrfs_scrub_continue_super(struct btrfs_root *root)
1480{
1481 up_write(&root->fs_info->scrub_super_lock);
1482 return 0;
1483}
1484
1485int btrfs_scrub_cancel(struct btrfs_root *root)
1486{
1487 struct btrfs_fs_info *fs_info = root->fs_info;
1488
1489 mutex_lock(&fs_info->scrub_lock);
1490 if (!atomic_read(&fs_info->scrubs_running)) {
1491 mutex_unlock(&fs_info->scrub_lock);
1492 return -ENOTCONN;
1493 }
1494
1495 atomic_inc(&fs_info->scrub_cancel_req);
1496 while (atomic_read(&fs_info->scrubs_running)) {
1497 mutex_unlock(&fs_info->scrub_lock);
1498 wait_event(fs_info->scrub_pause_wait,
1499 atomic_read(&fs_info->scrubs_running) == 0);
1500 mutex_lock(&fs_info->scrub_lock);
1501 }
1502 atomic_dec(&fs_info->scrub_cancel_req);
1503 mutex_unlock(&fs_info->scrub_lock);
1504
1505 return 0;
1506}
1507
1508int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
1509{
1510 struct btrfs_fs_info *fs_info = root->fs_info;
1511 struct scrub_dev *sdev;
1512
1513 mutex_lock(&fs_info->scrub_lock);
1514 sdev = dev->scrub_device;
1515 if (!sdev) {
1516 mutex_unlock(&fs_info->scrub_lock);
1517 return -ENOTCONN;
1518 }
1519 atomic_inc(&sdev->cancel_req);
1520 while (dev->scrub_device) {
1521 mutex_unlock(&fs_info->scrub_lock);
1522 wait_event(fs_info->scrub_pause_wait,
1523 dev->scrub_device == NULL);
1524 mutex_lock(&fs_info->scrub_lock);
1525 }
1526 mutex_unlock(&fs_info->scrub_lock);
1527
1528 return 0;
1529}
1530int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
1531{
1532 struct btrfs_fs_info *fs_info = root->fs_info;
1533 struct btrfs_device *dev;
1534 int ret;
1535
1536 /*
1537 * we have to hold the device_list_mutex here so the device
1538 * does not go away in cancel_dev. FIXME: find a better solution
1539 */
1540 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1541 dev = btrfs_find_device(root, devid, NULL, NULL);
1542 if (!dev) {
1543 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1544 return -ENODEV;
1545 }
1546 ret = btrfs_scrub_cancel_dev(root, dev);
1547 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1548
1549 return ret;
1550}
1551
1552int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
1553 struct btrfs_scrub_progress *progress)
1554{
1555 struct btrfs_device *dev;
1556 struct scrub_dev *sdev = NULL;
1557
1558 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1559 dev = btrfs_find_device(root, devid, NULL, NULL);
1560 if (dev)
1561 sdev = dev->scrub_device;
1562 if (sdev)
1563 memcpy(progress, &sdev->stat, sizeof(*progress));
1564 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1565
1566 return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
1567}