/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ordered-data.h"
#include "transaction.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * possible.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
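/*
 * Worked out (assuming 4 KiB pages, which is what the per-bio figures above
 * refer to, not something the code depends on): 32 pages per bio gives
 * 128 KiB per read or write bio, and 64 bios per scrub context gives up to
 * 64 * 128 KiB = 8 MiB of I/O in flight per device.
 */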
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
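/*
 * Example (again assuming 4 KiB pages): 16 pages per block covers a 64 KiB
 * node/leaf, the largest metadata block size handled here.
 */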
struct scrub_recover {
	struct btrfs_bio	*bbio;
struct scrub_page {
	struct scrub_block	*sblock;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			physical_for_dev_replace;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];
	struct scrub_recover	*recover;
struct scrub_bio {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	struct btrfs_work	work;
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1; /* also sets header_error */

	/* The following is for the data used to check parity */
	/* It is for the data with checksum */
	unsigned int		data_corrected:1;
/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading data or checking data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	int			pages_per_rd_bio;
	struct scrub_wr_ctx	wr_ctx;
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct btrfs_root	*root;
	struct btrfs_work	work;
struct scrub_nocow_inode {
	struct list_head	list;
struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	struct btrfs_device	*dev;
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
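/*
 * Note that scrub_blocked_if_needed() bumps scrubs_paused around the wait
 * above, so whoever raised scrub_pause_req can observe this scrub as paused
 * while it is blocked here.
 */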
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * the check of the @scrubs_running == @scrubs_paused condition
	 * inside wait_event() is not an atomic operation,
	 * which means we may inc/dec @scrubs_running/paused
	 * at any time. Let's wake up @scrub_pause_wait as
	 * much as we can to let the transaction commit be blocked less.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}
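/*
 * Every scrub_pending_trans_workers_inc() must be balanced by a
 * scrub_pending_trans_workers_dec() once the worker has finished,
 * see e.g. scrub_fixup_nodatasum().
 */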
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	scrub_free_wr_ctx(&sctx->wr_ctx);
413 /* this can happen when scrub is cancelled */
414 if (sctx
->curr
!= -1) {
415 struct scrub_bio
*sbio
= sctx
->bios
[sctx
->curr
];
417 for (i
= 0; i
< sbio
->page_count
; i
++) {
418 WARN_ON(!sbio
->pagev
[i
]->page
);
419 scrub_block_put(sbio
->pagev
[i
]->sblock
);
424 for (i
= 0; i
< SCRUB_BIOS_PER_SCTX
; ++i
) {
425 struct scrub_bio
*sbio
= sctx
->bios
[i
];
432 scrub_free_csums(sctx
);
436 static noinline_for_stack
437 struct scrub_ctx
*scrub_setup_ctx(struct btrfs_device
*dev
, int is_dev_replace
)
439 struct scrub_ctx
*sctx
;
441 struct btrfs_fs_info
*fs_info
= dev
->dev_root
->fs_info
;
442 int pages_per_rd_bio
;
446 * the setting of pages_per_rd_bio is correct for scrub but might
447 * be wrong for the dev_replace code where we might read from
448 * different devices in the initial huge bios. However, that
449 * code is able to correctly handle the case when adding a page
453 pages_per_rd_bio
= min_t(int, SCRUB_PAGES_PER_RD_BIO
,
454 bio_get_nr_vecs(dev
->bdev
));
456 pages_per_rd_bio
= SCRUB_PAGES_PER_RD_BIO
;
457 sctx
= kzalloc(sizeof(*sctx
), GFP_NOFS
);
460 sctx
->is_dev_replace
= is_dev_replace
;
461 sctx
->pages_per_rd_bio
= pages_per_rd_bio
;
463 sctx
->dev_root
= dev
->dev_root
;
464 for (i
= 0; i
< SCRUB_BIOS_PER_SCTX
; ++i
) {
465 struct scrub_bio
*sbio
;
467 sbio
= kzalloc(sizeof(*sbio
), GFP_NOFS
);
470 sctx
->bios
[i
] = sbio
;
474 sbio
->page_count
= 0;
475 btrfs_init_work(&sbio
->work
, btrfs_scrub_helper
,
476 scrub_bio_end_io_worker
, NULL
, NULL
);
478 if (i
!= SCRUB_BIOS_PER_SCTX
- 1)
479 sctx
->bios
[i
]->next_free
= i
+ 1;
481 sctx
->bios
[i
]->next_free
= -1;
483 sctx
->first_free
= 0;
484 sctx
->nodesize
= dev
->dev_root
->nodesize
;
485 sctx
->sectorsize
= dev
->dev_root
->sectorsize
;
486 atomic_set(&sctx
->bios_in_flight
, 0);
487 atomic_set(&sctx
->workers_pending
, 0);
488 atomic_set(&sctx
->cancel_req
, 0);
489 sctx
->csum_size
= btrfs_super_csum_size(fs_info
->super_copy
);
490 INIT_LIST_HEAD(&sctx
->csum_list
);
492 spin_lock_init(&sctx
->list_lock
);
493 spin_lock_init(&sctx
->stat_lock
);
494 init_waitqueue_head(&sctx
->list_wait
);
496 ret
= scrub_setup_wr_ctx(sctx
, &sctx
->wr_ctx
, fs_info
,
497 fs_info
->dev_replace
.tgtdev
, is_dev_replace
);
499 scrub_free_ctx(sctx
);
505 scrub_free_ctx(sctx
);
506 return ERR_PTR(-ENOMEM
);
509 static int scrub_print_warning_inode(u64 inum
, u64 offset
, u64 root
,
516 struct extent_buffer
*eb
;
517 struct btrfs_inode_item
*inode_item
;
518 struct scrub_warning
*swarn
= warn_ctx
;
519 struct btrfs_fs_info
*fs_info
= swarn
->dev
->dev_root
->fs_info
;
520 struct inode_fs_paths
*ipath
= NULL
;
521 struct btrfs_root
*local_root
;
522 struct btrfs_key root_key
;
524 root_key
.objectid
= root
;
525 root_key
.type
= BTRFS_ROOT_ITEM_KEY
;
526 root_key
.offset
= (u64
)-1;
527 local_root
= btrfs_read_fs_root_no_name(fs_info
, &root_key
);
528 if (IS_ERR(local_root
)) {
529 ret
= PTR_ERR(local_root
);
533 ret
= inode_item_info(inum
, 0, local_root
, swarn
->path
);
535 btrfs_release_path(swarn
->path
);
539 eb
= swarn
->path
->nodes
[0];
540 inode_item
= btrfs_item_ptr(eb
, swarn
->path
->slots
[0],
541 struct btrfs_inode_item
);
542 isize
= btrfs_inode_size(eb
, inode_item
);
543 nlink
= btrfs_inode_nlink(eb
, inode_item
);
544 btrfs_release_path(swarn
->path
);
546 ipath
= init_ipath(4096, local_root
, swarn
->path
);
548 ret
= PTR_ERR(ipath
);
552 ret
= paths_from_inode(inum
, ipath
);
 * we deliberately ignore the fact that ipath might have been too small to
 * hold all of the paths here
561 for (i
= 0; i
< ipath
->fspath
->elem_cnt
; ++i
)
562 printk_in_rcu(KERN_WARNING
"BTRFS: %s at logical %llu on dev "
563 "%s, sector %llu, root %llu, inode %llu, offset %llu, "
564 "length %llu, links %u (path: %s)\n", swarn
->errstr
,
565 swarn
->logical
, rcu_str_deref(swarn
->dev
->name
),
566 (unsigned long long)swarn
->sector
, root
, inum
, offset
,
567 min(isize
- offset
, (u64
)PAGE_SIZE
), nlink
,
568 (char *)(unsigned long)ipath
->fspath
->val
[i
]);
574 printk_in_rcu(KERN_WARNING
"BTRFS: %s at logical %llu on dev "
575 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
576 "resolving failed with ret=%d\n", swarn
->errstr
,
577 swarn
->logical
, rcu_str_deref(swarn
->dev
->name
),
578 (unsigned long long)swarn
->sector
, root
, inum
, offset
, ret
);
584 static void scrub_print_warning(const char *errstr
, struct scrub_block
*sblock
)
586 struct btrfs_device
*dev
;
587 struct btrfs_fs_info
*fs_info
;
588 struct btrfs_path
*path
;
589 struct btrfs_key found_key
;
590 struct extent_buffer
*eb
;
591 struct btrfs_extent_item
*ei
;
592 struct scrub_warning swarn
;
593 unsigned long ptr
= 0;
601 WARN_ON(sblock
->page_count
< 1);
602 dev
= sblock
->pagev
[0]->dev
;
603 fs_info
= sblock
->sctx
->dev_root
->fs_info
;
605 path
= btrfs_alloc_path();
609 swarn
.sector
= (sblock
->pagev
[0]->physical
) >> 9;
610 swarn
.logical
= sblock
->pagev
[0]->logical
;
611 swarn
.errstr
= errstr
;
614 ret
= extent_from_logical(fs_info
, swarn
.logical
, path
, &found_key
,
619 extent_item_pos
= swarn
.logical
- found_key
.objectid
;
620 swarn
.extent_item_size
= found_key
.offset
;
623 ei
= btrfs_item_ptr(eb
, path
->slots
[0], struct btrfs_extent_item
);
624 item_size
= btrfs_item_size_nr(eb
, path
->slots
[0]);
626 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
628 ret
= tree_backref_for_extent(&ptr
, eb
, &found_key
, ei
,
629 item_size
, &ref_root
,
631 printk_in_rcu(KERN_WARNING
632 "BTRFS: %s at logical %llu on dev %s, "
633 "sector %llu: metadata %s (level %d) in tree "
634 "%llu\n", errstr
, swarn
.logical
,
635 rcu_str_deref(dev
->name
),
636 (unsigned long long)swarn
.sector
,
637 ref_level
? "node" : "leaf",
638 ret
< 0 ? -1 : ref_level
,
639 ret
< 0 ? -1 : ref_root
);
641 btrfs_release_path(path
);
643 btrfs_release_path(path
);
646 iterate_extent_inodes(fs_info
, found_key
.objectid
,
648 scrub_print_warning_inode
, &swarn
);
652 btrfs_free_path(path
);
655 static int scrub_fixup_readpage(u64 inum
, u64 offset
, u64 root
, void *fixup_ctx
)
657 struct page
*page
= NULL
;
659 struct scrub_fixup_nodatasum
*fixup
= fixup_ctx
;
662 struct btrfs_key key
;
663 struct inode
*inode
= NULL
;
664 struct btrfs_fs_info
*fs_info
;
665 u64 end
= offset
+ PAGE_SIZE
- 1;
666 struct btrfs_root
*local_root
;
670 key
.type
= BTRFS_ROOT_ITEM_KEY
;
671 key
.offset
= (u64
)-1;
673 fs_info
= fixup
->root
->fs_info
;
674 srcu_index
= srcu_read_lock(&fs_info
->subvol_srcu
);
676 local_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
677 if (IS_ERR(local_root
)) {
678 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
679 return PTR_ERR(local_root
);
682 key
.type
= BTRFS_INODE_ITEM_KEY
;
685 inode
= btrfs_iget(fs_info
->sb
, &key
, local_root
, NULL
);
686 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
688 return PTR_ERR(inode
);
690 index
= offset
>> PAGE_CACHE_SHIFT
;
692 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
698 if (PageUptodate(page
)) {
699 if (PageDirty(page
)) {
 * we need to write the data to the defect sector. the
 * data that was in that sector is not in memory,
 * because the page was modified. we must not write the
 * modified page to that sector.
 *
 * TODO: what could be done here: wait for the delalloc
 *       runner to write out that page (might involve
 *       COW) and see whether the sector is still
 *       referenced afterwards.
 *
 * For the meantime, we'll treat this error as
 * uncorrectable, although there is a chance that a
 * later scrub will find the bad sector again and that
 * there's no dirty page in memory then.
719 ret
= repair_io_failure(inode
, offset
, PAGE_SIZE
,
720 fixup
->logical
, page
,
721 offset
- page_offset(page
),
727 * we need to get good data first. the general readpage path
728 * will call repair_io_failure for us, we just have to make
729 * sure we read the bad mirror.
731 ret
= set_extent_bits(&BTRFS_I(inode
)->io_tree
, offset
, end
,
732 EXTENT_DAMAGED
, GFP_NOFS
);
734 /* set_extent_bits should give proper error */
741 ret
= extent_read_full_page(&BTRFS_I(inode
)->io_tree
, page
,
744 wait_on_page_locked(page
);
746 corrected
= !test_range_bit(&BTRFS_I(inode
)->io_tree
, offset
,
747 end
, EXTENT_DAMAGED
, 0, NULL
);
749 clear_extent_bits(&BTRFS_I(inode
)->io_tree
, offset
, end
,
750 EXTENT_DAMAGED
, GFP_NOFS
);
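	/*
	 * EXTENT_DAMAGED is used as a marker here: it is set on the range
	 * before the forced re-read of the bad mirror, and the repair done
	 * by the readpage path is expected to clear it, so testing the bit
	 * afterwards (see 'corrected' above) tells whether the sector was
	 * actually fixed before the range is cleaned up again.
	 */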
762 if (ret
== 0 && corrected
) {
764 * we only need to call readpage for one of the inodes belonging
765 * to this extent. so make iterate_extent_inodes stop
773 static void scrub_fixup_nodatasum(struct btrfs_work
*work
)
776 struct scrub_fixup_nodatasum
*fixup
;
777 struct scrub_ctx
*sctx
;
778 struct btrfs_trans_handle
*trans
= NULL
;
779 struct btrfs_path
*path
;
780 int uncorrectable
= 0;
782 fixup
= container_of(work
, struct scrub_fixup_nodatasum
, work
);
785 path
= btrfs_alloc_path();
787 spin_lock(&sctx
->stat_lock
);
788 ++sctx
->stat
.malloc_errors
;
789 spin_unlock(&sctx
->stat_lock
);
794 trans
= btrfs_join_transaction(fixup
->root
);
801 * the idea is to trigger a regular read through the standard path. we
802 * read a page from the (failed) logical address by specifying the
803 * corresponding copynum of the failed sector. thus, that readpage is
805 * that is the point where on-the-fly error correction will kick in
806 * (once it's finished) and rewrite the failed sector if a good copy
809 ret
= iterate_inodes_from_logical(fixup
->logical
, fixup
->root
->fs_info
,
810 path
, scrub_fixup_readpage
,
818 spin_lock(&sctx
->stat_lock
);
819 ++sctx
->stat
.corrected_errors
;
820 spin_unlock(&sctx
->stat_lock
);
823 if (trans
&& !IS_ERR(trans
))
824 btrfs_end_transaction(trans
, fixup
->root
);
826 spin_lock(&sctx
->stat_lock
);
827 ++sctx
->stat
.uncorrectable_errors
;
828 spin_unlock(&sctx
->stat_lock
);
829 btrfs_dev_replace_stats_inc(
830 &sctx
->dev_root
->fs_info
->dev_replace
.
831 num_uncorrectable_read_errors
);
832 printk_ratelimited_in_rcu(KERN_ERR
"BTRFS: "
833 "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
834 fixup
->logical
, rcu_str_deref(fixup
->dev
->name
));
837 btrfs_free_path(path
);
840 scrub_pending_trans_workers_dec(sctx
);
843 static inline void scrub_get_recover(struct scrub_recover
*recover
)
845 atomic_inc(&recover
->refs
);
848 static inline void scrub_put_recover(struct scrub_recover
*recover
)
850 if (atomic_dec_and_test(&recover
->refs
)) {
851 kfree(recover
->bbio
);
852 kfree(recover
->raid_map
);
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * of them may actually be bad.
 *
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
865 static int scrub_handle_errored_block(struct scrub_block
*sblock_to_check
)
867 struct scrub_ctx
*sctx
= sblock_to_check
->sctx
;
868 struct btrfs_device
*dev
;
869 struct btrfs_fs_info
*fs_info
;
873 unsigned int failed_mirror_index
;
874 unsigned int is_metadata
;
875 unsigned int have_csum
;
877 struct scrub_block
*sblocks_for_recheck
; /* holds one for each mirror */
878 struct scrub_block
*sblock_bad
;
883 static DEFINE_RATELIMIT_STATE(_rs
, DEFAULT_RATELIMIT_INTERVAL
,
884 DEFAULT_RATELIMIT_BURST
);
886 BUG_ON(sblock_to_check
->page_count
< 1);
887 fs_info
= sctx
->dev_root
->fs_info
;
888 if (sblock_to_check
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_SUPER
) {
 * if we find an error in a super block, we just report it.
 * Super blocks get rewritten with the next transaction commit
894 spin_lock(&sctx
->stat_lock
);
895 ++sctx
->stat
.super_errors
;
896 spin_unlock(&sctx
->stat_lock
);
899 length
= sblock_to_check
->page_count
* PAGE_SIZE
;
900 logical
= sblock_to_check
->pagev
[0]->logical
;
901 generation
= sblock_to_check
->pagev
[0]->generation
;
902 BUG_ON(sblock_to_check
->pagev
[0]->mirror_num
< 1);
903 failed_mirror_index
= sblock_to_check
->pagev
[0]->mirror_num
- 1;
904 is_metadata
= !(sblock_to_check
->pagev
[0]->flags
&
905 BTRFS_EXTENT_FLAG_DATA
);
906 have_csum
= sblock_to_check
->pagev
[0]->have_csum
;
907 csum
= sblock_to_check
->pagev
[0]->csum
;
908 dev
= sblock_to_check
->pagev
[0]->dev
;
910 if (sctx
->is_dev_replace
&& !is_metadata
&& !have_csum
) {
911 sblocks_for_recheck
= NULL
;
 * read all mirrors one after the other. This includes re-reading
 * the extent or metadata block that failed (that was
 * the cause that this fixup code is called) another time,
 * page by page this time in order to know which pages
 * caused I/O errors and which ones are good (for all mirrors).
 * It is the goal to handle the situation when more than one
 * mirror contains I/O errors, but the errors do not
 * overlap, i.e. the data can be repaired by selecting the
 * pages from those mirrors without I/O error on the
 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
 * would be that mirror #1 has an I/O error on the first page,
 * the second page is good, and mirror #2 has an I/O error on
 * the second page, but the first page is good.
 * Then the first page of the first mirror can be repaired by
 * taking the first page of the second mirror, and the
 * second page of the second mirror can be repaired by
 * copying the contents of the 2nd page of the 1st mirror.
 * One more note: if the pages of one mirror contain I/O
 * errors, the checksum cannot be verified. In order to get
 * the best data for repairing, the first attempt is to find
 * a mirror without I/O errors and with a validated checksum.
 * Only if this is not possible, the pages are picked from
 * mirrors with I/O errors without considering the checksum.
 * If the latter is the case, at the end, the checksum of the
 * repaired area is verified in order to correctly maintain
 * the statistics.
944 sblocks_for_recheck
= kzalloc(BTRFS_MAX_MIRRORS
*
945 sizeof(*sblocks_for_recheck
),
947 if (!sblocks_for_recheck
) {
948 spin_lock(&sctx
->stat_lock
);
949 sctx
->stat
.malloc_errors
++;
950 sctx
->stat
.read_errors
++;
951 sctx
->stat
.uncorrectable_errors
++;
952 spin_unlock(&sctx
->stat_lock
);
953 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
957 /* setup the context, map the logical blocks and alloc the pages */
958 ret
= scrub_setup_recheck_block(sctx
, fs_info
, sblock_to_check
, length
,
959 logical
, sblocks_for_recheck
);
961 spin_lock(&sctx
->stat_lock
);
962 sctx
->stat
.read_errors
++;
963 sctx
->stat
.uncorrectable_errors
++;
964 spin_unlock(&sctx
->stat_lock
);
965 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
968 BUG_ON(failed_mirror_index
>= BTRFS_MAX_MIRRORS
);
969 sblock_bad
= sblocks_for_recheck
+ failed_mirror_index
;
971 /* build and submit the bios for the failed mirror, check checksums */
972 scrub_recheck_block(fs_info
, sblock_bad
, is_metadata
, have_csum
,
973 csum
, generation
, sctx
->csum_size
, 1);
975 if (!sblock_bad
->header_error
&& !sblock_bad
->checksum_error
&&
976 sblock_bad
->no_io_error_seen
) {
978 * the error disappeared after reading page by page, or
979 * the area was part of a huge bio and other parts of the
980 * bio caused I/O errors, or the block layer merged several
981 * read requests into one and the error is caused by a
982 * different bio (usually one of the two latter cases is
985 spin_lock(&sctx
->stat_lock
);
986 sctx
->stat
.unverified_errors
++;
987 sblock_to_check
->data_corrected
= 1;
988 spin_unlock(&sctx
->stat_lock
);
990 if (sctx
->is_dev_replace
)
991 scrub_write_block_to_dev_replace(sblock_bad
);
995 if (!sblock_bad
->no_io_error_seen
) {
996 spin_lock(&sctx
->stat_lock
);
997 sctx
->stat
.read_errors
++;
998 spin_unlock(&sctx
->stat_lock
);
999 if (__ratelimit(&_rs
))
1000 scrub_print_warning("i/o error", sblock_to_check
);
1001 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
1002 } else if (sblock_bad
->checksum_error
) {
1003 spin_lock(&sctx
->stat_lock
);
1004 sctx
->stat
.csum_errors
++;
1005 spin_unlock(&sctx
->stat_lock
);
1006 if (__ratelimit(&_rs
))
1007 scrub_print_warning("checksum error", sblock_to_check
);
1008 btrfs_dev_stat_inc_and_print(dev
,
1009 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1010 } else if (sblock_bad
->header_error
) {
1011 spin_lock(&sctx
->stat_lock
);
1012 sctx
->stat
.verify_errors
++;
1013 spin_unlock(&sctx
->stat_lock
);
1014 if (__ratelimit(&_rs
))
1015 scrub_print_warning("checksum/header error",
1017 if (sblock_bad
->generation_error
)
1018 btrfs_dev_stat_inc_and_print(dev
,
1019 BTRFS_DEV_STAT_GENERATION_ERRS
);
1021 btrfs_dev_stat_inc_and_print(dev
,
1022 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1025 if (sctx
->readonly
) {
1026 ASSERT(!sctx
->is_dev_replace
);
1030 if (!is_metadata
&& !have_csum
) {
1031 struct scrub_fixup_nodatasum
*fixup_nodatasum
;
1034 WARN_ON(sctx
->is_dev_replace
);
1037 * !is_metadata and !have_csum, this means that the data
1038 * might not be COW'ed, that it might be modified
1039 * concurrently. The general strategy to work on the
1040 * commit root does not help in the case when COW is not
1043 fixup_nodatasum
= kzalloc(sizeof(*fixup_nodatasum
), GFP_NOFS
);
1044 if (!fixup_nodatasum
)
1045 goto did_not_correct_error
;
1046 fixup_nodatasum
->sctx
= sctx
;
1047 fixup_nodatasum
->dev
= dev
;
1048 fixup_nodatasum
->logical
= logical
;
1049 fixup_nodatasum
->root
= fs_info
->extent_root
;
1050 fixup_nodatasum
->mirror_num
= failed_mirror_index
+ 1;
1051 scrub_pending_trans_workers_inc(sctx
);
1052 btrfs_init_work(&fixup_nodatasum
->work
, btrfs_scrub_helper
,
1053 scrub_fixup_nodatasum
, NULL
, NULL
);
1054 btrfs_queue_work(fs_info
->scrub_workers
,
1055 &fixup_nodatasum
->work
);
 * now build and submit the bios for the other mirrors, check
 * checksums.
 * First try to pick the mirror which is completely without I/O
 * errors and also does not have a checksum error.
 * If one is found, and if a checksum is present, the full block
 * that is known to contain an error is rewritten. Afterwards
 * the block is known to be corrected.
 * If a mirror is found which is completely correct, and no
 * checksum is present, only those pages are rewritten that had
 * an I/O error in the block to be repaired, since it cannot be
 * determined which copy of the other pages is better (and it
 * could happen otherwise that a correct page would be
 * overwritten by a bad one).
1074 for (mirror_index
= 0;
1075 mirror_index
< BTRFS_MAX_MIRRORS
&&
1076 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1078 struct scrub_block
*sblock_other
;
1080 if (mirror_index
== failed_mirror_index
)
1082 sblock_other
= sblocks_for_recheck
+ mirror_index
;
1084 /* build and submit the bios, check checksums */
1085 scrub_recheck_block(fs_info
, sblock_other
, is_metadata
,
1086 have_csum
, csum
, generation
,
1087 sctx
->csum_size
, 0);
1089 if (!sblock_other
->header_error
&&
1090 !sblock_other
->checksum_error
&&
1091 sblock_other
->no_io_error_seen
) {
1092 if (sctx
->is_dev_replace
) {
1093 scrub_write_block_to_dev_replace(sblock_other
);
1095 int force_write
= is_metadata
|| have_csum
;
1097 ret
= scrub_repair_block_from_good_copy(
1098 sblock_bad
, sblock_other
,
1102 goto corrected_error
;
1107 * for dev_replace, pick good pages and write to the target device.
1109 if (sctx
->is_dev_replace
) {
1111 for (page_num
= 0; page_num
< sblock_bad
->page_count
;
1116 for (mirror_index
= 0;
1117 mirror_index
< BTRFS_MAX_MIRRORS
&&
1118 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1120 struct scrub_block
*sblock_other
=
1121 sblocks_for_recheck
+ mirror_index
;
1122 struct scrub_page
*page_other
=
1123 sblock_other
->pagev
[page_num
];
1125 if (!page_other
->io_error
) {
1126 ret
= scrub_write_page_to_dev_replace(
1127 sblock_other
, page_num
);
1129 /* succeeded for this page */
1133 btrfs_dev_replace_stats_inc(
1135 fs_info
->dev_replace
.
1143 * did not find a mirror to fetch the page
1144 * from. scrub_write_page_to_dev_replace()
1145 * handles this case (page->io_error), by
1146 * filling the block with zeros before
1147 * submitting the write request
1150 ret
= scrub_write_page_to_dev_replace(
1151 sblock_bad
, page_num
);
1153 btrfs_dev_replace_stats_inc(
1154 &sctx
->dev_root
->fs_info
->
1155 dev_replace
.num_write_errors
);
 * for regular scrub, repair those pages that are errored.
 * In case of I/O errors in the area that is supposed to be
 * repaired, continue by picking good copies of those pages.
 * Select the good pages from mirrors to rewrite bad pages from
 * the area to fix. Afterwards verify the checksum of the block
 * that is supposed to be repaired. This verification step is
 * only done for the purpose of statistics counting and for the
 * final scrub report, whether errors remain.
 * A perfect algorithm could make use of the checksum and try
 * all possible combinations of pages from the different mirrors
 * until the checksum verification succeeds. For example, when
 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 * of mirror #2 is readable but the final checksum test fails,
 * then the 2nd page of mirror #3 could be tried, to see whether
 * now the final checksum succeeds. But this would be a rare
 * exception and is therefore not implemented. At least it is
 * avoided that the good copy is overwritten.
 * A more useful improvement would be to pick the sectors
 * without I/O error based on sector sizes (512 bytes on legacy
 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
 * mirror could be repaired by taking 512 bytes of a different
 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
 * area are unreadable.
1188 /* can only fix I/O errors from here on */
1189 if (sblock_bad
->no_io_error_seen
)
1190 goto did_not_correct_error
;
1193 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1194 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1196 if (!page_bad
->io_error
)
1199 for (mirror_index
= 0;
1200 mirror_index
< BTRFS_MAX_MIRRORS
&&
1201 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1203 struct scrub_block
*sblock_other
= sblocks_for_recheck
+
1205 struct scrub_page
*page_other
= sblock_other
->pagev
[
1208 if (!page_other
->io_error
) {
1209 ret
= scrub_repair_page_from_good_copy(
1210 sblock_bad
, sblock_other
, page_num
, 0);
1212 page_bad
->io_error
= 0;
1213 break; /* succeeded for this page */
1218 if (page_bad
->io_error
) {
1219 /* did not find a mirror to copy the page from */
1225 if (is_metadata
|| have_csum
) {
1227 * need to verify the checksum now that all
1228 * sectors on disk are repaired (the write
1229 * request for data to be repaired is on its way).
1230 * Just be lazy and use scrub_recheck_block()
1231 * which re-reads the data before the checksum
1232 * is verified, but most likely the data comes out
1233 * of the page cache.
1235 scrub_recheck_block(fs_info
, sblock_bad
,
1236 is_metadata
, have_csum
, csum
,
1237 generation
, sctx
->csum_size
, 1);
1238 if (!sblock_bad
->header_error
&&
1239 !sblock_bad
->checksum_error
&&
1240 sblock_bad
->no_io_error_seen
)
1241 goto corrected_error
;
1243 goto did_not_correct_error
;
1246 spin_lock(&sctx
->stat_lock
);
1247 sctx
->stat
.corrected_errors
++;
1248 sblock_to_check
->data_corrected
= 1;
1249 spin_unlock(&sctx
->stat_lock
);
1250 printk_ratelimited_in_rcu(KERN_ERR
1251 "BTRFS: fixed up error at logical %llu on dev %s\n",
1252 logical
, rcu_str_deref(dev
->name
));
1255 did_not_correct_error
:
1256 spin_lock(&sctx
->stat_lock
);
1257 sctx
->stat
.uncorrectable_errors
++;
1258 spin_unlock(&sctx
->stat_lock
);
1259 printk_ratelimited_in_rcu(KERN_ERR
1260 "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1261 logical
, rcu_str_deref(dev
->name
));
1265 if (sblocks_for_recheck
) {
1266 for (mirror_index
= 0; mirror_index
< BTRFS_MAX_MIRRORS
;
1268 struct scrub_block
*sblock
= sblocks_for_recheck
+
1270 struct scrub_recover
*recover
;
1273 for (page_index
= 0; page_index
< sblock
->page_count
;
1275 sblock
->pagev
[page_index
]->sblock
= NULL
;
1276 recover
= sblock
->pagev
[page_index
]->recover
;
1278 scrub_put_recover(recover
);
1279 sblock
->pagev
[page_index
]->recover
=
1282 scrub_page_put(sblock
->pagev
[page_index
]);
1285 kfree(sblocks_for_recheck
);
1291 static inline int scrub_nr_raid_mirrors(struct btrfs_bio
*bbio
, u64
*raid_map
)
1294 if (raid_map
[bbio
->num_stripes
- 1] == RAID6_Q_STRIPE
)
1299 return (int)bbio
->num_stripes
;
1303 static inline void scrub_stripe_index_and_offset(u64 logical
, u64
*raid_map
,
1305 int nstripes
, int mirror
,
1313 for (i
= 0; i
< nstripes
; i
++) {
1314 if (raid_map
[i
] == RAID6_Q_STRIPE
||
1315 raid_map
[i
] == RAID5_P_STRIPE
)
1318 if (logical
>= raid_map
[i
] &&
1319 logical
< raid_map
[i
] + mapped_length
)
1324 *stripe_offset
= logical
- raid_map
[i
];
1326 /* The other RAID type */
1327 *stripe_index
= mirror
;
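/*
 * Illustrative example (hypothetical numbers): for a RAID5/6 chunk with
 * raid_map[0] = 10 MiB, raid_map[1] = 10 MiB + 64 KiB and a mapped length
 * of 64 KiB per stripe, a logical address of 10 MiB + 68 KiB is found in
 * the second data stripe and *stripe_offset becomes 4 KiB. For the other
 * RAID types the mirror number is used as the stripe index directly, as
 * above.
 */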
1332 static int scrub_setup_recheck_block(struct scrub_ctx
*sctx
,
1333 struct btrfs_fs_info
*fs_info
,
1334 struct scrub_block
*original_sblock
,
1335 u64 length
, u64 logical
,
1336 struct scrub_block
*sblocks_for_recheck
)
1338 struct scrub_recover
*recover
;
1339 struct btrfs_bio
*bbio
;
1351 * note: the two members ref_count and outstanding_pages
1352 * are not used (and not set) in the blocks that are used for
1353 * the recheck procedure
1357 while (length
> 0) {
1358 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1359 mapped_length
= sublen
;
1364 * with a length of PAGE_SIZE, each returned stripe
1365 * represents one mirror
1367 ret
= btrfs_map_sblock(fs_info
, REQ_GET_READ_MIRRORS
, logical
,
1368 &mapped_length
, &bbio
, 0, &raid_map
);
1369 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1375 recover
= kzalloc(sizeof(struct scrub_recover
), GFP_NOFS
);
1382 atomic_set(&recover
->refs
, 1);
1383 recover
->bbio
= bbio
;
1384 recover
->raid_map
= raid_map
;
1385 recover
->map_length
= mapped_length
;
1387 BUG_ON(page_index
>= SCRUB_PAGES_PER_RD_BIO
);
1389 nmirrors
= scrub_nr_raid_mirrors(bbio
, raid_map
);
1390 for (mirror_index
= 0; mirror_index
< nmirrors
;
1392 struct scrub_block
*sblock
;
1393 struct scrub_page
*page
;
1395 if (mirror_index
>= BTRFS_MAX_MIRRORS
)
1398 sblock
= sblocks_for_recheck
+ mirror_index
;
1399 sblock
->sctx
= sctx
;
1400 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1403 spin_lock(&sctx
->stat_lock
);
1404 sctx
->stat
.malloc_errors
++;
1405 spin_unlock(&sctx
->stat_lock
);
1406 scrub_put_recover(recover
);
1409 scrub_page_get(page
);
1410 sblock
->pagev
[page_index
] = page
;
1411 page
->logical
= logical
;
1413 scrub_stripe_index_and_offset(logical
, raid_map
,
1419 page
->physical
= bbio
->stripes
[stripe_index
].physical
+
1421 page
->dev
= bbio
->stripes
[stripe_index
].dev
;
1423 BUG_ON(page_index
>= original_sblock
->page_count
);
1424 page
->physical_for_dev_replace
=
1425 original_sblock
->pagev
[page_index
]->
1426 physical_for_dev_replace
;
1427 /* for missing devices, dev->bdev is NULL */
1428 page
->mirror_num
= mirror_index
+ 1;
1429 sblock
->page_count
++;
1430 page
->page
= alloc_page(GFP_NOFS
);
1434 scrub_get_recover(recover
);
1435 page
->recover
= recover
;
1437 scrub_put_recover(recover
);
1446 struct scrub_bio_ret
{
1447 struct completion event
;
1451 static void scrub_bio_wait_endio(struct bio
*bio
, int error
)
1453 struct scrub_bio_ret
*ret
= bio
->bi_private
;
1456 complete(&ret
->event
);
1459 static inline int scrub_is_page_on_raid56(struct scrub_page
*page
)
1461 return page
->recover
&& page
->recover
->raid_map
;
1464 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info
*fs_info
,
1466 struct scrub_page
*page
)
1468 struct scrub_bio_ret done
;
1471 init_completion(&done
.event
);
1473 bio
->bi_iter
.bi_sector
= page
->logical
>> 9;
1474 bio
->bi_private
= &done
;
1475 bio
->bi_end_io
= scrub_bio_wait_endio
;
1477 ret
= raid56_parity_recover(fs_info
->fs_root
, bio
, page
->recover
->bbio
,
1478 page
->recover
->raid_map
,
1479 page
->recover
->map_length
,
1480 page
->mirror_num
, 0);
1484 wait_for_completion(&done
.event
);
1492 * this function will check the on disk data for checksum errors, header
1493 * errors and read I/O errors. If any I/O errors happen, the exact pages
1494 * which are errored are marked as being bad. The goal is to enable scrub
1495 * to take those pages that are not errored from all the mirrors so that
1496 * the pages that are errored in the just handled mirror can be repaired.
1498 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1499 struct scrub_block
*sblock
, int is_metadata
,
1500 int have_csum
, u8
*csum
, u64 generation
,
1501 u16 csum_size
, int retry_failed_mirror
)
1505 sblock
->no_io_error_seen
= 1;
1506 sblock
->header_error
= 0;
1507 sblock
->checksum_error
= 0;
1509 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1511 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1513 if (page
->dev
->bdev
== NULL
) {
1515 sblock
->no_io_error_seen
= 0;
1519 WARN_ON(!page
->page
);
1520 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1523 sblock
->no_io_error_seen
= 0;
1526 bio
->bi_bdev
= page
->dev
->bdev
;
1528 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1529 if (!retry_failed_mirror
&& scrub_is_page_on_raid56(page
)) {
1530 if (scrub_submit_raid56_bio_wait(fs_info
, bio
, page
))
1531 sblock
->no_io_error_seen
= 0;
1533 bio
->bi_iter
.bi_sector
= page
->physical
>> 9;
1535 if (btrfsic_submit_bio_wait(READ
, bio
))
1536 sblock
->no_io_error_seen
= 0;
1542 if (sblock
->no_io_error_seen
)
1543 scrub_recheck_block_checksum(fs_info
, sblock
, is_metadata
,
1544 have_csum
, csum
, generation
,
1550 static inline int scrub_check_fsid(u8 fsid
[],
1551 struct scrub_page
*spage
)
1553 struct btrfs_fs_devices
*fs_devices
= spage
->dev
->fs_devices
;
1556 ret
= memcmp(fsid
, fs_devices
->fsid
, BTRFS_UUID_SIZE
);
1560 static void scrub_recheck_block_checksum(struct btrfs_fs_info
*fs_info
,
1561 struct scrub_block
*sblock
,
1562 int is_metadata
, int have_csum
,
1563 const u8
*csum
, u64 generation
,
1567 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1569 void *mapped_buffer
;
1571 WARN_ON(!sblock
->pagev
[0]->page
);
1573 struct btrfs_header
*h
;
1575 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1576 h
= (struct btrfs_header
*)mapped_buffer
;
1578 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
) ||
1579 !scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]) ||
1580 memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1582 sblock
->header_error
= 1;
1583 } else if (generation
!= btrfs_stack_header_generation(h
)) {
1584 sblock
->header_error
= 1;
1585 sblock
->generation_error
= 1;
1592 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1595 for (page_num
= 0;;) {
1596 if (page_num
== 0 && is_metadata
)
1597 crc
= btrfs_csum_data(
1598 ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
,
1599 crc
, PAGE_SIZE
- BTRFS_CSUM_SIZE
);
1601 crc
= btrfs_csum_data(mapped_buffer
, crc
, PAGE_SIZE
);
1603 kunmap_atomic(mapped_buffer
);
1605 if (page_num
>= sblock
->page_count
)
1607 WARN_ON(!sblock
->pagev
[page_num
]->page
);
1609 mapped_buffer
= kmap_atomic(sblock
->pagev
[page_num
]->page
);
1612 btrfs_csum_final(crc
, calculated_csum
);
1613 if (memcmp(calculated_csum
, csum
, csum_size
))
1614 sblock
->checksum_error
= 1;
1617 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1618 struct scrub_block
*sblock_good
,
1624 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1627 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1638 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1639 struct scrub_block
*sblock_good
,
1640 int page_num
, int force_write
)
1642 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1643 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1645 BUG_ON(page_bad
->page
== NULL
);
1646 BUG_ON(page_good
->page
== NULL
);
1647 if (force_write
|| sblock_bad
->header_error
||
1648 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1652 if (!page_bad
->dev
->bdev
) {
1653 printk_ratelimited(KERN_WARNING
"BTRFS: "
1654 "scrub_repair_page_from_good_copy(bdev == NULL) "
1655 "is unexpected!\n");
1659 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1662 bio
->bi_bdev
= page_bad
->dev
->bdev
;
1663 bio
->bi_iter
.bi_sector
= page_bad
->physical
>> 9;
1665 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1666 if (PAGE_SIZE
!= ret
) {
1671 if (btrfsic_submit_bio_wait(WRITE
, bio
)) {
1672 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1673 BTRFS_DEV_STAT_WRITE_ERRS
);
1674 btrfs_dev_replace_stats_inc(
1675 &sblock_bad
->sctx
->dev_root
->fs_info
->
1676 dev_replace
.num_write_errors
);
1686 static void scrub_write_block_to_dev_replace(struct scrub_block
*sblock
)
1691 * This block is used for the check of the parity on the source device,
1692 * so the data needn't be written into the destination device.
1694 if (sblock
->sparity
)
1697 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1700 ret
= scrub_write_page_to_dev_replace(sblock
, page_num
);
1702 btrfs_dev_replace_stats_inc(
1703 &sblock
->sctx
->dev_root
->fs_info
->dev_replace
.
1708 static int scrub_write_page_to_dev_replace(struct scrub_block
*sblock
,
1711 struct scrub_page
*spage
= sblock
->pagev
[page_num
];
1713 BUG_ON(spage
->page
== NULL
);
1714 if (spage
->io_error
) {
1715 void *mapped_buffer
= kmap_atomic(spage
->page
);
1717 memset(mapped_buffer
, 0, PAGE_CACHE_SIZE
);
1718 flush_dcache_page(spage
->page
);
1719 kunmap_atomic(mapped_buffer
);
1721 return scrub_add_page_to_wr_bio(sblock
->sctx
, spage
);
1724 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1725 struct scrub_page
*spage
)
1727 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1728 struct scrub_bio
*sbio
;
1731 mutex_lock(&wr_ctx
->wr_lock
);
1733 if (!wr_ctx
->wr_curr_bio
) {
1734 wr_ctx
->wr_curr_bio
= kzalloc(sizeof(*wr_ctx
->wr_curr_bio
),
1736 if (!wr_ctx
->wr_curr_bio
) {
1737 mutex_unlock(&wr_ctx
->wr_lock
);
1740 wr_ctx
->wr_curr_bio
->sctx
= sctx
;
1741 wr_ctx
->wr_curr_bio
->page_count
= 0;
1743 sbio
= wr_ctx
->wr_curr_bio
;
1744 if (sbio
->page_count
== 0) {
1747 sbio
->physical
= spage
->physical_for_dev_replace
;
1748 sbio
->logical
= spage
->logical
;
1749 sbio
->dev
= wr_ctx
->tgtdev
;
1752 bio
= btrfs_io_bio_alloc(GFP_NOFS
, wr_ctx
->pages_per_wr_bio
);
1754 mutex_unlock(&wr_ctx
->wr_lock
);
1760 bio
->bi_private
= sbio
;
1761 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1762 bio
->bi_bdev
= sbio
->dev
->bdev
;
1763 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
1765 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1766 spage
->physical_for_dev_replace
||
1767 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1769 scrub_wr_submit(sctx
);
1773 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1774 if (ret
!= PAGE_SIZE
) {
1775 if (sbio
->page_count
< 1) {
1778 mutex_unlock(&wr_ctx
->wr_lock
);
1781 scrub_wr_submit(sctx
);
1785 sbio
->pagev
[sbio
->page_count
] = spage
;
1786 scrub_page_get(spage
);
1788 if (sbio
->page_count
== wr_ctx
->pages_per_wr_bio
)
1789 scrub_wr_submit(sctx
);
1790 mutex_unlock(&wr_ctx
->wr_lock
);
1795 static void scrub_wr_submit(struct scrub_ctx
*sctx
)
1797 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1798 struct scrub_bio
*sbio
;
1800 if (!wr_ctx
->wr_curr_bio
)
1803 sbio
= wr_ctx
->wr_curr_bio
;
1804 wr_ctx
->wr_curr_bio
= NULL
;
1805 WARN_ON(!sbio
->bio
->bi_bdev
);
1806 scrub_pending_bio_inc(sctx
);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured
1811 btrfsic_submit_bio(WRITE
, sbio
->bio
);
1814 static void scrub_wr_bio_end_io(struct bio
*bio
, int err
)
1816 struct scrub_bio
*sbio
= bio
->bi_private
;
1817 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
1822 btrfs_init_work(&sbio
->work
, btrfs_scrubwrc_helper
,
1823 scrub_wr_bio_end_io_worker
, NULL
, NULL
);
1824 btrfs_queue_work(fs_info
->scrub_wr_completion_workers
, &sbio
->work
);
1827 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1829 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1830 struct scrub_ctx
*sctx
= sbio
->sctx
;
1833 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1835 struct btrfs_dev_replace
*dev_replace
=
1836 &sbio
->sctx
->dev_root
->fs_info
->dev_replace
;
1838 for (i
= 0; i
< sbio
->page_count
; i
++) {
1839 struct scrub_page
*spage
= sbio
->pagev
[i
];
1841 spage
->io_error
= 1;
1842 btrfs_dev_replace_stats_inc(&dev_replace
->
1847 for (i
= 0; i
< sbio
->page_count
; i
++)
1848 scrub_page_put(sbio
->pagev
[i
]);
1852 scrub_pending_bio_dec(sctx
);
1855 static int scrub_checksum(struct scrub_block
*sblock
)
1860 WARN_ON(sblock
->page_count
< 1);
1861 flags
= sblock
->pagev
[0]->flags
;
1863 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1864 ret
= scrub_checksum_data(sblock
);
1865 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1866 ret
= scrub_checksum_tree_block(sblock
);
1867 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1868 (void)scrub_checksum_super(sblock
);
1872 scrub_handle_errored_block(sblock
);
1877 static int scrub_checksum_data(struct scrub_block
*sblock
)
1879 struct scrub_ctx
*sctx
= sblock
->sctx
;
1880 u8 csum
[BTRFS_CSUM_SIZE
];
1889 BUG_ON(sblock
->page_count
< 1);
1890 if (!sblock
->pagev
[0]->have_csum
)
1893 on_disk_csum
= sblock
->pagev
[0]->csum
;
1894 page
= sblock
->pagev
[0]->page
;
1895 buffer
= kmap_atomic(page
);
1897 len
= sctx
->sectorsize
;
1900 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1902 crc
= btrfs_csum_data(buffer
, crc
, l
);
1903 kunmap_atomic(buffer
);
1908 BUG_ON(index
>= sblock
->page_count
);
1909 BUG_ON(!sblock
->pagev
[index
]->page
);
1910 page
= sblock
->pagev
[index
]->page
;
1911 buffer
= kmap_atomic(page
);
1914 btrfs_csum_final(crc
, csum
);
1915 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1921 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1923 struct scrub_ctx
*sctx
= sblock
->sctx
;
1924 struct btrfs_header
*h
;
1925 struct btrfs_root
*root
= sctx
->dev_root
;
1926 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1927 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1928 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1930 void *mapped_buffer
;
1939 BUG_ON(sblock
->page_count
< 1);
1940 page
= sblock
->pagev
[0]->page
;
1941 mapped_buffer
= kmap_atomic(page
);
1942 h
= (struct btrfs_header
*)mapped_buffer
;
1943 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1946 * we don't use the getter functions here, as we
1947 * a) don't have an extent buffer and
1948 * b) the page is already kmapped
1951 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
))
1954 if (sblock
->pagev
[0]->generation
!= btrfs_stack_header_generation(h
))
1957 if (!scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]))
1960 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1964 len
= sctx
->nodesize
- BTRFS_CSUM_SIZE
;
1965 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1966 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1969 u64 l
= min_t(u64
, len
, mapped_size
);
1971 crc
= btrfs_csum_data(p
, crc
, l
);
1972 kunmap_atomic(mapped_buffer
);
1977 BUG_ON(index
>= sblock
->page_count
);
1978 BUG_ON(!sblock
->pagev
[index
]->page
);
1979 page
= sblock
->pagev
[index
]->page
;
1980 mapped_buffer
= kmap_atomic(page
);
1981 mapped_size
= PAGE_SIZE
;
1985 btrfs_csum_final(crc
, calculated_csum
);
1986 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1989 return fail
|| crc_fail
;
1992 static int scrub_checksum_super(struct scrub_block
*sblock
)
1994 struct btrfs_super_block
*s
;
1995 struct scrub_ctx
*sctx
= sblock
->sctx
;
1996 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1997 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1999 void *mapped_buffer
;
2008 BUG_ON(sblock
->page_count
< 1);
2009 page
= sblock
->pagev
[0]->page
;
2010 mapped_buffer
= kmap_atomic(page
);
2011 s
= (struct btrfs_super_block
*)mapped_buffer
;
2012 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
2014 if (sblock
->pagev
[0]->logical
!= btrfs_super_bytenr(s
))
2017 if (sblock
->pagev
[0]->generation
!= btrfs_super_generation(s
))
2020 if (!scrub_check_fsid(s
->fsid
, sblock
->pagev
[0]))
2023 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
2024 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
2025 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
2028 u64 l
= min_t(u64
, len
, mapped_size
);
2030 crc
= btrfs_csum_data(p
, crc
, l
);
2031 kunmap_atomic(mapped_buffer
);
2036 BUG_ON(index
>= sblock
->page_count
);
2037 BUG_ON(!sblock
->pagev
[index
]->page
);
2038 page
= sblock
->pagev
[index
]->page
;
2039 mapped_buffer
= kmap_atomic(page
);
2040 mapped_size
= PAGE_SIZE
;
2044 btrfs_csum_final(crc
, calculated_csum
);
2045 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
2048 if (fail_cor
+ fail_gen
) {
 * if we find an error in a super block, we just report it.
 * Super blocks get rewritten with the next transaction commit
2054 spin_lock(&sctx
->stat_lock
);
2055 ++sctx
->stat
.super_errors
;
2056 spin_unlock(&sctx
->stat_lock
);
2058 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
2059 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
2061 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
2062 BTRFS_DEV_STAT_GENERATION_ERRS
);
2065 return fail_cor
+ fail_gen
;
2068 static void scrub_block_get(struct scrub_block
*sblock
)
2070 atomic_inc(&sblock
->ref_count
);
2073 static void scrub_block_put(struct scrub_block
*sblock
)
2075 if (atomic_dec_and_test(&sblock
->ref_count
)) {
2078 if (sblock
->sparity
)
2079 scrub_parity_put(sblock
->sparity
);
2081 for (i
= 0; i
< sblock
->page_count
; i
++)
2082 scrub_page_put(sblock
->pagev
[i
]);
2087 static void scrub_page_get(struct scrub_page
*spage
)
2089 atomic_inc(&spage
->ref_count
);
2092 static void scrub_page_put(struct scrub_page
*spage
)
2094 if (atomic_dec_and_test(&spage
->ref_count
)) {
2096 __free_page(spage
->page
);
2101 static void scrub_submit(struct scrub_ctx
*sctx
)
2103 struct scrub_bio
*sbio
;
2105 if (sctx
->curr
== -1)
2108 sbio
= sctx
->bios
[sctx
->curr
];
2110 scrub_pending_bio_inc(sctx
);
2112 if (!sbio
->bio
->bi_bdev
) {
2114 * this case should not happen. If btrfs_map_block() is
2115 * wrong, it could happen for dev-replace operations on
2116 * missing devices when no mirrors are available, but in
2117 * this case it should already fail the mount.
2118 * This case is handled correctly (but _very_ slowly).
2120 printk_ratelimited(KERN_WARNING
2121 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
2122 bio_endio(sbio
->bio
, -EIO
);
2124 btrfsic_submit_bio(READ
, sbio
->bio
);
2128 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
2129 struct scrub_page
*spage
)
2131 struct scrub_block
*sblock
= spage
->sblock
;
2132 struct scrub_bio
*sbio
;
2137 * grab a fresh bio or wait for one to become available
2139 while (sctx
->curr
== -1) {
2140 spin_lock(&sctx
->list_lock
);
2141 sctx
->curr
= sctx
->first_free
;
2142 if (sctx
->curr
!= -1) {
2143 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
2144 sctx
->bios
[sctx
->curr
]->next_free
= -1;
2145 sctx
->bios
[sctx
->curr
]->page_count
= 0;
2146 spin_unlock(&sctx
->list_lock
);
2148 spin_unlock(&sctx
->list_lock
);
2149 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
2152 sbio
= sctx
->bios
[sctx
->curr
];
2153 if (sbio
->page_count
== 0) {
2156 sbio
->physical
= spage
->physical
;
2157 sbio
->logical
= spage
->logical
;
2158 sbio
->dev
= spage
->dev
;
2161 bio
= btrfs_io_bio_alloc(GFP_NOFS
, sctx
->pages_per_rd_bio
);
2167 bio
->bi_private
= sbio
;
2168 bio
->bi_end_io
= scrub_bio_end_io
;
2169 bio
->bi_bdev
= sbio
->dev
->bdev
;
2170 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
2172 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
2174 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
2176 sbio
->dev
!= spage
->dev
) {
2181 sbio
->pagev
[sbio
->page_count
] = spage
;
2182 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
2183 if (ret
!= PAGE_SIZE
) {
2184 if (sbio
->page_count
< 1) {
2193 scrub_block_get(sblock
); /* one for the page added to the bio */
2194 atomic_inc(&sblock
->outstanding_pages
);
2196 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
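/*
 * Read completion is split in two: the bio end_io handler only queues the
 * scrub_bio to the scrub workqueue, and the worker below checks the pages,
 * completes finished blocks and recycles the scrub_bio slot.
 */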
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
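/*
 * The parity bitmaps cover the sectors of one RAID5/6 stripe: dbitmap marks
 * sectors that hold data to be verified, ebitmap marks sectors where errors
 * were seen. A range that wraps around the stripe length is split into two
 * bitmap_set() calls.
 */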
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	int offset;
	int nsectors;
	int sectorsize = sparity->sctx->dev_root->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	offset = (int)do_div(start, sparity->stripe_len);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
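/*
 * Called once all pages of a block have completed: either hand the block to
 * the error correction path, or verify its checksum and, in the dev-replace
 * case, write it to the target device. Corrupted sectors that belong to a
 * parity stripe are recorded in the parity error bitmap.
 */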
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * if has checksum error, write via repair mechanism in
		 * dev replace case, otherwise write here in dev replace
		 * case.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
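/*
 * Look up the checksum for a logical address in the per-context csum list
 * that was collected up front; entries that lie entirely before the address
 * can no longer match and are discarded.
 */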
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
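/*
 * Same idea as scrub_pages(), but for blocks inside a RAID5/6 full stripe:
 * each page is additionally linked into sparity->spages and the parity
 * descriptor is pinned for as long as the block exists.
 */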
2494 static int scrub_pages_for_parity(struct scrub_parity
*sparity
,
2495 u64 logical
, u64 len
,
2496 u64 physical
, struct btrfs_device
*dev
,
2497 u64 flags
, u64 gen
, int mirror_num
, u8
*csum
)
2499 struct scrub_ctx
*sctx
= sparity
->sctx
;
2500 struct scrub_block
*sblock
;
2503 sblock
= kzalloc(sizeof(*sblock
), GFP_NOFS
);
2505 spin_lock(&sctx
->stat_lock
);
2506 sctx
->stat
.malloc_errors
++;
2507 spin_unlock(&sctx
->stat_lock
);
2511 /* one ref inside this function, plus one for each page added to
2513 atomic_set(&sblock
->ref_count
, 1);
2514 sblock
->sctx
= sctx
;
2515 sblock
->no_io_error_seen
= 1;
2516 sblock
->sparity
= sparity
;
2517 scrub_parity_get(sparity
);
2519 for (index
= 0; len
> 0; index
++) {
2520 struct scrub_page
*spage
;
2521 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
2523 spage
= kzalloc(sizeof(*spage
), GFP_NOFS
);
2526 spin_lock(&sctx
->stat_lock
);
2527 sctx
->stat
.malloc_errors
++;
2528 spin_unlock(&sctx
->stat_lock
);
2529 scrub_block_put(sblock
);
2532 BUG_ON(index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
2533 /* For scrub block */
2534 scrub_page_get(spage
);
2535 sblock
->pagev
[index
] = spage
;
2536 /* For scrub parity */
2537 scrub_page_get(spage
);
2538 list_add_tail(&spage
->list
, &sparity
->spages
);
2539 spage
->sblock
= sblock
;
2541 spage
->flags
= flags
;
2542 spage
->generation
= gen
;
2543 spage
->logical
= logical
;
2544 spage
->physical
= physical
;
2545 spage
->mirror_num
= mirror_num
;
2547 spage
->have_csum
= 1;
2548 memcpy(spage
->csum
, csum
, sctx
->csum_size
);
2550 spage
->have_csum
= 0;
2552 sblock
->page_count
++;
2553 spage
->page
= alloc_page(GFP_NOFS
);
2561 WARN_ON(sblock
->page_count
== 0);
2562 for (index
= 0; index
< sblock
->page_count
; index
++) {
2563 struct scrub_page
*spage
= sblock
->pagev
[index
];
2566 ret
= scrub_add_page_to_rd_bio(sctx
, spage
);
2568 scrub_block_put(sblock
);
2573 /* last one frees, either here or in bio completion for last page */
2574 scrub_block_put(sblock
);
2578 static int scrub_extent_for_parity(struct scrub_parity
*sparity
,
2579 u64 logical
, u64 len
,
2580 u64 physical
, struct btrfs_device
*dev
,
2581 u64 flags
, u64 gen
, int mirror_num
)
2583 struct scrub_ctx
*sctx
= sparity
->sctx
;
2585 u8 csum
[BTRFS_CSUM_SIZE
];
2588 if (flags
& BTRFS_EXTENT_FLAG_DATA
) {
2589 blocksize
= sctx
->sectorsize
;
2590 } else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
2591 blocksize
= sctx
->nodesize
;
2593 blocksize
= sctx
->sectorsize
;
2598 u64 l
= min_t(u64
, len
, blocksize
);
2601 if (flags
& BTRFS_EXTENT_FLAG_DATA
) {
2602 /* push csums to sbio */
2603 have_csum
= scrub_find_csum(sctx
, logical
, l
, csum
);
2607 ret
= scrub_pages_for_parity(sparity
, logical
, l
, physical
, dev
,
2608 flags
, gen
, mirror_num
,
2609 have_csum
? csum
: NULL
);
/*
 * Given a physical address, this will calculate its
 * logical offset. if this is a parity stripe, it will return
 * the most left data stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	int stripe_index;
	int rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = *offset;
		do_div(stripe_nr, map->stripe_len);
		do_div(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		rot = do_div(stripe_nr, map->num_stripes);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
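/*
 * Final teardown of a scrub_parity: account all sectors still flagged in the
 * error bitmap as unrepairable read errors and release the pages held on the
 * spages list.
 */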
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}
static void scrub_parity_bio_endio(struct bio *bio, int error)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct scrub_ctx *sctx = sparity->sctx;

	if (error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}
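/*
 * After all blocks of a full stripe have been read, rebuild the parity for
 * the sectors still marked in dbitmap (errored sectors are removed from it
 * first) by handing the collected pages to the raid56 scrub rbio machinery.
 */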
2701 static void scrub_parity_check_and_repair(struct scrub_parity
*sparity
)
2703 struct scrub_ctx
*sctx
= sparity
->sctx
;
2705 struct btrfs_raid_bio
*rbio
;
2706 struct scrub_page
*spage
;
2707 struct btrfs_bio
*bbio
= NULL
;
2708 u64
*raid_map
= NULL
;
2712 if (!bitmap_andnot(sparity
->dbitmap
, sparity
->dbitmap
, sparity
->ebitmap
,
2716 length
= sparity
->logic_end
- sparity
->logic_start
+ 1;
2717 ret
= btrfs_map_sblock(sctx
->dev_root
->fs_info
, WRITE
,
2718 sparity
->logic_start
,
2719 &length
, &bbio
, 0, &raid_map
);
2720 if (ret
|| !bbio
|| !raid_map
)
2723 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 0);
2727 bio
->bi_iter
.bi_sector
= sparity
->logic_start
>> 9;
2728 bio
->bi_private
= sparity
;
2729 bio
->bi_end_io
= scrub_parity_bio_endio
;
2731 rbio
= raid56_parity_alloc_scrub_rbio(sctx
->dev_root
, bio
, bbio
,
2739 list_for_each_entry(spage
, &sparity
->spages
, list
)
2740 raid56_parity_add_scrub_pages(rbio
, spage
->page
,
2743 scrub_pending_bio_inc(sctx
);
2744 raid56_parity_submit_scrub_rbio(rbio
);
2752 bitmap_or(sparity
->ebitmap
, sparity
->ebitmap
, sparity
->dbitmap
,
2754 spin_lock(&sctx
->stat_lock
);
2755 sctx
->stat
.malloc_errors
++;
2756 spin_unlock(&sctx
->stat_lock
);
2758 scrub_free_parity(sparity
);
static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	atomic_inc(&sparity->ref_count);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!atomic_dec_and_test(&sparity->ref_count))
		return;

	scrub_parity_check_and_repair(sparity);
}
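/*
 * Scrub one RAID5/6 full stripe: walk the extent tree for the data stripes,
 * mark the covered sectors in dbitmap, read and check the extents, and let
 * the final scrub_parity_put() trigger the parity check and repair.
 */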
2779 static noinline_for_stack
int scrub_raid56_parity(struct scrub_ctx
*sctx
,
2780 struct map_lookup
*map
,
2781 struct btrfs_device
*sdev
,
2782 struct btrfs_path
*path
,
2786 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
2787 struct btrfs_root
*root
= fs_info
->extent_root
;
2788 struct btrfs_root
*csum_root
= fs_info
->csum_root
;
2789 struct btrfs_extent_item
*extent
;
2793 struct extent_buffer
*l
;
2794 struct btrfs_key key
;
2797 u64 extent_physical
;
2799 struct btrfs_device
*extent_dev
;
2800 struct scrub_parity
*sparity
;
2803 int extent_mirror_num
;
2806 nsectors
= map
->stripe_len
/ root
->sectorsize
;
2807 bitmap_len
= scrub_calc_parity_bitmap_len(nsectors
);
2808 sparity
= kzalloc(sizeof(struct scrub_parity
) + 2 * bitmap_len
,
2811 spin_lock(&sctx
->stat_lock
);
2812 sctx
->stat
.malloc_errors
++;
2813 spin_unlock(&sctx
->stat_lock
);
2817 sparity
->stripe_len
= map
->stripe_len
;
2818 sparity
->nsectors
= nsectors
;
2819 sparity
->sctx
= sctx
;
2820 sparity
->scrub_dev
= sdev
;
2821 sparity
->logic_start
= logic_start
;
2822 sparity
->logic_end
= logic_end
;
2823 atomic_set(&sparity
->ref_count
, 1);
2824 INIT_LIST_HEAD(&sparity
->spages
);
2825 sparity
->dbitmap
= sparity
->bitmap
;
2826 sparity
->ebitmap
= (void *)sparity
->bitmap
+ bitmap_len
;
2829 while (logic_start
< logic_end
) {
2830 if (btrfs_fs_incompat(fs_info
, SKINNY_METADATA
))
2831 key
.type
= BTRFS_METADATA_ITEM_KEY
;
2833 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2834 key
.objectid
= logic_start
;
2835 key
.offset
= (u64
)-1;
2837 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2842 ret
= btrfs_previous_extent_item(root
, path
, 0);
2846 btrfs_release_path(path
);
2847 ret
= btrfs_search_slot(NULL
, root
, &key
,
2859 slot
= path
->slots
[0];
2860 if (slot
>= btrfs_header_nritems(l
)) {
2861 ret
= btrfs_next_leaf(root
, path
);
2870 btrfs_item_key_to_cpu(l
, &key
, slot
);
2872 if (key
.type
== BTRFS_METADATA_ITEM_KEY
)
2873 bytes
= root
->nodesize
;
2877 if (key
.objectid
+ bytes
<= logic_start
)
2880 if (key
.type
!= BTRFS_EXTENT_ITEM_KEY
&&
2881 key
.type
!= BTRFS_METADATA_ITEM_KEY
)
2884 if (key
.objectid
> logic_end
) {
2889 while (key
.objectid
>= logic_start
+ map
->stripe_len
)
2890 logic_start
+= map
->stripe_len
;
2892 extent
= btrfs_item_ptr(l
, slot
,
2893 struct btrfs_extent_item
);
2894 flags
= btrfs_extent_flags(l
, extent
);
2895 generation
= btrfs_extent_generation(l
, extent
);
2897 if (key
.objectid
< logic_start
&&
2898 (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)) {
2900 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2901 key
.objectid
, logic_start
);
2905 extent_logical
= key
.objectid
;
2908 if (extent_logical
< logic_start
) {
2909 extent_len
-= logic_start
- extent_logical
;
2910 extent_logical
= logic_start
;
2913 if (extent_logical
+ extent_len
>
2914 logic_start
+ map
->stripe_len
)
2915 extent_len
= logic_start
+ map
->stripe_len
-
2918 scrub_parity_mark_sectors_data(sparity
, extent_logical
,
2921 scrub_remap_extent(fs_info
, extent_logical
,
2922 extent_len
, &extent_physical
,
2924 &extent_mirror_num
);
2926 ret
= btrfs_lookup_csums_range(csum_root
,
2928 extent_logical
+ extent_len
- 1,
2929 &sctx
->csum_list
, 1);
2933 ret
= scrub_extent_for_parity(sparity
, extent_logical
,
2942 scrub_free_csums(sctx
);
2943 if (extent_logical
+ extent_len
<
2944 key
.objectid
+ bytes
) {
2945 logic_start
+= map
->stripe_len
;
2947 if (logic_start
>= logic_end
) {
2952 if (logic_start
< key
.objectid
+ bytes
) {
2961 btrfs_release_path(path
);
2966 logic_start
+= map
->stripe_len
;
2970 scrub_parity_mark_sectors_error(sparity
, logic_start
,
2971 logic_end
- logic_start
+ 1);
2972 scrub_parity_put(sparity
);
2974 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
2975 scrub_wr_submit(sctx
);
2976 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
2978 btrfs_release_path(path
);
2979 return ret
< 0 ? ret
: 0;
2982 static noinline_for_stack
int scrub_stripe(struct scrub_ctx
*sctx
,
2983 struct map_lookup
*map
,
2984 struct btrfs_device
*scrub_dev
,
2985 int num
, u64 base
, u64 length
,
2988 struct btrfs_path
*path
, *ppath
;
2989 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
2990 struct btrfs_root
*root
= fs_info
->extent_root
;
2991 struct btrfs_root
*csum_root
= fs_info
->csum_root
;
2992 struct btrfs_extent_item
*extent
;
2993 struct blk_plug plug
;
2998 struct extent_buffer
*l
;
2999 struct btrfs_key key
;
3006 struct reada_control
*reada1
;
3007 struct reada_control
*reada2
;
3008 struct btrfs_key key_start
;
3009 struct btrfs_key key_end
;
3010 u64 increment
= map
->stripe_len
;
3013 u64 extent_physical
;
3017 struct btrfs_device
*extent_dev
;
3018 int extent_mirror_num
;
3022 physical
= map
->stripes
[num
].physical
;
3024 do_div(nstripes
, map
->stripe_len
);
3025 if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
3026 offset
= map
->stripe_len
* num
;
3027 increment
= map
->stripe_len
* map
->num_stripes
;
3029 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
3030 int factor
= map
->num_stripes
/ map
->sub_stripes
;
3031 offset
= map
->stripe_len
* (num
/ map
->sub_stripes
);
3032 increment
= map
->stripe_len
* factor
;
3033 mirror_num
= num
% map
->sub_stripes
+ 1;
3034 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID1
) {
3035 increment
= map
->stripe_len
;
3036 mirror_num
= num
% map
->num_stripes
+ 1;
3037 } else if (map
->type
& BTRFS_BLOCK_GROUP_DUP
) {
3038 increment
= map
->stripe_len
;
3039 mirror_num
= num
% map
->num_stripes
+ 1;
3040 } else if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
3041 BTRFS_BLOCK_GROUP_RAID6
)) {
3042 get_raid56_logic_offset(physical
, num
, map
, &offset
, NULL
);
3043 increment
= map
->stripe_len
* nr_data_stripes(map
);
3046 increment
= map
->stripe_len
;
3050 path
= btrfs_alloc_path();
3054 ppath
= btrfs_alloc_path();
3056 btrfs_free_path(ppath
);
3061 * work on commit root. The related disk blocks are static as
3062 * long as COW is applied. This means, it is save to rewrite
3063 * them to repair disk errors without any race conditions
3065 path
->search_commit_root
= 1;
3066 path
->skip_locking
= 1;
3069 * trigger the readahead for extent tree csum tree and wait for
3070 * completion. During readahead, the scrub is officially paused
3071 * to not hold off transaction commits
3073 logical
= base
+ offset
;
3074 physical_end
= physical
+ nstripes
* map
->stripe_len
;
3075 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
3076 BTRFS_BLOCK_GROUP_RAID6
)) {
3077 get_raid56_logic_offset(physical_end
, num
,
3078 map
, &logic_end
, NULL
);
3081 logic_end
= logical
+ increment
* nstripes
;
3083 wait_event(sctx
->list_wait
,
3084 atomic_read(&sctx
->bios_in_flight
) == 0);
3085 scrub_blocked_if_needed(fs_info
);
3087 /* FIXME it might be better to start readahead at commit root */
3088 key_start
.objectid
= logical
;
3089 key_start
.type
= BTRFS_EXTENT_ITEM_KEY
;
3090 key_start
.offset
= (u64
)0;
3091 key_end
.objectid
= logic_end
;
3092 key_end
.type
= BTRFS_METADATA_ITEM_KEY
;
3093 key_end
.offset
= (u64
)-1;
3094 reada1
= btrfs_reada_add(root
, &key_start
, &key_end
);
3096 key_start
.objectid
= BTRFS_EXTENT_CSUM_OBJECTID
;
3097 key_start
.type
= BTRFS_EXTENT_CSUM_KEY
;
3098 key_start
.offset
= logical
;
3099 key_end
.objectid
= BTRFS_EXTENT_CSUM_OBJECTID
;
3100 key_end
.type
= BTRFS_EXTENT_CSUM_KEY
;
3101 key_end
.offset
= logic_end
;
3102 reada2
= btrfs_reada_add(csum_root
, &key_start
, &key_end
);
3104 if (!IS_ERR(reada1
))
3105 btrfs_reada_wait(reada1
);
3106 if (!IS_ERR(reada2
))
3107 btrfs_reada_wait(reada2
);
3111 * collect all data csums for the stripe to avoid seeking during
3112 * the scrub. This might currently (crc32) end up to be about 1MB
3114 blk_start_plug(&plug
);
3117 * now find all extents for each stripe and scrub them
3120 while (physical
< physical_end
) {
3121 /* for raid56, we skip parity stripe */
3122 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
3123 BTRFS_BLOCK_GROUP_RAID6
)) {
3124 ret
= get_raid56_logic_offset(physical
, num
,
3125 map
, &logical
, &stripe_logical
);
3128 stripe_logical
+= base
;
3129 stripe_end
= stripe_logical
+ increment
- 1;
3130 ret
= scrub_raid56_parity(sctx
, map
, scrub_dev
,
3131 ppath
, stripe_logical
,
3141 if (atomic_read(&fs_info
->scrub_cancel_req
) ||
3142 atomic_read(&sctx
->cancel_req
)) {
3147 * check to see if we have to pause
3149 if (atomic_read(&fs_info
->scrub_pause_req
)) {
3150 /* push queued extents */
3151 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 1);
3153 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3154 scrub_wr_submit(sctx
);
3155 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3156 wait_event(sctx
->list_wait
,
3157 atomic_read(&sctx
->bios_in_flight
) == 0);
3158 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 0);
3159 scrub_blocked_if_needed(fs_info
);
3162 if (btrfs_fs_incompat(fs_info
, SKINNY_METADATA
))
3163 key
.type
= BTRFS_METADATA_ITEM_KEY
;
3165 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
3166 key
.objectid
= logical
;
3167 key
.offset
= (u64
)-1;
3169 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3174 ret
= btrfs_previous_extent_item(root
, path
, 0);
3178 /* there's no smaller item, so stick with the
3180 btrfs_release_path(path
);
3181 ret
= btrfs_search_slot(NULL
, root
, &key
,
3193 slot
= path
->slots
[0];
3194 if (slot
>= btrfs_header_nritems(l
)) {
3195 ret
= btrfs_next_leaf(root
, path
);
3204 btrfs_item_key_to_cpu(l
, &key
, slot
);
3206 if (key
.type
== BTRFS_METADATA_ITEM_KEY
)
3207 bytes
= root
->nodesize
;
3211 if (key
.objectid
+ bytes
<= logical
)
3214 if (key
.type
!= BTRFS_EXTENT_ITEM_KEY
&&
3215 key
.type
!= BTRFS_METADATA_ITEM_KEY
)
3218 if (key
.objectid
>= logical
+ map
->stripe_len
) {
3219 /* out of this device extent */
3220 if (key
.objectid
>= logic_end
)
3225 extent
= btrfs_item_ptr(l
, slot
,
3226 struct btrfs_extent_item
);
3227 flags
= btrfs_extent_flags(l
, extent
);
3228 generation
= btrfs_extent_generation(l
, extent
);
3230 if (key
.objectid
< logical
&&
3231 (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)) {
3233 "scrub: tree block %llu spanning "
3234 "stripes, ignored. logical=%llu",
3235 key
.objectid
, logical
);
3240 extent_logical
= key
.objectid
;
3244 * trim extent to this stripe
3246 if (extent_logical
< logical
) {
3247 extent_len
-= logical
- extent_logical
;
3248 extent_logical
= logical
;
3250 if (extent_logical
+ extent_len
>
3251 logical
+ map
->stripe_len
) {
3252 extent_len
= logical
+ map
->stripe_len
-
3256 extent_physical
= extent_logical
- logical
+ physical
;
3257 extent_dev
= scrub_dev
;
3258 extent_mirror_num
= mirror_num
;
3260 scrub_remap_extent(fs_info
, extent_logical
,
3261 extent_len
, &extent_physical
,
3263 &extent_mirror_num
);
3265 ret
= btrfs_lookup_csums_range(csum_root
, logical
,
3266 logical
+ map
->stripe_len
- 1,
3267 &sctx
->csum_list
, 1);
3271 ret
= scrub_extent(sctx
, extent_logical
, extent_len
,
3272 extent_physical
, extent_dev
, flags
,
3273 generation
, extent_mirror_num
,
3274 extent_logical
- logical
+ physical
);
3278 scrub_free_csums(sctx
);
3279 if (extent_logical
+ extent_len
<
3280 key
.objectid
+ bytes
) {
3281 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
3282 BTRFS_BLOCK_GROUP_RAID6
)) {
3284 * loop until we find next data stripe
3285 * or we have finished all stripes.
3288 physical
+= map
->stripe_len
;
3289 ret
= get_raid56_logic_offset(physical
,
3294 if (ret
&& physical
< physical_end
) {
3295 stripe_logical
+= base
;
3296 stripe_end
= stripe_logical
+
3298 ret
= scrub_raid56_parity(sctx
,
3299 map
, scrub_dev
, ppath
,
3307 physical
+= map
->stripe_len
;
3308 logical
+= increment
;
3310 if (logical
< key
.objectid
+ bytes
) {
3315 if (physical
>= physical_end
) {
3323 btrfs_release_path(path
);
3325 logical
+= increment
;
3326 physical
+= map
->stripe_len
;
3327 spin_lock(&sctx
->stat_lock
);
3329 sctx
->stat
.last_physical
= map
->stripes
[num
].physical
+
3332 sctx
->stat
.last_physical
= physical
;
3333 spin_unlock(&sctx
->stat_lock
);
3338 /* push queued extents */
3340 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3341 scrub_wr_submit(sctx
);
3342 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3344 blk_finish_plug(&plug
);
3345 btrfs_free_path(path
);
3346 btrfs_free_path(ppath
);
3347 return ret
< 0 ? ret
: 0;
3350 static noinline_for_stack
int scrub_chunk(struct scrub_ctx
*sctx
,
3351 struct btrfs_device
*scrub_dev
,
3352 u64 chunk_tree
, u64 chunk_objectid
,
3353 u64 chunk_offset
, u64 length
,
3354 u64 dev_offset
, int is_dev_replace
)
3356 struct btrfs_mapping_tree
*map_tree
=
3357 &sctx
->dev_root
->fs_info
->mapping_tree
;
3358 struct map_lookup
*map
;
3359 struct extent_map
*em
;
3363 read_lock(&map_tree
->map_tree
.lock
);
3364 em
= lookup_extent_mapping(&map_tree
->map_tree
, chunk_offset
, 1);
3365 read_unlock(&map_tree
->map_tree
.lock
);
3370 map
= (struct map_lookup
*)em
->bdev
;
3371 if (em
->start
!= chunk_offset
)
3374 if (em
->len
< length
)
3377 for (i
= 0; i
< map
->num_stripes
; ++i
) {
3378 if (map
->stripes
[i
].dev
->bdev
== scrub_dev
->bdev
&&
3379 map
->stripes
[i
].physical
== dev_offset
) {
3380 ret
= scrub_stripe(sctx
, map
, scrub_dev
, i
,
3381 chunk_offset
, length
,
3388 free_extent_map(em
);
3393 static noinline_for_stack
3394 int scrub_enumerate_chunks(struct scrub_ctx
*sctx
,
3395 struct btrfs_device
*scrub_dev
, u64 start
, u64 end
,
3398 struct btrfs_dev_extent
*dev_extent
= NULL
;
3399 struct btrfs_path
*path
;
3400 struct btrfs_root
*root
= sctx
->dev_root
;
3401 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3408 struct extent_buffer
*l
;
3409 struct btrfs_key key
;
3410 struct btrfs_key found_key
;
3411 struct btrfs_block_group_cache
*cache
;
3412 struct btrfs_dev_replace
*dev_replace
= &fs_info
->dev_replace
;
3414 path
= btrfs_alloc_path();
3419 path
->search_commit_root
= 1;
3420 path
->skip_locking
= 1;
3422 key
.objectid
= scrub_dev
->devid
;
3424 key
.type
= BTRFS_DEV_EXTENT_KEY
;
3427 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3431 if (path
->slots
[0] >=
3432 btrfs_header_nritems(path
->nodes
[0])) {
3433 ret
= btrfs_next_leaf(root
, path
);
3440 slot
= path
->slots
[0];
3442 btrfs_item_key_to_cpu(l
, &found_key
, slot
);
3444 if (found_key
.objectid
!= scrub_dev
->devid
)
3447 if (found_key
.type
!= BTRFS_DEV_EXTENT_KEY
)
3450 if (found_key
.offset
>= end
)
3453 if (found_key
.offset
< key
.offset
)
3456 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
3457 length
= btrfs_dev_extent_length(l
, dev_extent
);
3459 if (found_key
.offset
+ length
<= start
)
3462 chunk_tree
= btrfs_dev_extent_chunk_tree(l
, dev_extent
);
3463 chunk_objectid
= btrfs_dev_extent_chunk_objectid(l
, dev_extent
);
3464 chunk_offset
= btrfs_dev_extent_chunk_offset(l
, dev_extent
);
3467 * get a reference on the corresponding block group to prevent
3468 * the chunk from going away while we scrub it
3470 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3472 /* some chunks are removed but not committed to disk yet,
3473 * continue scrubbing */
3477 dev_replace
->cursor_right
= found_key
.offset
+ length
;
3478 dev_replace
->cursor_left
= found_key
.offset
;
3479 dev_replace
->item_needs_writeback
= 1;
3480 ret
= scrub_chunk(sctx
, scrub_dev
, chunk_tree
, chunk_objectid
,
3481 chunk_offset
, length
, found_key
.offset
,
3485 * flush, submit all pending read and write bios, afterwards
3487 * Note that in the dev replace case, a read request causes
3488 * write requests that are submitted in the read completion
3489 * worker. Therefore in the current situation, it is required
3490 * that all write requests are flushed, so that all read and
3491 * write requests are really completed when bios_in_flight
3494 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 1);
3496 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
3497 scrub_wr_submit(sctx
);
3498 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
3500 wait_event(sctx
->list_wait
,
3501 atomic_read(&sctx
->bios_in_flight
) == 0);
3502 atomic_inc(&fs_info
->scrubs_paused
);
3503 wake_up(&fs_info
->scrub_pause_wait
);
3506 * must be called before we decrease @scrub_paused.
3507 * make sure we don't block transaction commit while
3508 * we are waiting pending workers finished.
3510 wait_event(sctx
->list_wait
,
3511 atomic_read(&sctx
->workers_pending
) == 0);
3512 atomic_set(&sctx
->wr_ctx
.flush_all_writes
, 0);
3514 mutex_lock(&fs_info
->scrub_lock
);
3515 __scrub_blocked_if_needed(fs_info
);
3516 atomic_dec(&fs_info
->scrubs_paused
);
3517 mutex_unlock(&fs_info
->scrub_lock
);
3518 wake_up(&fs_info
->scrub_pause_wait
);
3520 btrfs_put_block_group(cache
);
3523 if (is_dev_replace
&&
3524 atomic64_read(&dev_replace
->num_write_errors
) > 0) {
3528 if (sctx
->stat
.malloc_errors
> 0) {
3533 dev_replace
->cursor_left
= dev_replace
->cursor_right
;
3534 dev_replace
->item_needs_writeback
= 1;
3536 key
.offset
= found_key
.offset
+ length
;
3537 btrfs_release_path(path
);
3540 btrfs_free_path(path
);
3543 * ret can still be 1 from search_slot or next_leaf,
3544 * that's not an error
3546 return ret
< 0 ? ret
: 0;
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != root->fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;
	int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers) {
			ret = -ENOMEM;
			goto out;
		}
	}
	++fs_info->scrub_workers_refcnt;
out:
	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
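/*
 * Entry point for both scrub and dev-replace: after checking the size
 * assumptions and locking out concurrent users of the device, scrub the
 * super blocks and then every allocated chunk on the device.
 */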
3636 int btrfs_scrub_dev(struct btrfs_fs_info
*fs_info
, u64 devid
, u64 start
,
3637 u64 end
, struct btrfs_scrub_progress
*progress
,
3638 int readonly
, int is_dev_replace
)
3640 struct scrub_ctx
*sctx
;
3642 struct btrfs_device
*dev
;
3643 struct rcu_string
*name
;
3645 if (btrfs_fs_closing(fs_info
))
3648 if (fs_info
->chunk_root
->nodesize
> BTRFS_STRIPE_LEN
) {
3650 * in this case scrub is unable to calculate the checksum
3651 * the way scrub is implemented. Do not handle this
3652 * situation at all because it won't ever happen.
3655 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3656 fs_info
->chunk_root
->nodesize
, BTRFS_STRIPE_LEN
);
3660 if (fs_info
->chunk_root
->sectorsize
!= PAGE_SIZE
) {
3661 /* not supported for data w/o checksums */
3663 "scrub: size assumption sectorsize != PAGE_SIZE "
3664 "(%d != %lu) fails",
3665 fs_info
->chunk_root
->sectorsize
, PAGE_SIZE
);
3669 if (fs_info
->chunk_root
->nodesize
>
3670 PAGE_SIZE
* SCRUB_MAX_PAGES_PER_BLOCK
||
3671 fs_info
->chunk_root
->sectorsize
>
3672 PAGE_SIZE
* SCRUB_MAX_PAGES_PER_BLOCK
) {
3674 * would exhaust the array bounds of pagev member in
3675 * struct scrub_block
3677 btrfs_err(fs_info
, "scrub: size assumption nodesize and sectorsize "
3678 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3679 fs_info
->chunk_root
->nodesize
,
3680 SCRUB_MAX_PAGES_PER_BLOCK
,
3681 fs_info
->chunk_root
->sectorsize
,
3682 SCRUB_MAX_PAGES_PER_BLOCK
);
3687 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
3688 dev
= btrfs_find_device(fs_info
, devid
, NULL
, NULL
);
3689 if (!dev
|| (dev
->missing
&& !is_dev_replace
)) {
3690 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3694 if (!is_dev_replace
&& !readonly
&& !dev
->writeable
) {
3695 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3697 name
= rcu_dereference(dev
->name
);
3698 btrfs_err(fs_info
, "scrub: device %s is not writable",
3704 mutex_lock(&fs_info
->scrub_lock
);
3705 if (!dev
->in_fs_metadata
|| dev
->is_tgtdev_for_dev_replace
) {
3706 mutex_unlock(&fs_info
->scrub_lock
);
3707 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3711 btrfs_dev_replace_lock(&fs_info
->dev_replace
);
3712 if (dev
->scrub_device
||
3714 btrfs_dev_replace_is_ongoing(&fs_info
->dev_replace
))) {
3715 btrfs_dev_replace_unlock(&fs_info
->dev_replace
);
3716 mutex_unlock(&fs_info
->scrub_lock
);
3717 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3718 return -EINPROGRESS
;
3720 btrfs_dev_replace_unlock(&fs_info
->dev_replace
);
3722 ret
= scrub_workers_get(fs_info
, is_dev_replace
);
3724 mutex_unlock(&fs_info
->scrub_lock
);
3725 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3729 sctx
= scrub_setup_ctx(dev
, is_dev_replace
);
3731 mutex_unlock(&fs_info
->scrub_lock
);
3732 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3733 scrub_workers_put(fs_info
);
3734 return PTR_ERR(sctx
);
3736 sctx
->readonly
= readonly
;
3737 dev
->scrub_device
= sctx
;
3738 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3741 * checking @scrub_pause_req here, we can avoid
3742 * race between committing transaction and scrubbing.
3744 __scrub_blocked_if_needed(fs_info
);
3745 atomic_inc(&fs_info
->scrubs_running
);
3746 mutex_unlock(&fs_info
->scrub_lock
);
3748 if (!is_dev_replace
) {
3750 * by holding device list mutex, we can
3751 * kick off writing super in log tree sync.
3753 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
3754 ret
= scrub_supers(sctx
, dev
);
3755 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
3759 ret
= scrub_enumerate_chunks(sctx
, dev
, start
, end
,
3762 wait_event(sctx
->list_wait
, atomic_read(&sctx
->bios_in_flight
) == 0);
3763 atomic_dec(&fs_info
->scrubs_running
);
3764 wake_up(&fs_info
->scrub_pause_wait
);
3766 wait_event(sctx
->list_wait
, atomic_read(&sctx
->workers_pending
) == 0);
3769 memcpy(progress
, &sctx
->stat
, sizeof(*progress
));
3771 mutex_lock(&fs_info
->scrub_lock
);
3772 dev
->scrub_device
= NULL
;
3773 scrub_workers_put(fs_info
);
3774 mutex_unlock(&fs_info
->scrub_lock
);
3776 scrub_free_ctx(sctx
);
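/*
 * Pause and resume all running scrubs; this pair is used around transaction
 * commit to keep scrub I/O out of the commit critical section.
 */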
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}
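/*
 * The write context is only used for dev-replace: it holds the target device
 * and the bio that collects pages to be written to it.
 */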
3892 static int scrub_setup_wr_ctx(struct scrub_ctx
*sctx
,
3893 struct scrub_wr_ctx
*wr_ctx
,
3894 struct btrfs_fs_info
*fs_info
,
3895 struct btrfs_device
*dev
,
3898 WARN_ON(wr_ctx
->wr_curr_bio
!= NULL
);
3900 mutex_init(&wr_ctx
->wr_lock
);
3901 wr_ctx
->wr_curr_bio
= NULL
;
3902 if (!is_dev_replace
)
3905 WARN_ON(!dev
->bdev
);
3906 wr_ctx
->pages_per_wr_bio
= min_t(int, SCRUB_PAGES_PER_WR_BIO
,
3907 bio_get_nr_vecs(dev
->bdev
));
3908 wr_ctx
->tgtdev
= dev
;
3909 atomic_set(&wr_ctx
->flush_all_writes
, 0);
3913 static void scrub_free_wr_ctx(struct scrub_wr_ctx
*wr_ctx
)
3915 mutex_lock(&wr_ctx
->wr_lock
);
3916 kfree(wr_ctx
->wr_curr_bio
);
3917 wr_ctx
->wr_curr_bio
= NULL
;
3918 mutex_unlock(&wr_ctx
->wr_lock
);
3921 static int copy_nocow_pages(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
3922 int mirror_num
, u64 physical_for_dev_replace
)
3924 struct scrub_copy_nocow_ctx
*nocow_ctx
;
3925 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
3927 nocow_ctx
= kzalloc(sizeof(*nocow_ctx
), GFP_NOFS
);
3929 spin_lock(&sctx
->stat_lock
);
3930 sctx
->stat
.malloc_errors
++;
3931 spin_unlock(&sctx
->stat_lock
);
3935 scrub_pending_trans_workers_inc(sctx
);
3937 nocow_ctx
->sctx
= sctx
;
3938 nocow_ctx
->logical
= logical
;
3939 nocow_ctx
->len
= len
;
3940 nocow_ctx
->mirror_num
= mirror_num
;
3941 nocow_ctx
->physical_for_dev_replace
= physical_for_dev_replace
;
3942 btrfs_init_work(&nocow_ctx
->work
, btrfs_scrubnc_helper
,
3943 copy_nocow_pages_worker
, NULL
, NULL
);
3944 INIT_LIST_HEAD(&nocow_ctx
->inodes
);
3945 btrfs_queue_work(fs_info
->scrub_nocow_workers
,
3951 static int record_inode_for_nocow(u64 inum
, u64 offset
, u64 root
, void *ctx
)
3953 struct scrub_copy_nocow_ctx
*nocow_ctx
= ctx
;
3954 struct scrub_nocow_inode
*nocow_inode
;
3956 nocow_inode
= kzalloc(sizeof(*nocow_inode
), GFP_NOFS
);
3959 nocow_inode
->inum
= inum
;
3960 nocow_inode
->offset
= offset
;
3961 nocow_inode
->root
= root
;
3962 list_add_tail(&nocow_inode
->list
, &nocow_ctx
->inodes
);
3966 #define COPY_COMPLETE 1
3968 static void copy_nocow_pages_worker(struct btrfs_work
*work
)
3970 struct scrub_copy_nocow_ctx
*nocow_ctx
=
3971 container_of(work
, struct scrub_copy_nocow_ctx
, work
);
3972 struct scrub_ctx
*sctx
= nocow_ctx
->sctx
;
3973 u64 logical
= nocow_ctx
->logical
;
3974 u64 len
= nocow_ctx
->len
;
3975 int mirror_num
= nocow_ctx
->mirror_num
;
3976 u64 physical_for_dev_replace
= nocow_ctx
->physical_for_dev_replace
;
3978 struct btrfs_trans_handle
*trans
= NULL
;
3979 struct btrfs_fs_info
*fs_info
;
3980 struct btrfs_path
*path
;
3981 struct btrfs_root
*root
;
3982 int not_written
= 0;
3984 fs_info
= sctx
->dev_root
->fs_info
;
3985 root
= fs_info
->extent_root
;
3987 path
= btrfs_alloc_path();
3989 spin_lock(&sctx
->stat_lock
);
3990 sctx
->stat
.malloc_errors
++;
3991 spin_unlock(&sctx
->stat_lock
);
3996 trans
= btrfs_join_transaction(root
);
3997 if (IS_ERR(trans
)) {
4002 ret
= iterate_inodes_from_logical(logical
, fs_info
, path
,
4003 record_inode_for_nocow
, nocow_ctx
);
4004 if (ret
!= 0 && ret
!= -ENOENT
) {
4005 btrfs_warn(fs_info
, "iterate_inodes_from_logical() failed: log %llu, "
4006 "phys %llu, len %llu, mir %u, ret %d",
4007 logical
, physical_for_dev_replace
, len
, mirror_num
,
4013 btrfs_end_transaction(trans
, root
);
4015 while (!list_empty(&nocow_ctx
->inodes
)) {
4016 struct scrub_nocow_inode
*entry
;
4017 entry
= list_first_entry(&nocow_ctx
->inodes
,
4018 struct scrub_nocow_inode
,
4020 list_del_init(&entry
->list
);
4021 ret
= copy_nocow_pages_for_inode(entry
->inum
, entry
->offset
,
4022 entry
->root
, nocow_ctx
);
4024 if (ret
== COPY_COMPLETE
) {
4032 while (!list_empty(&nocow_ctx
->inodes
)) {
4033 struct scrub_nocow_inode
*entry
;
4034 entry
= list_first_entry(&nocow_ctx
->inodes
,
4035 struct scrub_nocow_inode
,
4037 list_del_init(&entry
->list
);
4040 if (trans
&& !IS_ERR(trans
))
4041 btrfs_end_transaction(trans
, root
);
4043 btrfs_dev_replace_stats_inc(&fs_info
->dev_replace
.
4044 num_uncorrectable_read_errors
);
4046 btrfs_free_path(path
);
4049 scrub_pending_trans_workers_dec(sctx
);
4052 static int check_extent_to_block(struct inode
*inode
, u64 start
, u64 len
,
4055 struct extent_state
*cached_state
= NULL
;
4056 struct btrfs_ordered_extent
*ordered
;
4057 struct extent_io_tree
*io_tree
;
4058 struct extent_map
*em
;
4059 u64 lockstart
= start
, lockend
= start
+ len
- 1;
4062 io_tree
= &BTRFS_I(inode
)->io_tree
;
4064 lock_extent_bits(io_tree
, lockstart
, lockend
, 0, &cached_state
);
4065 ordered
= btrfs_lookup_ordered_range(inode
, lockstart
, len
);
4067 btrfs_put_ordered_extent(ordered
);
4072 em
= btrfs_get_extent(inode
, NULL
, 0, start
, len
, 0);
4079 * This extent does not actually cover the logical extent anymore,
4080 * move on to the next inode.
4082 if (em
->block_start
> logical
||
4083 em
->block_start
+ em
->block_len
< logical
+ len
) {
4084 free_extent_map(em
);
4088 free_extent_map(em
);
4091 unlock_extent_cached(io_tree
, lockstart
, lockend
, &cached_state
,
4096 static int copy_nocow_pages_for_inode(u64 inum
, u64 offset
, u64 root
,
4097 struct scrub_copy_nocow_ctx
*nocow_ctx
)
4099 struct btrfs_fs_info
*fs_info
= nocow_ctx
->sctx
->dev_root
->fs_info
;
4100 struct btrfs_key key
;
4101 struct inode
*inode
;
4103 struct btrfs_root
*local_root
;
4104 struct extent_io_tree
*io_tree
;
4105 u64 physical_for_dev_replace
;
4106 u64 nocow_ctx_logical
;
4107 u64 len
= nocow_ctx
->len
;
4108 unsigned long index
;
4113 key
.objectid
= root
;
4114 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4115 key
.offset
= (u64
)-1;
4117 srcu_index
= srcu_read_lock(&fs_info
->subvol_srcu
);
4119 local_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
4120 if (IS_ERR(local_root
)) {
4121 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
4122 return PTR_ERR(local_root
);
4125 key
.type
= BTRFS_INODE_ITEM_KEY
;
4126 key
.objectid
= inum
;
4128 inode
= btrfs_iget(fs_info
->sb
, &key
, local_root
, NULL
);
4129 srcu_read_unlock(&fs_info
->subvol_srcu
, srcu_index
);
4131 return PTR_ERR(inode
);
4133 /* Avoid truncate/dio/punch hole.. */
4134 mutex_lock(&inode
->i_mutex
);
4135 inode_dio_wait(inode
);
4137 physical_for_dev_replace
= nocow_ctx
->physical_for_dev_replace
;
4138 io_tree
= &BTRFS_I(inode
)->io_tree
;
4139 nocow_ctx_logical
= nocow_ctx
->logical
;
4141 ret
= check_extent_to_block(inode
, offset
, len
, nocow_ctx_logical
);
4143 ret
= ret
> 0 ? 0 : ret
;
4147 while (len
>= PAGE_CACHE_SIZE
) {
4148 index
= offset
>> PAGE_CACHE_SHIFT
;
4150 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
4152 btrfs_err(fs_info
, "find_or_create_page() failed");
4157 if (PageUptodate(page
)) {
4158 if (PageDirty(page
))
4161 ClearPageError(page
);
4162 err
= extent_read_full_page(io_tree
, page
,
4164 nocow_ctx
->mirror_num
);
4172 * If the page has been remove from the page cache,
4173 * the data on it is meaningless, because it may be
4174 * old one, the new data may be written into the new
4175 * page in the page cache.
4177 if (page
->mapping
!= inode
->i_mapping
) {
4179 page_cache_release(page
);
4182 if (!PageUptodate(page
)) {
4188 ret
= check_extent_to_block(inode
, offset
, len
,
4191 ret
= ret
> 0 ? 0 : ret
;
4195 err
= write_page_nocow(nocow_ctx
->sctx
,
4196 physical_for_dev_replace
, page
);
4201 page_cache_release(page
);
4206 offset
+= PAGE_CACHE_SIZE
;
4207 physical_for_dev_replace
+= PAGE_CACHE_SIZE
;
4208 nocow_ctx_logical
+= PAGE_CACHE_SIZE
;
4209 len
-= PAGE_CACHE_SIZE
;
4211 ret
= COPY_COMPLETE
;
4213 mutex_unlock(&inode
->i_mutex
);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}