// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extent and super block and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
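/*
 * Rough sizing math behind the values above, assuming 4 KiB pages: one read
 * or write bio carries at most 32 pages, i.e. 128 KiB of payload, and with
 * 64 scrub_bios per scrub context up to 64 * 128 KiB = 8 MiB can be in
 * flight per device at any time.
 */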
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;	/* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};
/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * read data or check data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node		node;
	u64			logical;
	u64			refs;
	struct mutex		mutex;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					    int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}
static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
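/*
 * Note on the pause handshake above: scrub_pause_on() only announces that
 * this scrub is paused (scrubs_paused is incremented) and wakes any waiter,
 * while scrub_pause_off() re-takes scrub_lock, waits in
 * __scrub_blocked_if_needed() until scrub_pause_req drops back to zero and
 * then clears the paused state again. scrub_blocked_if_needed() is simply
 * both calls back to back, i.e. a single "yield here if somebody asked us
 * to pause" point.
 */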
/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}
/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
	      cache->full_stripe_len + cache->key.objectid;
	return ret;
}
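/*
 * Worked example with hypothetical numbers (two data stripes, RAID5): for
 * cache->key.objectid == 1 MiB and cache->full_stripe_len == 128 KiB, a
 * bytenr of 1 MiB + 200 KiB gives div64_u64(200 KiB, 128 KiB) == 1, so the
 * returned full stripe start is 1 MiB + 128 KiB.
 */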
/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So caller must call unlock_full_stripe() at the same context.
 *
 * Return <0 if encounters error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
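/*
 * Typical pairing of the two helpers above, as done by
 * scrub_handle_errored_block() (a sketch, error handling trimmed):
 *
 *	bool full_stripe_locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck and repair the block ...
 *	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
 */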
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}
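/*
 * Lifetime of a scrub_ctx: scrub_setup_ctx() starts it with a refcount of
 * one, scrub_pending_bio_inc() takes an extra reference for every bio in
 * flight and scrub_pending_bio_dec()/scrub_put_ctx() drop it again, so the
 * context is only freed once its owner and all outstanding bios are done
 * with it.
 */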
static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
634 static int scrub_print_warning_inode(u64 inum
, u64 offset
, u64 root
,
642 struct extent_buffer
*eb
;
643 struct btrfs_inode_item
*inode_item
;
644 struct scrub_warning
*swarn
= warn_ctx
;
645 struct btrfs_fs_info
*fs_info
= swarn
->dev
->fs_info
;
646 struct inode_fs_paths
*ipath
= NULL
;
647 struct btrfs_root
*local_root
;
648 struct btrfs_key root_key
;
649 struct btrfs_key key
;
651 root_key
.objectid
= root
;
652 root_key
.type
= BTRFS_ROOT_ITEM_KEY
;
653 root_key
.offset
= (u64
)-1;
654 local_root
= btrfs_read_fs_root_no_name(fs_info
, &root_key
);
655 if (IS_ERR(local_root
)) {
656 ret
= PTR_ERR(local_root
);
661 * this makes the path point to (inum INODE_ITEM ioff)
664 key
.type
= BTRFS_INODE_ITEM_KEY
;
667 ret
= btrfs_search_slot(NULL
, local_root
, &key
, swarn
->path
, 0, 0);
669 btrfs_release_path(swarn
->path
);
673 eb
= swarn
->path
->nodes
[0];
674 inode_item
= btrfs_item_ptr(eb
, swarn
->path
->slots
[0],
675 struct btrfs_inode_item
);
676 isize
= btrfs_inode_size(eb
, inode_item
);
677 nlink
= btrfs_inode_nlink(eb
, inode_item
);
678 btrfs_release_path(swarn
->path
);
681 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
682 * uses GFP_NOFS in this context, so we keep it consistent but it does
683 * not seem to be strictly necessary.
685 nofs_flag
= memalloc_nofs_save();
686 ipath
= init_ipath(4096, local_root
, swarn
->path
);
687 memalloc_nofs_restore(nofs_flag
);
689 ret
= PTR_ERR(ipath
);
693 ret
= paths_from_inode(inum
, ipath
);
699 * we deliberately ignore the bit ipath might have been too small to
700 * hold all of the paths here
702 for (i
= 0; i
< ipath
->fspath
->elem_cnt
; ++i
)
703 btrfs_warn_in_rcu(fs_info
,
704 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
705 swarn
->errstr
, swarn
->logical
,
706 rcu_str_deref(swarn
->dev
->name
),
709 min(isize
- offset
, (u64
)PAGE_SIZE
), nlink
,
710 (char *)(unsigned long)ipath
->fspath
->val
[i
]);
716 btrfs_warn_in_rcu(fs_info
,
717 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
718 swarn
->errstr
, swarn
->logical
,
719 rcu_str_deref(swarn
->dev
->name
),
721 root
, inum
, offset
, ret
);
727 static void scrub_print_warning(const char *errstr
, struct scrub_block
*sblock
)
729 struct btrfs_device
*dev
;
730 struct btrfs_fs_info
*fs_info
;
731 struct btrfs_path
*path
;
732 struct btrfs_key found_key
;
733 struct extent_buffer
*eb
;
734 struct btrfs_extent_item
*ei
;
735 struct scrub_warning swarn
;
736 unsigned long ptr
= 0;
744 WARN_ON(sblock
->page_count
< 1);
745 dev
= sblock
->pagev
[0]->dev
;
746 fs_info
= sblock
->sctx
->fs_info
;
748 path
= btrfs_alloc_path();
752 swarn
.physical
= sblock
->pagev
[0]->physical
;
753 swarn
.logical
= sblock
->pagev
[0]->logical
;
754 swarn
.errstr
= errstr
;
757 ret
= extent_from_logical(fs_info
, swarn
.logical
, path
, &found_key
,
762 extent_item_pos
= swarn
.logical
- found_key
.objectid
;
763 swarn
.extent_item_size
= found_key
.offset
;
766 ei
= btrfs_item_ptr(eb
, path
->slots
[0], struct btrfs_extent_item
);
767 item_size
= btrfs_item_size_nr(eb
, path
->slots
[0]);
769 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
771 ret
= tree_backref_for_extent(&ptr
, eb
, &found_key
, ei
,
772 item_size
, &ref_root
,
774 btrfs_warn_in_rcu(fs_info
,
775 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
776 errstr
, swarn
.logical
,
777 rcu_str_deref(dev
->name
),
779 ref_level
? "node" : "leaf",
780 ret
< 0 ? -1 : ref_level
,
781 ret
< 0 ? -1 : ref_root
);
783 btrfs_release_path(path
);
785 btrfs_release_path(path
);
788 iterate_extent_inodes(fs_info
, found_key
.objectid
,
790 scrub_print_warning_inode
, &swarn
, false);
794 btrfs_free_path(path
);
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}
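/*
 * A scrub_recover pins both the btrfs_bio used for the recovery and the
 * fs-wide bio counter (taken via btrfs_bio_counter_inc_blocked() when the
 * block was mapped in scrub_setup_recheck_block()); the final
 * scrub_put_recover() releases both.
 */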
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
820 static int scrub_handle_errored_block(struct scrub_block
*sblock_to_check
)
822 struct scrub_ctx
*sctx
= sblock_to_check
->sctx
;
823 struct btrfs_device
*dev
;
824 struct btrfs_fs_info
*fs_info
;
826 unsigned int failed_mirror_index
;
827 unsigned int is_metadata
;
828 unsigned int have_csum
;
829 struct scrub_block
*sblocks_for_recheck
; /* holds one for each mirror */
830 struct scrub_block
*sblock_bad
;
835 bool full_stripe_locked
;
836 unsigned int nofs_flag
;
837 static DEFINE_RATELIMIT_STATE(_rs
, DEFAULT_RATELIMIT_INTERVAL
,
838 DEFAULT_RATELIMIT_BURST
);
840 BUG_ON(sblock_to_check
->page_count
< 1);
841 fs_info
= sctx
->fs_info
;
842 if (sblock_to_check
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_SUPER
) {
844 * if we find an error in a super block, we just report it.
845 * They will get written with the next transaction commit
848 spin_lock(&sctx
->stat_lock
);
849 ++sctx
->stat
.super_errors
;
850 spin_unlock(&sctx
->stat_lock
);
853 logical
= sblock_to_check
->pagev
[0]->logical
;
854 BUG_ON(sblock_to_check
->pagev
[0]->mirror_num
< 1);
855 failed_mirror_index
= sblock_to_check
->pagev
[0]->mirror_num
- 1;
856 is_metadata
= !(sblock_to_check
->pagev
[0]->flags
&
857 BTRFS_EXTENT_FLAG_DATA
);
858 have_csum
= sblock_to_check
->pagev
[0]->have_csum
;
859 dev
= sblock_to_check
->pagev
[0]->dev
;
862 * We must use GFP_NOFS because the scrub task might be waiting for a
863 * worker task executing this function and in turn a transaction commit
864 * might be waiting the scrub task to pause (which needs to wait for all
865 * the worker tasks to complete before pausing).
866 * We do allocations in the workers through insert_full_stripe_lock()
867 * and scrub_add_page_to_wr_bio(), which happens down the call chain of
870 nofs_flag
= memalloc_nofs_save();
872 * For RAID5/6, race can happen for a different device scrub thread.
873 * For data corruption, Parity and Data threads will both try
874 * to recovery the data.
875 * Race can lead to doubly added csum error, or even unrecoverable
878 ret
= lock_full_stripe(fs_info
, logical
, &full_stripe_locked
);
880 memalloc_nofs_restore(nofs_flag
);
881 spin_lock(&sctx
->stat_lock
);
883 sctx
->stat
.malloc_errors
++;
884 sctx
->stat
.read_errors
++;
885 sctx
->stat
.uncorrectable_errors
++;
886 spin_unlock(&sctx
->stat_lock
);
891 * read all mirrors one after the other. This includes to
892 * re-read the extent or metadata block that failed (that was
893 * the cause that this fixup code is called) another time,
894 * page by page this time in order to know which pages
895 * caused I/O errors and which ones are good (for all mirrors).
896 * It is the goal to handle the situation when more than one
897 * mirror contains I/O errors, but the errors do not
898 * overlap, i.e. the data can be repaired by selecting the
899 * pages from those mirrors without I/O error on the
900 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
901 * would be that mirror #1 has an I/O error on the first page,
902 * the second page is good, and mirror #2 has an I/O error on
903 * the second page, but the first page is good.
904 * Then the first page of the first mirror can be repaired by
905 * taking the first page of the second mirror, and the
906 * second page of the second mirror can be repaired by
907 * copying the contents of the 2nd page of the 1st mirror.
908 * One more note: if the pages of one mirror contain I/O
909 * errors, the checksum cannot be verified. In order to get
910 * the best data for repairing, the first attempt is to find
911 * a mirror without I/O errors and with a validated checksum.
912 * Only if this is not possible, the pages are picked from
913 * mirrors with I/O errors without considering the checksum.
914 * If the latter is the case, at the end, the checksum of the
915 * repaired area is verified in order to correctly maintain
919 sblocks_for_recheck
= kcalloc(BTRFS_MAX_MIRRORS
,
920 sizeof(*sblocks_for_recheck
), GFP_KERNEL
);
921 if (!sblocks_for_recheck
) {
922 spin_lock(&sctx
->stat_lock
);
923 sctx
->stat
.malloc_errors
++;
924 sctx
->stat
.read_errors
++;
925 sctx
->stat
.uncorrectable_errors
++;
926 spin_unlock(&sctx
->stat_lock
);
927 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
931 /* setup the context, map the logical blocks and alloc the pages */
932 ret
= scrub_setup_recheck_block(sblock_to_check
, sblocks_for_recheck
);
934 spin_lock(&sctx
->stat_lock
);
935 sctx
->stat
.read_errors
++;
936 sctx
->stat
.uncorrectable_errors
++;
937 spin_unlock(&sctx
->stat_lock
);
938 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
941 BUG_ON(failed_mirror_index
>= BTRFS_MAX_MIRRORS
);
942 sblock_bad
= sblocks_for_recheck
+ failed_mirror_index
;
944 /* build and submit the bios for the failed mirror, check checksums */
945 scrub_recheck_block(fs_info
, sblock_bad
, 1);
947 if (!sblock_bad
->header_error
&& !sblock_bad
->checksum_error
&&
948 sblock_bad
->no_io_error_seen
) {
950 * the error disappeared after reading page by page, or
951 * the area was part of a huge bio and other parts of the
952 * bio caused I/O errors, or the block layer merged several
953 * read requests into one and the error is caused by a
954 * different bio (usually one of the two latter cases is
957 spin_lock(&sctx
->stat_lock
);
958 sctx
->stat
.unverified_errors
++;
959 sblock_to_check
->data_corrected
= 1;
960 spin_unlock(&sctx
->stat_lock
);
962 if (sctx
->is_dev_replace
)
963 scrub_write_block_to_dev_replace(sblock_bad
);
967 if (!sblock_bad
->no_io_error_seen
) {
968 spin_lock(&sctx
->stat_lock
);
969 sctx
->stat
.read_errors
++;
970 spin_unlock(&sctx
->stat_lock
);
971 if (__ratelimit(&_rs
))
972 scrub_print_warning("i/o error", sblock_to_check
);
973 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
974 } else if (sblock_bad
->checksum_error
) {
975 spin_lock(&sctx
->stat_lock
);
976 sctx
->stat
.csum_errors
++;
977 spin_unlock(&sctx
->stat_lock
);
978 if (__ratelimit(&_rs
))
979 scrub_print_warning("checksum error", sblock_to_check
);
980 btrfs_dev_stat_inc_and_print(dev
,
981 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
982 } else if (sblock_bad
->header_error
) {
983 spin_lock(&sctx
->stat_lock
);
984 sctx
->stat
.verify_errors
++;
985 spin_unlock(&sctx
->stat_lock
);
986 if (__ratelimit(&_rs
))
987 scrub_print_warning("checksum/header error",
989 if (sblock_bad
->generation_error
)
990 btrfs_dev_stat_inc_and_print(dev
,
991 BTRFS_DEV_STAT_GENERATION_ERRS
);
993 btrfs_dev_stat_inc_and_print(dev
,
994 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
997 if (sctx
->readonly
) {
998 ASSERT(!sctx
->is_dev_replace
);
1003 * now build and submit the bios for the other mirrors, check
1005 * First try to pick the mirror which is completely without I/O
1006 * errors and also does not have a checksum error.
1007 * If one is found, and if a checksum is present, the full block
1008 * that is known to contain an error is rewritten. Afterwards
1009 * the block is known to be corrected.
1010 * If a mirror is found which is completely correct, and no
1011 * checksum is present, only those pages are rewritten that had
1012 * an I/O error in the block to be repaired, since it cannot be
1013 * determined, which copy of the other pages is better (and it
1014 * could happen otherwise that a correct page would be
1015 * overwritten by a bad one).
1017 for (mirror_index
= 0; ;mirror_index
++) {
1018 struct scrub_block
*sblock_other
;
1020 if (mirror_index
== failed_mirror_index
)
1023 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1024 if (!scrub_is_page_on_raid56(sblock_bad
->pagev
[0])) {
1025 if (mirror_index
>= BTRFS_MAX_MIRRORS
)
1027 if (!sblocks_for_recheck
[mirror_index
].page_count
)
1030 sblock_other
= sblocks_for_recheck
+ mirror_index
;
1032 struct scrub_recover
*r
= sblock_bad
->pagev
[0]->recover
;
1033 int max_allowed
= r
->bbio
->num_stripes
-
1034 r
->bbio
->num_tgtdevs
;
1036 if (mirror_index
>= max_allowed
)
1038 if (!sblocks_for_recheck
[1].page_count
)
1041 ASSERT(failed_mirror_index
== 0);
1042 sblock_other
= sblocks_for_recheck
+ 1;
1043 sblock_other
->pagev
[0]->mirror_num
= 1 + mirror_index
;
1046 /* build and submit the bios, check checksums */
1047 scrub_recheck_block(fs_info
, sblock_other
, 0);
1049 if (!sblock_other
->header_error
&&
1050 !sblock_other
->checksum_error
&&
1051 sblock_other
->no_io_error_seen
) {
1052 if (sctx
->is_dev_replace
) {
1053 scrub_write_block_to_dev_replace(sblock_other
);
1054 goto corrected_error
;
1056 ret
= scrub_repair_block_from_good_copy(
1057 sblock_bad
, sblock_other
);
1059 goto corrected_error
;
1064 if (sblock_bad
->no_io_error_seen
&& !sctx
->is_dev_replace
)
1065 goto did_not_correct_error
;
1068 * In case of I/O errors in the area that is supposed to be
1069 * repaired, continue by picking good copies of those pages.
1070 * Select the good pages from mirrors to rewrite bad pages from
1071 * the area to fix. Afterwards verify the checksum of the block
1072 * that is supposed to be repaired. This verification step is
1073 * only done for the purpose of statistic counting and for the
1074 * final scrub report, whether errors remain.
1075 * A perfect algorithm could make use of the checksum and try
1076 * all possible combinations of pages from the different mirrors
1077 * until the checksum verification succeeds. For example, when
1078 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1079 * of mirror #2 is readable but the final checksum test fails,
1080 * then the 2nd page of mirror #3 could be tried, whether now
1081 * the final checksum succeeds. But this would be a rare
1082 * exception and is therefore not implemented. At least it is
1083 * avoided that the good copy is overwritten.
1084 * A more useful improvement would be to pick the sectors
1085 * without I/O error based on sector sizes (512 bytes on legacy
1086 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1087 * mirror could be repaired by taking 512 byte of a different
1088 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1089 * area are unreadable.
1092 for (page_num
= 0; page_num
< sblock_bad
->page_count
;
1094 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1095 struct scrub_block
*sblock_other
= NULL
;
1097 /* skip no-io-error page in scrub */
1098 if (!page_bad
->io_error
&& !sctx
->is_dev_replace
)
1101 if (scrub_is_page_on_raid56(sblock_bad
->pagev
[0])) {
1103 * In case of dev replace, if raid56 rebuild process
1104 * didn't work out correct data, then copy the content
1105 * in sblock_bad to make sure target device is identical
1106 * to source device, instead of writing garbage data in
1107 * sblock_for_recheck array to target device.
1109 sblock_other
= NULL
;
1110 } else if (page_bad
->io_error
) {
1111 /* try to find no-io-error page in mirrors */
1112 for (mirror_index
= 0;
1113 mirror_index
< BTRFS_MAX_MIRRORS
&&
1114 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1116 if (!sblocks_for_recheck
[mirror_index
].
1117 pagev
[page_num
]->io_error
) {
1118 sblock_other
= sblocks_for_recheck
+
1127 if (sctx
->is_dev_replace
) {
1129 * did not find a mirror to fetch the page
1130 * from. scrub_write_page_to_dev_replace()
1131 * handles this case (page->io_error), by
1132 * filling the block with zeros before
1133 * submitting the write request
1136 sblock_other
= sblock_bad
;
1138 if (scrub_write_page_to_dev_replace(sblock_other
,
1141 &fs_info
->dev_replace
.num_write_errors
);
1144 } else if (sblock_other
) {
1145 ret
= scrub_repair_page_from_good_copy(sblock_bad
,
1149 page_bad
->io_error
= 0;
1155 if (success
&& !sctx
->is_dev_replace
) {
1156 if (is_metadata
|| have_csum
) {
1158 * need to verify the checksum now that all
1159 * sectors on disk are repaired (the write
1160 * request for data to be repaired is on its way).
1161 * Just be lazy and use scrub_recheck_block()
1162 * which re-reads the data before the checksum
1163 * is verified, but most likely the data comes out
1164 * of the page cache.
1166 scrub_recheck_block(fs_info
, sblock_bad
, 1);
1167 if (!sblock_bad
->header_error
&&
1168 !sblock_bad
->checksum_error
&&
1169 sblock_bad
->no_io_error_seen
)
1170 goto corrected_error
;
1172 goto did_not_correct_error
;
1175 spin_lock(&sctx
->stat_lock
);
1176 sctx
->stat
.corrected_errors
++;
1177 sblock_to_check
->data_corrected
= 1;
1178 spin_unlock(&sctx
->stat_lock
);
1179 btrfs_err_rl_in_rcu(fs_info
,
1180 "fixed up error at logical %llu on dev %s",
1181 logical
, rcu_str_deref(dev
->name
));
1184 did_not_correct_error
:
1185 spin_lock(&sctx
->stat_lock
);
1186 sctx
->stat
.uncorrectable_errors
++;
1187 spin_unlock(&sctx
->stat_lock
);
1188 btrfs_err_rl_in_rcu(fs_info
,
1189 "unable to fixup (regular) error at logical %llu on dev %s",
1190 logical
, rcu_str_deref(dev
->name
));
1194 if (sblocks_for_recheck
) {
1195 for (mirror_index
= 0; mirror_index
< BTRFS_MAX_MIRRORS
;
1197 struct scrub_block
*sblock
= sblocks_for_recheck
+
1199 struct scrub_recover
*recover
;
1202 for (page_index
= 0; page_index
< sblock
->page_count
;
1204 sblock
->pagev
[page_index
]->sblock
= NULL
;
1205 recover
= sblock
->pagev
[page_index
]->recover
;
1207 scrub_put_recover(fs_info
, recover
);
1208 sblock
->pagev
[page_index
]->recover
=
1211 scrub_page_put(sblock
->pagev
[page_index
]);
1214 kfree(sblocks_for_recheck
);
1217 ret
= unlock_full_stripe(fs_info
, logical
, full_stripe_locked
);
1218 memalloc_nofs_restore(nofs_flag
);
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}
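/*
 * Why 2 and 3 above: a RAID5 range can be read in two independent ways (the
 * data stripe itself, or a rebuild from the remaining stripes plus parity),
 * and RAID6 adds a third way via the Q stripe, so these profiles report 2
 * resp. 3 "mirrors" even though num_stripes is larger.
 */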
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
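/*
 * Example: for a RAID5 full stripe whose raid_map is { D0, D1, P } (P being
 * RAID5_P_STRIPE), a logical address falling inside D1's range yields
 * *stripe_index == 1 and *stripe_offset == logical - raid_map[1]; for
 * profiles without parity the mirror number is used as the index directly
 * and the offset is 0.
 */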
1264 static int scrub_setup_recheck_block(struct scrub_block
*original_sblock
,
1265 struct scrub_block
*sblocks_for_recheck
)
1267 struct scrub_ctx
*sctx
= original_sblock
->sctx
;
1268 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
1269 u64 length
= original_sblock
->page_count
* PAGE_SIZE
;
1270 u64 logical
= original_sblock
->pagev
[0]->logical
;
1271 u64 generation
= original_sblock
->pagev
[0]->generation
;
1272 u64 flags
= original_sblock
->pagev
[0]->flags
;
1273 u64 have_csum
= original_sblock
->pagev
[0]->have_csum
;
1274 struct scrub_recover
*recover
;
1275 struct btrfs_bio
*bbio
;
1286 * note: the two members refs and outstanding_pages
1287 * are not used (and not set) in the blocks that are used for
1288 * the recheck procedure
1291 while (length
> 0) {
1292 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1293 mapped_length
= sublen
;
1297 * with a length of PAGE_SIZE, each returned stripe
1298 * represents one mirror
1300 btrfs_bio_counter_inc_blocked(fs_info
);
1301 ret
= btrfs_map_sblock(fs_info
, BTRFS_MAP_GET_READ_MIRRORS
,
1302 logical
, &mapped_length
, &bbio
);
1303 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1304 btrfs_put_bbio(bbio
);
1305 btrfs_bio_counter_dec(fs_info
);
1309 recover
= kzalloc(sizeof(struct scrub_recover
), GFP_NOFS
);
1311 btrfs_put_bbio(bbio
);
1312 btrfs_bio_counter_dec(fs_info
);
1316 refcount_set(&recover
->refs
, 1);
1317 recover
->bbio
= bbio
;
1318 recover
->map_length
= mapped_length
;
1320 BUG_ON(page_index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
1322 nmirrors
= min(scrub_nr_raid_mirrors(bbio
), BTRFS_MAX_MIRRORS
);
1324 for (mirror_index
= 0; mirror_index
< nmirrors
;
1326 struct scrub_block
*sblock
;
1327 struct scrub_page
*page
;
1329 sblock
= sblocks_for_recheck
+ mirror_index
;
1330 sblock
->sctx
= sctx
;
1332 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1335 spin_lock(&sctx
->stat_lock
);
1336 sctx
->stat
.malloc_errors
++;
1337 spin_unlock(&sctx
->stat_lock
);
1338 scrub_put_recover(fs_info
, recover
);
1341 scrub_page_get(page
);
1342 sblock
->pagev
[page_index
] = page
;
1343 page
->sblock
= sblock
;
1344 page
->flags
= flags
;
1345 page
->generation
= generation
;
1346 page
->logical
= logical
;
1347 page
->have_csum
= have_csum
;
1350 original_sblock
->pagev
[0]->csum
,
1353 scrub_stripe_index_and_offset(logical
,
1362 page
->physical
= bbio
->stripes
[stripe_index
].physical
+
1364 page
->dev
= bbio
->stripes
[stripe_index
].dev
;
1366 BUG_ON(page_index
>= original_sblock
->page_count
);
1367 page
->physical_for_dev_replace
=
1368 original_sblock
->pagev
[page_index
]->
1369 physical_for_dev_replace
;
1370 /* for missing devices, dev->bdev is NULL */
1371 page
->mirror_num
= mirror_index
+ 1;
1372 sblock
->page_count
++;
1373 page
->page
= alloc_page(GFP_NOFS
);
1377 scrub_get_recover(recover
);
1378 page
->recover
= recover
;
1380 scrub_put_recover(fs_info
, recover
);
static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}
1394 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info
*fs_info
,
1396 struct scrub_page
*page
)
1398 DECLARE_COMPLETION_ONSTACK(done
);
1402 bio
->bi_iter
.bi_sector
= page
->logical
>> 9;
1403 bio
->bi_private
= &done
;
1404 bio
->bi_end_io
= scrub_bio_wait_endio
;
1406 mirror_num
= page
->sblock
->pagev
[0]->mirror_num
;
1407 ret
= raid56_parity_recover(fs_info
, bio
, page
->recover
->bbio
,
1408 page
->recover
->map_length
,
1413 wait_for_completion_io(&done
);
1414 return blk_status_to_errno(bio
->bi_status
);
1417 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info
*fs_info
,
1418 struct scrub_block
*sblock
)
1420 struct scrub_page
*first_page
= sblock
->pagev
[0];
1424 /* All pages in sblock belong to the same stripe on the same device. */
1425 ASSERT(first_page
->dev
);
1426 if (!first_page
->dev
->bdev
)
1429 bio
= btrfs_io_bio_alloc(BIO_MAX_PAGES
);
1430 bio_set_dev(bio
, first_page
->dev
->bdev
);
1432 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1433 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1435 WARN_ON(!page
->page
);
1436 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1439 if (scrub_submit_raid56_bio_wait(fs_info
, bio
, first_page
)) {
1446 scrub_recheck_block_checksum(sblock
);
1450 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++)
1451 sblock
->pagev
[page_num
]->io_error
= 1;
1453 sblock
->no_io_error_seen
= 0;
1457 * this function will check the on disk data for checksum errors, header
1458 * errors and read I/O errors. If any I/O errors happen, the exact pages
1459 * which are errored are marked as being bad. The goal is to enable scrub
1460 * to take those pages that are not errored from all the mirrors so that
1461 * the pages that are errored in the just handled mirror can be repaired.
1463 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1464 struct scrub_block
*sblock
,
1465 int retry_failed_mirror
)
1469 sblock
->no_io_error_seen
= 1;
1471 /* short cut for raid56 */
1472 if (!retry_failed_mirror
&& scrub_is_page_on_raid56(sblock
->pagev
[0]))
1473 return scrub_recheck_block_on_raid56(fs_info
, sblock
);
1475 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1477 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1479 if (page
->dev
->bdev
== NULL
) {
1481 sblock
->no_io_error_seen
= 0;
1485 WARN_ON(!page
->page
);
1486 bio
= btrfs_io_bio_alloc(1);
1487 bio_set_dev(bio
, page
->dev
->bdev
);
1489 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1490 bio
->bi_iter
.bi_sector
= page
->physical
>> 9;
1491 bio
->bi_opf
= REQ_OP_READ
;
1493 if (btrfsic_submit_bio_wait(bio
)) {
1495 sblock
->no_io_error_seen
= 0;
1501 if (sblock
->no_io_error_seen
)
1502 scrub_recheck_block_checksum(sblock
);
static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}
static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}
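/*
 * Only data and metadata blocks are re-checksummed here; super block errors
 * never reach this path because scrub_handle_errored_block() only accounts
 * them, they get rewritten with the next transaction commit anyway.
 */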
1527 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1528 struct scrub_block
*sblock_good
)
1533 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1536 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1546 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1547 struct scrub_block
*sblock_good
,
1548 int page_num
, int force_write
)
1550 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1551 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1552 struct btrfs_fs_info
*fs_info
= sblock_bad
->sctx
->fs_info
;
1554 BUG_ON(page_bad
->page
== NULL
);
1555 BUG_ON(page_good
->page
== NULL
);
1556 if (force_write
|| sblock_bad
->header_error
||
1557 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1561 if (!page_bad
->dev
->bdev
) {
1562 btrfs_warn_rl(fs_info
,
1563 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1567 bio
= btrfs_io_bio_alloc(1);
1568 bio_set_dev(bio
, page_bad
->dev
->bdev
);
1569 bio
->bi_iter
.bi_sector
= page_bad
->physical
>> 9;
1570 bio
->bi_opf
= REQ_OP_WRITE
;
1572 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1573 if (PAGE_SIZE
!= ret
) {
1578 if (btrfsic_submit_bio_wait(bio
)) {
1579 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1580 BTRFS_DEV_STAT_WRITE_ERRS
);
1581 atomic64_inc(&fs_info
->dev_replace
.num_write_errors
);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					    int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		clear_page(mapped_buffer);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
1628 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1629 struct scrub_page
*spage
)
1631 struct scrub_bio
*sbio
;
1634 mutex_lock(&sctx
->wr_lock
);
1636 if (!sctx
->wr_curr_bio
) {
1637 sctx
->wr_curr_bio
= kzalloc(sizeof(*sctx
->wr_curr_bio
),
1639 if (!sctx
->wr_curr_bio
) {
1640 mutex_unlock(&sctx
->wr_lock
);
1643 sctx
->wr_curr_bio
->sctx
= sctx
;
1644 sctx
->wr_curr_bio
->page_count
= 0;
1646 sbio
= sctx
->wr_curr_bio
;
1647 if (sbio
->page_count
== 0) {
1650 sbio
->physical
= spage
->physical_for_dev_replace
;
1651 sbio
->logical
= spage
->logical
;
1652 sbio
->dev
= sctx
->wr_tgtdev
;
1655 bio
= btrfs_io_bio_alloc(sctx
->pages_per_wr_bio
);
1659 bio
->bi_private
= sbio
;
1660 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1661 bio_set_dev(bio
, sbio
->dev
->bdev
);
1662 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
1663 bio
->bi_opf
= REQ_OP_WRITE
;
1665 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1666 spage
->physical_for_dev_replace
||
1667 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1669 scrub_wr_submit(sctx
);
1673 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1674 if (ret
!= PAGE_SIZE
) {
1675 if (sbio
->page_count
< 1) {
1678 mutex_unlock(&sctx
->wr_lock
);
1681 scrub_wr_submit(sctx
);
1685 sbio
->pagev
[sbio
->page_count
] = spage
;
1686 scrub_page_get(spage
);
1688 if (sbio
->page_count
== sctx
->pages_per_wr_bio
)
1689 scrub_wr_submit(sctx
);
1690 mutex_unlock(&sctx
->wr_lock
);
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_disk);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(sbio->bio);
}
static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			 scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}
1726 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1728 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1729 struct scrub_ctx
*sctx
= sbio
->sctx
;
1732 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1734 struct btrfs_dev_replace
*dev_replace
=
1735 &sbio
->sctx
->fs_info
->dev_replace
;
1737 for (i
= 0; i
< sbio
->page_count
; i
++) {
1738 struct scrub_page
*spage
= sbio
->pagev
[i
];
1740 spage
->io_error
= 1;
1741 atomic64_inc(&dev_replace
->num_write_errors
);
1745 for (i
= 0; i
< sbio
->page_count
; i
++)
1746 scrub_page_put(sbio
->pagev
[i
]);
1750 scrub_pending_bio_dec(sctx
);
1753 static int scrub_checksum(struct scrub_block
*sblock
)
1759 * No need to initialize these stats currently,
1760 * because this function only use return value
1761 * instead of these stats value.
1766 sblock
->header_error
= 0;
1767 sblock
->generation_error
= 0;
1768 sblock
->checksum_error
= 0;
1770 WARN_ON(sblock
->page_count
< 1);
1771 flags
= sblock
->pagev
[0]->flags
;
1773 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1774 ret
= scrub_checksum_data(sblock
);
1775 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1776 ret
= scrub_checksum_tree_block(sblock
);
1777 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1778 (void)scrub_checksum_super(sblock
);
1782 scrub_handle_errored_block(sblock
);
1787 static int scrub_checksum_data(struct scrub_block
*sblock
)
1789 struct scrub_ctx
*sctx
= sblock
->sctx
;
1790 u8 csum
[BTRFS_CSUM_SIZE
];
1798 BUG_ON(sblock
->page_count
< 1);
1799 if (!sblock
->pagev
[0]->have_csum
)
1802 on_disk_csum
= sblock
->pagev
[0]->csum
;
1803 page
= sblock
->pagev
[0]->page
;
1804 buffer
= kmap_atomic(page
);
1806 len
= sctx
->fs_info
->sectorsize
;
1809 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1811 crc
= btrfs_csum_data(buffer
, crc
, l
);
1812 kunmap_atomic(buffer
);
1817 BUG_ON(index
>= sblock
->page_count
);
1818 BUG_ON(!sblock
->pagev
[index
]->page
);
1819 page
= sblock
->pagev
[index
]->page
;
1820 buffer
= kmap_atomic(page
);
1823 btrfs_csum_final(crc
, csum
);
1824 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1825 sblock
->checksum_error
= 1;
1827 return sblock
->checksum_error
;
1830 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1832 struct scrub_ctx
*sctx
= sblock
->sctx
;
1833 struct btrfs_header
*h
;
1834 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
1835 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1836 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1838 void *mapped_buffer
;
1845 BUG_ON(sblock
->page_count
< 1);
1846 page
= sblock
->pagev
[0]->page
;
1847 mapped_buffer
= kmap_atomic(page
);
1848 h
= (struct btrfs_header
*)mapped_buffer
;
1849 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1852 * we don't use the getter functions here, as we
1853 * a) don't have an extent buffer and
1854 * b) the page is already kmapped
1856 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
))
1857 sblock
->header_error
= 1;
1859 if (sblock
->pagev
[0]->generation
!= btrfs_stack_header_generation(h
)) {
1860 sblock
->header_error
= 1;
1861 sblock
->generation_error
= 1;
1864 if (!scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]))
1865 sblock
->header_error
= 1;
1867 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1869 sblock
->header_error
= 1;
1871 len
= sctx
->fs_info
->nodesize
- BTRFS_CSUM_SIZE
;
1872 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1873 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1876 u64 l
= min_t(u64
, len
, mapped_size
);
1878 crc
= btrfs_csum_data(p
, crc
, l
);
1879 kunmap_atomic(mapped_buffer
);
1884 BUG_ON(index
>= sblock
->page_count
);
1885 BUG_ON(!sblock
->pagev
[index
]->page
);
1886 page
= sblock
->pagev
[index
]->page
;
1887 mapped_buffer
= kmap_atomic(page
);
1888 mapped_size
= PAGE_SIZE
;
1892 btrfs_csum_final(crc
, calculated_csum
);
1893 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1894 sblock
->checksum_error
= 1;
1896 return sblock
->header_error
|| sblock
->checksum_error
;
1899 static int scrub_checksum_super(struct scrub_block
*sblock
)
1901 struct btrfs_super_block
*s
;
1902 struct scrub_ctx
*sctx
= sblock
->sctx
;
1903 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1904 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1906 void *mapped_buffer
;
1915 BUG_ON(sblock
->page_count
< 1);
1916 page
= sblock
->pagev
[0]->page
;
1917 mapped_buffer
= kmap_atomic(page
);
1918 s
= (struct btrfs_super_block
*)mapped_buffer
;
1919 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
1921 if (sblock
->pagev
[0]->logical
!= btrfs_super_bytenr(s
))
1924 if (sblock
->pagev
[0]->generation
!= btrfs_super_generation(s
))
1927 if (!scrub_check_fsid(s
->fsid
, sblock
->pagev
[0]))
1930 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
1931 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1932 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1935 u64 l
= min_t(u64
, len
, mapped_size
);
1937 crc
= btrfs_csum_data(p
, crc
, l
);
1938 kunmap_atomic(mapped_buffer
);
1943 BUG_ON(index
>= sblock
->page_count
);
1944 BUG_ON(!sblock
->pagev
[index
]->page
);
1945 page
= sblock
->pagev
[index
]->page
;
1946 mapped_buffer
= kmap_atomic(page
);
1947 mapped_size
= PAGE_SIZE
;
1951 btrfs_csum_final(crc
, calculated_csum
);
1952 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1955 if (fail_cor
+ fail_gen
) {
1957 * if we find an error in a super block, we just report it.
1958 * They will get written with the next transaction commit
1961 spin_lock(&sctx
->stat_lock
);
1962 ++sctx
->stat
.super_errors
;
1963 spin_unlock(&sctx
->stat_lock
);
1965 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1966 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1968 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1969 BTRFS_DEV_STAT_GENERATION_ERRS
);
1972 return fail_cor
+ fail_gen
;
static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}
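/*
 * scrub_submit() hands the currently filled read bio to the block layer and
 * resets sctx->curr to -1, so the next scrub_add_page_to_rd_bio() call has
 * to grab a fresh scrub_bio from the free list (or wait until one becomes
 * available).
 */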
2021 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
2022 struct scrub_page
*spage
)
2024 struct scrub_block
*sblock
= spage
->sblock
;
2025 struct scrub_bio
*sbio
;
2030 * grab a fresh bio or wait for one to become available
2032 while (sctx
->curr
== -1) {
2033 spin_lock(&sctx
->list_lock
);
2034 sctx
->curr
= sctx
->first_free
;
2035 if (sctx
->curr
!= -1) {
2036 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
2037 sctx
->bios
[sctx
->curr
]->next_free
= -1;
2038 sctx
->bios
[sctx
->curr
]->page_count
= 0;
2039 spin_unlock(&sctx
->list_lock
);
2041 spin_unlock(&sctx
->list_lock
);
2042 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
2045 sbio
= sctx
->bios
[sctx
->curr
];
2046 if (sbio
->page_count
== 0) {
2049 sbio
->physical
= spage
->physical
;
2050 sbio
->logical
= spage
->logical
;
2051 sbio
->dev
= spage
->dev
;
2054 bio
= btrfs_io_bio_alloc(sctx
->pages_per_rd_bio
);
2058 bio
->bi_private
= sbio
;
2059 bio
->bi_end_io
= scrub_bio_end_io
;
2060 bio_set_dev(bio
, sbio
->dev
->bdev
);
2061 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
2062 bio
->bi_opf
= REQ_OP_READ
;
2064 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
2066 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
2068 sbio
->dev
!= spage
->dev
) {
2073 sbio
->pagev
[sbio
->page_count
] = spage
;
2074 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
2075 if (ret
!= PAGE_SIZE
) {
2076 if (sbio
->page_count
< 1) {
2085 scrub_block_get(sblock
); /* one for the page added to the bio */
2086 atomic_inc(&sblock
->outstanding_pages
);
2088 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
2094 static void scrub_missing_raid56_end_io(struct bio
*bio
)
2096 struct scrub_block
*sblock
= bio
->bi_private
;
2097 struct btrfs_fs_info
*fs_info
= sblock
->sctx
->fs_info
;
2100 sblock
->no_io_error_seen
= 0;
2104 btrfs_queue_work(fs_info
->scrub_workers
, &sblock
->work
);
2107 static void scrub_missing_raid56_worker(struct btrfs_work
*work
)
2109 struct scrub_block
*sblock
= container_of(work
, struct scrub_block
, work
);
2110 struct scrub_ctx
*sctx
= sblock
->sctx
;
2111 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
2113 struct btrfs_device
*dev
;
2115 logical
= sblock
->pagev
[0]->logical
;
2116 dev
= sblock
->pagev
[0]->dev
;
2118 if (sblock
->no_io_error_seen
)
2119 scrub_recheck_block_checksum(sblock
);
2121 if (!sblock
->no_io_error_seen
) {
2122 spin_lock(&sctx
->stat_lock
);
2123 sctx
->stat
.read_errors
++;
2124 spin_unlock(&sctx
->stat_lock
);
2125 btrfs_err_rl_in_rcu(fs_info
,
2126 "IO error rebuilding logical %llu for dev %s",
2127 logical
, rcu_str_deref(dev
->name
));
2128 } else if (sblock
->header_error
|| sblock
->checksum_error
) {
2129 spin_lock(&sctx
->stat_lock
);
2130 sctx
->stat
.uncorrectable_errors
++;
2131 spin_unlock(&sctx
->stat_lock
);
2132 btrfs_err_rl_in_rcu(fs_info
,
2133 "failed to rebuild valid logical %llu for dev %s",
2134 logical
, rcu_str_deref(dev
->name
));
2136 scrub_write_block_to_dev_replace(sblock
);
2139 scrub_block_put(sblock
);
2141 if (sctx
->is_dev_replace
&& sctx
->flush_all_writes
) {
2142 mutex_lock(&sctx
->wr_lock
);
2143 scrub_wr_submit(sctx
);
2144 mutex_unlock(&sctx
->wr_lock
);
2147 scrub_pending_bio_dec(sctx
);
2150 static void scrub_missing_raid56_pages(struct scrub_block
*sblock
)
2152 struct scrub_ctx
*sctx
= sblock
->sctx
;
2153 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
2154 u64 length
= sblock
->page_count
* PAGE_SIZE
;
2155 u64 logical
= sblock
->pagev
[0]->logical
;
2156 struct btrfs_bio
*bbio
= NULL
;
2158 struct btrfs_raid_bio
*rbio
;
2162 btrfs_bio_counter_inc_blocked(fs_info
);
2163 ret
= btrfs_map_sblock(fs_info
, BTRFS_MAP_GET_READ_MIRRORS
, logical
,
2165 if (ret
|| !bbio
|| !bbio
->raid_map
)
2168 if (WARN_ON(!sctx
->is_dev_replace
||
2169 !(bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
))) {
2171 * We shouldn't be scrubbing a missing device. Even for dev
2172 * replace, we should only get here for RAID 5/6. We either
2173 * managed to mount something with no mirrors remaining or
2174 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2179 bio
= btrfs_io_bio_alloc(0);
2180 bio
->bi_iter
.bi_sector
= logical
>> 9;
2181 bio
->bi_private
= sblock
;
2182 bio
->bi_end_io
= scrub_missing_raid56_end_io
;
2184 rbio
= raid56_alloc_missing_rbio(fs_info
, bio
, bbio
, length
);
2188 for (i
= 0; i
< sblock
->page_count
; i
++) {
2189 struct scrub_page
*spage
= sblock
->pagev
[i
];
2191 raid56_add_scrub_pages(rbio
, spage
->page
, spage
->logical
);
2194 btrfs_init_work(&sblock
->work
, btrfs_scrub_helper
,
2195 scrub_missing_raid56_worker
, NULL
, NULL
);
2196 scrub_block_get(sblock
);
2197 scrub_pending_bio_inc(sctx
);
2198 raid56_submit_missing_rbio(rbio
);
2204 btrfs_bio_counter_dec(fs_info
);
2205 btrfs_put_bbio(bbio
);
2206 spin_lock(&sctx
->stat_lock
);
2207 sctx
->stat
.malloc_errors
++;
2208 spin_unlock(&sctx
->stat_lock
);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

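/*
 * Read-bio completion callback: only record the bio status here and
 * defer all processing to scrub_bio_end_io_worker() on the scrub
 * workqueue, since bio completion context must stay short.
 */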
static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

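/*
 * Mark the sectors of [start, start + len) in a per-stripe bitmap.
 * @start is first made relative to the parity stripe; if the range wraps
 * past the end of the stripe, the remainder is marked again from bit 0,
 * which is what the two bitmap_set() calls at the end handle.
 */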
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	u64 nsectors64;
	u32 nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = div_u64(offset, sectorsize);
	nsectors64 = div_u64(len, sectorsize);

	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						    u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If there is a checksum error, it is written via the repair
		 * mechanism in the dev replace case, otherwise it is written
		 * here in the dev replace case.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

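/*
 * Fetch the checksum for @logical from sctx->csum_list. Sums that end
 * before @logical are stale and discarded; the list is consumed in
 * logical order because the callers walk the stripe front to back.
 * Returns 1 and copies the csum on success, 0 if there is none.
 */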
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
	ASSERT(index < UINT_MAX);

	num_sectors = sum->len / sctx->fs_info->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
			u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

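/*
 * Variant of scrub_pages() used while checking RAID56 parity: every page
 * is referenced twice, once from the scrub_block and once from the
 * sparity->spages list, and the block holds a reference on the
 * scrub_parity so the parity check only runs once all blocks finished.
 */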
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

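/*
 * Worked example for the helper below (illustration only, not from the
 * original source): on a three-device RAID5 chunk with a 64K stripe_len,
 * physical offset 0 of device 0 falls on the first data stripe of full
 * stripe 0, so the function returns 0 with *offset == 0; for the device
 * holding parity in that rotation it would return 1 instead.
 */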
/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the most left data stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}

static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

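/*
 * Scrub one full stripe of a RAID56 chunk: walk the extent tree over
 * [logic_start, logic_end), mark which sectors carry data, read and
 * verify them, and let the final scrub_parity_put() trigger the parity
 * check and repair for the sectors that were read successfully.
 */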
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					      extent_logical, &mapped_length, &bbio,
					      0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}

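/*
 * Scrub everything that lives in stripe @num of the chunk described by
 * @map. The per-profile setup below decides how far the logical address
 * advances per device stripe (increment) and which mirror this device
 * represents, e.g. RAID0 advances by stripe_len * num_stripes while
 * RAID1/DUP advance by a single stripe_len.
 */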
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div64_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for the extent tree and csum tree and wait
	 * for completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is parity strip */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (sctx->is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, map, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

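/*
 * Iterate over all dev extents of the scrubbed device in [start, end)
 * and scrub the chunks they belong to. Each block group is temporarily
 * set read-only while it is scrubbed so that no new writes land in it,
 * which matters in particular for device replace.
 */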
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		/*
		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(cache);
		if (!ret && sctx->is_dev_replace) {
			/*
			 * If we are doing a device replace wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro return -ENOSPC when it
			 * failed in creating new chunk for metadata.
			 * It is not a problem for scrub/replace, because
			 * metadata are always cowed, and our scrub paused
			 * commit_transactions.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_put_block_group(cache);
			break;
		}

		down_write(&fs_info->dev_replace.rwsem);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		down_write(&fs_info->dev_replace.rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&fs_info->dev_replace.rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

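/*
 * Scrub the superblock copies of the device. The expected generation is
 * chosen up front because a seed device carries its own generation
 * rather than the filesystem's last committed one.
 */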
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	lockdep_assert_held(&fs_info->scrub_lock);

	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL);
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		ASSERT(fs_info->scrub_wr_completion_workers == NULL);
		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		ASSERT(fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;

		refcount_set(&fs_info->scrub_workers_refcnt, 1);
	} else {
		refcount_inc(&fs_info->scrub_workers_refcnt);
	}
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

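/*
 * Common entry point for scrub and device replace: validate the size
 * assumptions scrub depends on, set up the scrub_ctx and worker queues,
 * scrub the superblocks (plain scrub only) and then all chunks of the
 * device. The scrub body runs under memalloc_nofs_save() to avoid
 * deadlocks with transaction commits that pause scrub.
 */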
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;
	struct btrfs_workqueue *scrub_workers = NULL;
	struct btrfs_workqueue *scrub_wr_comp = NULL;
	struct btrfs_workqueue *scrub_parity = NULL;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out_free_ctx;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
				 rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out_free_ctx;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out_free_ctx;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out_free_ctx;
	}
	up_read(&fs_info->dev_replace.rwsem);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out_free_ctx;
	}

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			   ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
		scrub_workers = fs_info->scrub_workers;
		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
		scrub_parity = fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
	}
	mutex_unlock(&fs_info->scrub_lock);

	btrfs_destroy_workqueue(scrub_workers);
	btrfs_destroy_workqueue(scrub_wr_comp);
	btrfs_destroy_workqueue(scrub_parity);
	scrub_put_ctx(sctx);

	return ret;

out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}