/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

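/*
 * Recovery state shared by all pages of one rechecked block: the bbio
 * mapping is reference counted so that every scrub_page built from it can
 * keep using it until the last reference is dropped (see
 * scrub_put_recover()).
 */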
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

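/*
 * One page-sized piece of a scrub_block, together with everything needed
 * to locate and verify it: logical/physical position, owning device,
 * mirror number and the expected checksum.
 */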
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

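/*
 * One read or write bio in flight for scrub; pagev[] is dimensioned for
 * the larger of the read and write limits, since the same structure is
 * used for both directions.
 */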
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but for which errors
	 * happened when reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


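/*
 * The bios-in-flight counter is paired with a reference on the scrub
 * context, so the context cannot be freed while an end_io handler may
 * still do a wakeup on list_wait (see the comment on scrub_ctx::refs).
 */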
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

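/* caller must hold scrub_lock; it is dropped and re-taken while waiting */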
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The "scrubs_running == scrubs_paused" check inside wait_event()
	 * is not an atomic operation, which means scrubs_running and
	 * scrubs_paused may be incremented or decremented at any time.
	 * Wake up scrub_pause_wait as often as we can so that a blocked
	 * transaction commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

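/* free all checksums that were looked up in advance but never consumed */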
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

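/*
 * Allocate and initialize the per-device scrub context, including the
 * fixed pool of SCRUB_BIOS_PER_SCTX scrub_bios that is chained through
 * the next_free indices.
 */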
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

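/*
 * Backref iteration callback: resolve the path(s) of one inode that
 * references the errored extent and print a warning for each of them.
 * Always returns 0 so that the iteration over all inodes continues.
 */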
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

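/*
 * Report an error with as much context as can be recovered: for tree
 * blocks the tree backrefs are walked, for data extents the referencing
 * inodes are resolved via the backref code.
 */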
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

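/*
 * iterate_inodes_from_logical() callback for the nodatasum fixup: trigger
 * a repair through the regular readpage path by marking the range
 * EXTENT_DAMAGED and forcing a read of the bad mirror. Returns 1 to stop
 * the iteration once one inode's read has corrected the sector.
 */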
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

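/*
 * Worker that repairs a data extent without checksum. Such data might not
 * be COWed and can change under us, so the repair is routed through the
 * regular read path (see scrub_fixup_readpage()) inside a joined
 * transaction context.
 */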
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

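/* reference counting for the recovery state shared by all mirror pages */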
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (which is the reason
	 * this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find no-io-error page in mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&sctx->dev_root->
					fs_info->dev_replace.
					num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

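/*
 * Number of ways a block can be fetched: for RAID5 the data can be read
 * directly or rebuilt from the parity (2 ways), for RAID6 also from the
 * second parity (3 ways); all other profiles have num_stripes real copies.
 */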
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

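/*
 * Map a logical address back to the stripe that holds it: for RAID5/6 the
 * raid_map is searched (skipping the P/Q stripes), for mirrored profiles
 * the requested mirror number directly selects the stripe.
 */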
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}

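/*
 * Build one scrub_block per mirror for the errored block, page by page,
 * so that each page can later be re-read from each mirror independently.
 */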
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
				       &mapped_length, &bbio, 0, 1);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			return -ENOMEM;
		}

		atomic_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

struct scrub_bio_ret {
	struct completion event;
	int error;
};

static void scrub_bio_wait_endio(struct bio *bio)
{
	struct scrub_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

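/*
 * Synchronously read one page through the RAID5/6 recovery path, i.e.
 * rebuilt from the remaining stripes instead of read from the device
 * directly.
 */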
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	struct scrub_bio_ret done;
	int ret;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
				    page->recover->map_length,
				    page->mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion(&done.event);
	if (done.error)
		return -EIO;

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
			if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
				sblock->no_io_error_seen = 0;
		} else {
			bio->bi_iter.bi_sector = page->physical >> 9;

			if (btrfsic_submit_bio_wait(READ, bio))
				sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
	return !ret;
}

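/*
 * Re-verify the checksum of a block whose pages were all read
 * successfully; the scrub_checksum_*() helpers set the header, checksum
 * and generation error flags that are cleared here first.
 */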
static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) "
				"is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(WRITE, bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}

1630static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1631 int page_num)
1632{
1633 struct scrub_page *spage = sblock->pagev[page_num];
1634
1635 BUG_ON(spage->page == NULL);
1636 if (spage->io_error) {
1637 void *mapped_buffer = kmap_atomic(spage->page);
1638
09cbfeaf 1639 memset(mapped_buffer, 0, PAGE_SIZE);
ff023aac
SB
1640 flush_dcache_page(spage->page);
1641 kunmap_atomic(mapped_buffer);
1642 }
1643 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1644}
1645
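/*
 * Add one page to the current dev-replace write bio. Pages are packed
 * into the bio for as long as both the physical and the logical address
 * stay contiguous; on a discontinuity the bio is submitted and the
 * "again" label retries with a fresh one. A full bio (pages_per_wr_bio
 * pages) is submitted immediately.
 */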
1646static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1647 struct scrub_page *spage)
1648{
1649 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1650 struct scrub_bio *sbio;
1651 int ret;
1652
1653 mutex_lock(&wr_ctx->wr_lock);
1654again:
1655 if (!wr_ctx->wr_curr_bio) {
1656 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
58c4e173 1657 GFP_KERNEL);
ff023aac
SB
1658 if (!wr_ctx->wr_curr_bio) {
1659 mutex_unlock(&wr_ctx->wr_lock);
1660 return -ENOMEM;
1661 }
1662 wr_ctx->wr_curr_bio->sctx = sctx;
1663 wr_ctx->wr_curr_bio->page_count = 0;
1664 }
1665 sbio = wr_ctx->wr_curr_bio;
1666 if (sbio->page_count == 0) {
1667 struct bio *bio;
1668
1669 sbio->physical = spage->physical_for_dev_replace;
1670 sbio->logical = spage->logical;
1671 sbio->dev = wr_ctx->tgtdev;
1672 bio = sbio->bio;
1673 if (!bio) {
58c4e173
DS
1674 bio = btrfs_io_bio_alloc(GFP_KERNEL,
1675 wr_ctx->pages_per_wr_bio);
ff023aac
SB
1676 if (!bio) {
1677 mutex_unlock(&wr_ctx->wr_lock);
1678 return -ENOMEM;
1679 }
1680 sbio->bio = bio;
1681 }
1682
1683 bio->bi_private = sbio;
1684 bio->bi_end_io = scrub_wr_bio_end_io;
1685 bio->bi_bdev = sbio->dev->bdev;
4f024f37 1686 bio->bi_iter.bi_sector = sbio->physical >> 9;
ff023aac
SB
1687 sbio->err = 0;
1688 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1689 spage->physical_for_dev_replace ||
1690 sbio->logical + sbio->page_count * PAGE_SIZE !=
1691 spage->logical) {
1692 scrub_wr_submit(sctx);
1693 goto again;
1694 }
1695
1696 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1697 if (ret != PAGE_SIZE) {
1698 if (sbio->page_count < 1) {
1699 bio_put(sbio->bio);
1700 sbio->bio = NULL;
1701 mutex_unlock(&wr_ctx->wr_lock);
1702 return -EIO;
1703 }
1704 scrub_wr_submit(sctx);
1705 goto again;
1706 }
1707
1708 sbio->pagev[sbio->page_count] = spage;
1709 scrub_page_get(spage);
1710 sbio->page_count++;
1711 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1712 scrub_wr_submit(sctx);
1713 mutex_unlock(&wr_ctx->wr_lock);
1714
1715 return 0;
1716}
1717
1718static void scrub_wr_submit(struct scrub_ctx *sctx)
1719{
1720 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1721 struct scrub_bio *sbio;
1722
1723 if (!wr_ctx->wr_curr_bio)
1724 return;
1725
1726 sbio = wr_ctx->wr_curr_bio;
1727 wr_ctx->wr_curr_bio = NULL;
1728 WARN_ON(!sbio->bio->bi_bdev);
1729 scrub_pending_bio_inc(sctx);
1730 /* process all writes in a single worker thread, so that the block
1731 * layer can order the requests before sending them to the driver;
1732 * this doubled the write performance on spinning disks when measured
1733 * with Linux 3.5 */
1734 btrfsic_submit_bio(WRITE, sbio->bio);
1735}
1736
4246a0b6 1737static void scrub_wr_bio_end_io(struct bio *bio)
ff023aac
SB
1738{
1739 struct scrub_bio *sbio = bio->bi_private;
1740 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1741
4246a0b6 1742 sbio->err = bio->bi_error;
ff023aac
SB
1743 sbio->bio = bio;
1744
9e0af237
LB
1745 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1746 scrub_wr_bio_end_io_worker, NULL, NULL);
0339ef2f 1747 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
ff023aac
SB
1748}
1749
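/*
 * Write-bio completion, run in the scrub_wr_completion worker: on error,
 * every page of the bio is flagged and counted as a dev-replace write
 * error; in all cases the page references taken when the pages were
 * queued are dropped here.
 */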
1750static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1751{
1752 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1753 struct scrub_ctx *sctx = sbio->sctx;
1754 int i;
1755
1756 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1757 if (sbio->err) {
1758 struct btrfs_dev_replace *dev_replace =
1759 &sbio->sctx->dev_root->fs_info->dev_replace;
1760
1761 for (i = 0; i < sbio->page_count; i++) {
1762 struct scrub_page *spage = sbio->pagev[i];
1763
1764 spage->io_error = 1;
1765 btrfs_dev_replace_stats_inc(&dev_replace->
1766 num_write_errors);
1767 }
1768 }
1769
1770 for (i = 0; i < sbio->page_count; i++)
1771 scrub_page_put(sbio->pagev[i]);
1772
1773 bio_put(sbio->bio);
1774 kfree(sbio);
1775 scrub_pending_bio_dec(sctx);
1776}
1777
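/*
 * Verify a complete scrub_block against its checksum (data), header plus
 * checksum (tree block) or super block fields, and hand blocks that fail
 * to the error handling. Super block errors are only counted, they do
 * not trigger repair from here.
 */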
1778static int scrub_checksum(struct scrub_block *sblock)
b5d67f64
SB
1779{
1780 u64 flags;
1781 int ret;
1782
ba7cf988
ZL
1783 /*
1784 * No need to initialize these stats currently, because this
1785 * function only uses the return value instead of the stats
1786 * values.
1787 *
1788 * Todo:
1789 * always use stats
1790 */
1791 sblock->header_error = 0;
1792 sblock->generation_error = 0;
1793 sblock->checksum_error = 0;
1794
7a9e9987
SB
1795 WARN_ON(sblock->page_count < 1);
1796 flags = sblock->pagev[0]->flags;
b5d67f64
SB
1797 ret = 0;
1798 if (flags & BTRFS_EXTENT_FLAG_DATA)
1799 ret = scrub_checksum_data(sblock);
1800 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1801 ret = scrub_checksum_tree_block(sblock);
1802 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1803 (void)scrub_checksum_super(sblock);
1804 else
1805 WARN_ON(1);
1806 if (ret)
1807 scrub_handle_errored_block(sblock);
ff023aac
SB
1808
1809 return ret;
a2de733c
AJ
1810}
1811
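/*
 * The three checksum helpers below share one pattern: map each page of
 * the block in turn, feed the mapped bytes into the running crc until
 * the block length is consumed, then compare the final digest with the
 * on-disk checksum.
 */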
b5d67f64 1812static int scrub_checksum_data(struct scrub_block *sblock)
a2de733c 1813{
d9d181c1 1814 struct scrub_ctx *sctx = sblock->sctx;
a2de733c 1815 u8 csum[BTRFS_CSUM_SIZE];
b5d67f64
SB
1816 u8 *on_disk_csum;
1817 struct page *page;
1818 void *buffer;
a2de733c 1819 u32 crc = ~(u32)0;
b5d67f64
SB
1820 u64 len;
1821 int index;
a2de733c 1822
b5d67f64 1823 BUG_ON(sblock->page_count < 1);
7a9e9987 1824 if (!sblock->pagev[0]->have_csum)
a2de733c
AJ
1825 return 0;
1826
7a9e9987
SB
1827 on_disk_csum = sblock->pagev[0]->csum;
1828 page = sblock->pagev[0]->page;
9613bebb 1829 buffer = kmap_atomic(page);
b5d67f64 1830
d9d181c1 1831 len = sctx->sectorsize;
b5d67f64
SB
1832 index = 0;
1833 for (;;) {
1834 u64 l = min_t(u64, len, PAGE_SIZE);
1835
b0496686 1836 crc = btrfs_csum_data(buffer, crc, l);
9613bebb 1837 kunmap_atomic(buffer);
b5d67f64
SB
1838 len -= l;
1839 if (len == 0)
1840 break;
1841 index++;
1842 BUG_ON(index >= sblock->page_count);
7a9e9987
SB
1843 BUG_ON(!sblock->pagev[index]->page);
1844 page = sblock->pagev[index]->page;
9613bebb 1845 buffer = kmap_atomic(page);
b5d67f64
SB
1846 }
1847
a2de733c 1848 btrfs_csum_final(crc, csum);
d9d181c1 1849 if (memcmp(csum, on_disk_csum, sctx->csum_size))
ba7cf988 1850 sblock->checksum_error = 1;
a2de733c 1851
ba7cf988 1852 return sblock->checksum_error;
a2de733c
AJ
1853}
1854
b5d67f64 1855static int scrub_checksum_tree_block(struct scrub_block *sblock)
a2de733c 1856{
d9d181c1 1857 struct scrub_ctx *sctx = sblock->sctx;
a2de733c 1858 struct btrfs_header *h;
a36cf8b8 1859 struct btrfs_root *root = sctx->dev_root;
a2de733c 1860 struct btrfs_fs_info *fs_info = root->fs_info;
b5d67f64
SB
1861 u8 calculated_csum[BTRFS_CSUM_SIZE];
1862 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1863 struct page *page;
1864 void *mapped_buffer;
1865 u64 mapped_size;
1866 void *p;
a2de733c 1867 u32 crc = ~(u32)0;
b5d67f64
SB
1868 u64 len;
1869 int index;
1870
1871 BUG_ON(sblock->page_count < 1);
7a9e9987 1872 page = sblock->pagev[0]->page;
9613bebb 1873 mapped_buffer = kmap_atomic(page);
b5d67f64 1874 h = (struct btrfs_header *)mapped_buffer;
d9d181c1 1875 memcpy(on_disk_csum, h->csum, sctx->csum_size);
a2de733c
AJ
1876
1877 /*
1878 * we don't use the getter functions here, as we
1879 * a) don't have an extent buffer and
1880 * b) the page is already kmapped
1881 */
3cae210f 1882 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
ba7cf988 1883 sblock->header_error = 1;
a2de733c 1884
ba7cf988
ZL
1885 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1886 sblock->header_error = 1;
1887 sblock->generation_error = 1;
1888 }
a2de733c 1889
17a9be2f 1890 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
ba7cf988 1891 sblock->header_error = 1;
a2de733c
AJ
1892
1893 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1894 BTRFS_UUID_SIZE))
ba7cf988 1895 sblock->header_error = 1;
a2de733c 1896
d9d181c1 1897 len = sctx->nodesize - BTRFS_CSUM_SIZE;
b5d67f64
SB
1898 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1899 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1900 index = 0;
1901 for (;;) {
1902 u64 l = min_t(u64, len, mapped_size);
1903
b0496686 1904 crc = btrfs_csum_data(p, crc, l);
9613bebb 1905 kunmap_atomic(mapped_buffer);
b5d67f64
SB
1906 len -= l;
1907 if (len == 0)
1908 break;
1909 index++;
1910 BUG_ON(index >= sblock->page_count);
7a9e9987
SB
1911 BUG_ON(!sblock->pagev[index]->page);
1912 page = sblock->pagev[index]->page;
9613bebb 1913 mapped_buffer = kmap_atomic(page);
b5d67f64
SB
1914 mapped_size = PAGE_SIZE;
1915 p = mapped_buffer;
1916 }
1917
1918 btrfs_csum_final(crc, calculated_csum);
d9d181c1 1919 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
ba7cf988 1920 sblock->checksum_error = 1;
a2de733c 1921
ba7cf988 1922 return sblock->header_error || sblock->checksum_error;
a2de733c
AJ
1923}
1924
b5d67f64 1925static int scrub_checksum_super(struct scrub_block *sblock)
a2de733c
AJ
1926{
1927 struct btrfs_super_block *s;
d9d181c1 1928 struct scrub_ctx *sctx = sblock->sctx;
b5d67f64
SB
1929 u8 calculated_csum[BTRFS_CSUM_SIZE];
1930 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1931 struct page *page;
1932 void *mapped_buffer;
1933 u64 mapped_size;
1934 void *p;
a2de733c 1935 u32 crc = ~(u32)0;
442a4f63
SB
1936 int fail_gen = 0;
1937 int fail_cor = 0;
b5d67f64
SB
1938 u64 len;
1939 int index;
a2de733c 1940
b5d67f64 1941 BUG_ON(sblock->page_count < 1);
7a9e9987 1942 page = sblock->pagev[0]->page;
9613bebb 1943 mapped_buffer = kmap_atomic(page);
b5d67f64 1944 s = (struct btrfs_super_block *)mapped_buffer;
d9d181c1 1945 memcpy(on_disk_csum, s->csum, sctx->csum_size);
a2de733c 1946
3cae210f 1947 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
442a4f63 1948 ++fail_cor;
a2de733c 1949
3cae210f 1950 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
442a4f63 1951 ++fail_gen;
a2de733c 1952
17a9be2f 1953 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
442a4f63 1954 ++fail_cor;
a2de733c 1955
b5d67f64
SB
1956 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1957 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1958 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1959 index = 0;
1960 for (;;) {
1961 u64 l = min_t(u64, len, mapped_size);
1962
b0496686 1963 crc = btrfs_csum_data(p, crc, l);
9613bebb 1964 kunmap_atomic(mapped_buffer);
b5d67f64
SB
1965 len -= l;
1966 if (len == 0)
1967 break;
1968 index++;
1969 BUG_ON(index >= sblock->page_count);
7a9e9987
SB
1970 BUG_ON(!sblock->pagev[index]->page);
1971 page = sblock->pagev[index]->page;
9613bebb 1972 mapped_buffer = kmap_atomic(page);
b5d67f64
SB
1973 mapped_size = PAGE_SIZE;
1974 p = mapped_buffer;
1975 }
1976
1977 btrfs_csum_final(crc, calculated_csum);
d9d181c1 1978 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
442a4f63 1979 ++fail_cor;
a2de733c 1980
442a4f63 1981 if (fail_cor + fail_gen) {
a2de733c
AJ
1982 /*
1983 * if we find an error in a super block, we just report it;
1984 * super blocks get rewritten with the next transaction commit
1985 * anyway
1986 */
d9d181c1
SB
1987 spin_lock(&sctx->stat_lock);
1988 ++sctx->stat.super_errors;
1989 spin_unlock(&sctx->stat_lock);
442a4f63 1990 if (fail_cor)
7a9e9987 1991 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
442a4f63
SB
1992 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1993 else
7a9e9987 1994 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
442a4f63 1995 BTRFS_DEV_STAT_GENERATION_ERRS);
a2de733c
AJ
1996 }
1997
442a4f63 1998 return fail_cor + fail_gen;
a2de733c
AJ
1999}
2000
b5d67f64
SB
2001static void scrub_block_get(struct scrub_block *sblock)
2002{
57019345 2003 atomic_inc(&sblock->refs);
b5d67f64
SB
2004}
2005
2006static void scrub_block_put(struct scrub_block *sblock)
2007{
57019345 2008 if (atomic_dec_and_test(&sblock->refs)) {
b5d67f64
SB
2009 int i;
2010
5a6ac9ea
MX
2011 if (sblock->sparity)
2012 scrub_parity_put(sblock->sparity);
2013
b5d67f64 2014 for (i = 0; i < sblock->page_count; i++)
7a9e9987 2015 scrub_page_put(sblock->pagev[i]);
b5d67f64
SB
2016 kfree(sblock);
2017 }
2018}
2019
7a9e9987
SB
2020static void scrub_page_get(struct scrub_page *spage)
2021{
57019345 2022 atomic_inc(&spage->refs);
7a9e9987
SB
2023}
2024
2025static void scrub_page_put(struct scrub_page *spage)
2026{
57019345 2027 if (atomic_dec_and_test(&spage->refs)) {
7a9e9987
SB
2028 if (spage->page)
2029 __free_page(spage->page);
2030 kfree(spage);
2031 }
2032}
2033
d9d181c1 2034static void scrub_submit(struct scrub_ctx *sctx)
a2de733c
AJ
2035{
2036 struct scrub_bio *sbio;
2037
d9d181c1 2038 if (sctx->curr == -1)
1623edeb 2039 return;
a2de733c 2040
d9d181c1
SB
2041 sbio = sctx->bios[sctx->curr];
2042 sctx->curr = -1;
b6bfebc1 2043 scrub_pending_bio_inc(sctx);
03679ade 2044 btrfsic_submit_bio(READ, sbio->bio);
a2de733c
AJ
2045}
2046
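/*
 * Read-side counterpart of scrub_add_page_to_wr_bio(): take (or wait
 * for) a free scrub_bio, then pack pages while physical address, logical
 * address and device all stay contiguous; any mismatch submits the bio
 * and retries with a fresh one.
 */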
ff023aac
SB
2047static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2048 struct scrub_page *spage)
a2de733c 2049{
b5d67f64 2050 struct scrub_block *sblock = spage->sblock;
a2de733c 2051 struct scrub_bio *sbio;
69f4cb52 2052 int ret;
a2de733c
AJ
2053
2054again:
2055 /*
2056 * grab a fresh bio or wait for one to become available
2057 */
d9d181c1
SB
2058 while (sctx->curr == -1) {
2059 spin_lock(&sctx->list_lock);
2060 sctx->curr = sctx->first_free;
2061 if (sctx->curr != -1) {
2062 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2063 sctx->bios[sctx->curr]->next_free = -1;
2064 sctx->bios[sctx->curr]->page_count = 0;
2065 spin_unlock(&sctx->list_lock);
a2de733c 2066 } else {
d9d181c1
SB
2067 spin_unlock(&sctx->list_lock);
2068 wait_event(sctx->list_wait, sctx->first_free != -1);
a2de733c
AJ
2069 }
2070 }
d9d181c1 2071 sbio = sctx->bios[sctx->curr];
b5d67f64 2072 if (sbio->page_count == 0) {
69f4cb52
AJ
2073 struct bio *bio;
2074
b5d67f64
SB
2075 sbio->physical = spage->physical;
2076 sbio->logical = spage->logical;
a36cf8b8 2077 sbio->dev = spage->dev;
b5d67f64
SB
2078 bio = sbio->bio;
2079 if (!bio) {
58c4e173
DS
2080 bio = btrfs_io_bio_alloc(GFP_KERNEL,
2081 sctx->pages_per_rd_bio);
b5d67f64
SB
2082 if (!bio)
2083 return -ENOMEM;
2084 sbio->bio = bio;
2085 }
69f4cb52
AJ
2086
2087 bio->bi_private = sbio;
2088 bio->bi_end_io = scrub_bio_end_io;
a36cf8b8 2089 bio->bi_bdev = sbio->dev->bdev;
4f024f37 2090 bio->bi_iter.bi_sector = sbio->physical >> 9;
69f4cb52 2091 sbio->err = 0;
b5d67f64
SB
2092 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2093 spage->physical ||
2094 sbio->logical + sbio->page_count * PAGE_SIZE !=
a36cf8b8
SB
2095 spage->logical ||
2096 sbio->dev != spage->dev) {
d9d181c1 2097 scrub_submit(sctx);
a2de733c
AJ
2098 goto again;
2099 }
69f4cb52 2100
b5d67f64
SB
2101 sbio->pagev[sbio->page_count] = spage;
2102 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2103 if (ret != PAGE_SIZE) {
2104 if (sbio->page_count < 1) {
2105 bio_put(sbio->bio);
2106 sbio->bio = NULL;
2107 return -EIO;
2108 }
d9d181c1 2109 scrub_submit(sctx);
69f4cb52
AJ
2110 goto again;
2111 }
2112
ff023aac 2113 scrub_block_get(sblock); /* one for the page added to the bio */
b5d67f64
SB
2114 atomic_inc(&sblock->outstanding_pages);
2115 sbio->page_count++;
ff023aac 2116 if (sbio->page_count == sctx->pages_per_rd_bio)
d9d181c1 2117 scrub_submit(sctx);
b5d67f64
SB
2118
2119 return 0;
2120}
2121
22365979 2122static void scrub_missing_raid56_end_io(struct bio *bio)
73ff61db
OS
2123{
2124 struct scrub_block *sblock = bio->bi_private;
2125 struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
2126
22365979 2127 if (bio->bi_error)
73ff61db
OS
2128 sblock->no_io_error_seen = 0;
2129
2130 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2131}
2132
2133static void scrub_missing_raid56_worker(struct btrfs_work *work)
2134{
2135 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2136 struct scrub_ctx *sctx = sblock->sctx;
73ff61db
OS
2137 u64 logical;
2138 struct btrfs_device *dev;
2139
73ff61db
OS
2140 logical = sblock->pagev[0]->logical;
2141 dev = sblock->pagev[0]->dev;
2142
affe4a5a 2143 if (sblock->no_io_error_seen)
ba7cf988 2144 scrub_recheck_block_checksum(sblock);
73ff61db
OS
2145
2146 if (!sblock->no_io_error_seen) {
2147 spin_lock(&sctx->stat_lock);
2148 sctx->stat.read_errors++;
2149 spin_unlock(&sctx->stat_lock);
ba7cf988 2150 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
b14af3b4 2151 "IO error rebuilding logical %llu for dev %s",
73ff61db
OS
2152 logical, rcu_str_deref(dev->name));
2153 } else if (sblock->header_error || sblock->checksum_error) {
2154 spin_lock(&sctx->stat_lock);
2155 sctx->stat.uncorrectable_errors++;
2156 spin_unlock(&sctx->stat_lock);
ba7cf988 2157 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
b14af3b4 2158 "failed to rebuild valid logical %llu for dev %s",
73ff61db
OS
2159 logical, rcu_str_deref(dev->name));
2160 } else {
2161 scrub_write_block_to_dev_replace(sblock);
2162 }
2163
2164 scrub_block_put(sblock);
2165
2166 if (sctx->is_dev_replace &&
2167 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2168 mutex_lock(&sctx->wr_ctx.wr_lock);
2169 scrub_wr_submit(sctx);
2170 mutex_unlock(&sctx->wr_ctx.wr_lock);
2171 }
2172
2173 scrub_pending_bio_dec(sctx);
2174}
2175
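/*
 * Handle a scrub_block that sits on a missing RAID5/6 device: rebuild
 * its pages from the remaining stripes via a "missing" raid56 rbio; the
 * result is verified (and, for dev-replace, written out) later in
 * scrub_missing_raid56_worker().
 */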
2176static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2177{
2178 struct scrub_ctx *sctx = sblock->sctx;
2179 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2180 u64 length = sblock->page_count * PAGE_SIZE;
2181 u64 logical = sblock->pagev[0]->logical;
2182 struct btrfs_bio *bbio;
2183 struct bio *bio;
2184 struct btrfs_raid_bio *rbio;
2185 int ret;
2186 int i;
2187
2188 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
2189 &bbio, 0, 1);
2190 if (ret || !bbio || !bbio->raid_map)
2191 goto bbio_out;
2192
2193 if (WARN_ON(!sctx->is_dev_replace ||
2194 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2195 /*
2196 * We shouldn't be scrubbing a missing device. Even for dev
2197 * replace, we should only get here for RAID 5/6. We either
2198 * managed to mount something with no mirrors remaining or
2199 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2200 */
2201 goto bbio_out;
2202 }
2203
2204 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2205 if (!bio)
2206 goto bbio_out;
2207
2208 bio->bi_iter.bi_sector = logical >> 9;
2209 bio->bi_private = sblock;
2210 bio->bi_end_io = scrub_missing_raid56_end_io;
2211
2212 rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
2213 if (!rbio)
2214 goto rbio_out;
2215
2216 for (i = 0; i < sblock->page_count; i++) {
2217 struct scrub_page *spage = sblock->pagev[i];
2218
2219 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2220 }
2221
2222 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2223 scrub_missing_raid56_worker, NULL, NULL);
2224 scrub_block_get(sblock);
2225 scrub_pending_bio_inc(sctx);
2226 raid56_submit_missing_rbio(rbio);
2227 return;
2228
2229rbio_out:
2230 bio_put(bio);
2231bbio_out:
2232 btrfs_put_bbio(bbio);
2233 spin_lock(&sctx->stat_lock);
2234 sctx->stat.malloc_errors++;
2235 spin_unlock(&sctx->stat_lock);
2236}
2237
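/*
 * Turn an extent range into a scrub_block of up to
 * SCRUB_MAX_PAGES_PER_BLOCK pages and queue every page into a read bio,
 * or hand the whole block to the raid56 rebuild path when the device is
 * missing.
 */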
d9d181c1 2238static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
a36cf8b8 2239 u64 physical, struct btrfs_device *dev, u64 flags,
ff023aac
SB
2240 u64 gen, int mirror_num, u8 *csum, int force,
2241 u64 physical_for_dev_replace)
b5d67f64
SB
2242{
2243 struct scrub_block *sblock;
2244 int index;
2245
58c4e173 2246 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
b5d67f64 2247 if (!sblock) {
d9d181c1
SB
2248 spin_lock(&sctx->stat_lock);
2249 sctx->stat.malloc_errors++;
2250 spin_unlock(&sctx->stat_lock);
b5d67f64 2251 return -ENOMEM;
a2de733c 2252 }
b5d67f64 2253
7a9e9987
SB
2254 /* one ref inside this function, plus one for each page added to
2255 * a bio later on */
57019345 2256 atomic_set(&sblock->refs, 1);
d9d181c1 2257 sblock->sctx = sctx;
b5d67f64
SB
2258 sblock->no_io_error_seen = 1;
2259
2260 for (index = 0; len > 0; index++) {
7a9e9987 2261 struct scrub_page *spage;
b5d67f64
SB
2262 u64 l = min_t(u64, len, PAGE_SIZE);
2263
58c4e173 2264 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
7a9e9987
SB
2265 if (!spage) {
2266leave_nomem:
d9d181c1
SB
2267 spin_lock(&sctx->stat_lock);
2268 sctx->stat.malloc_errors++;
2269 spin_unlock(&sctx->stat_lock);
7a9e9987 2270 scrub_block_put(sblock);
b5d67f64
SB
2271 return -ENOMEM;
2272 }
7a9e9987
SB
2273 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2274 scrub_page_get(spage);
2275 sblock->pagev[index] = spage;
b5d67f64 2276 spage->sblock = sblock;
a36cf8b8 2277 spage->dev = dev;
b5d67f64
SB
2278 spage->flags = flags;
2279 spage->generation = gen;
2280 spage->logical = logical;
2281 spage->physical = physical;
ff023aac 2282 spage->physical_for_dev_replace = physical_for_dev_replace;
b5d67f64
SB
2283 spage->mirror_num = mirror_num;
2284 if (csum) {
2285 spage->have_csum = 1;
d9d181c1 2286 memcpy(spage->csum, csum, sctx->csum_size);
b5d67f64
SB
2287 } else {
2288 spage->have_csum = 0;
2289 }
2290 sblock->page_count++;
58c4e173 2291 spage->page = alloc_page(GFP_KERNEL);
7a9e9987
SB
2292 if (!spage->page)
2293 goto leave_nomem;
b5d67f64
SB
2294 len -= l;
2295 logical += l;
2296 physical += l;
ff023aac 2297 physical_for_dev_replace += l;
b5d67f64
SB
2298 }
2299
7a9e9987 2300 WARN_ON(sblock->page_count == 0);
73ff61db
OS
2301 if (dev->missing) {
2302 /*
2303 * This case should only be hit for RAID 5/6 device replace. See
2304 * the comment in scrub_missing_raid56_pages() for details.
2305 */
2306 scrub_missing_raid56_pages(sblock);
2307 } else {
2308 for (index = 0; index < sblock->page_count; index++) {
2309 struct scrub_page *spage = sblock->pagev[index];
2310 int ret;
1bc87793 2311
73ff61db
OS
2312 ret = scrub_add_page_to_rd_bio(sctx, spage);
2313 if (ret) {
2314 scrub_block_put(sblock);
2315 return ret;
2316 }
b5d67f64 2317 }
a2de733c 2318
73ff61db
OS
2319 if (force)
2320 scrub_submit(sctx);
2321 }
a2de733c 2322
b5d67f64
SB
2323 /* last one frees, either here or in bio completion for last page */
2324 scrub_block_put(sblock);
a2de733c
AJ
2325 return 0;
2326}
2327
4246a0b6 2328static void scrub_bio_end_io(struct bio *bio)
b5d67f64
SB
2329{
2330 struct scrub_bio *sbio = bio->bi_private;
a36cf8b8 2331 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
b5d67f64 2332
4246a0b6 2333 sbio->err = bio->bi_error;
b5d67f64
SB
2334 sbio->bio = bio;
2335
0339ef2f 2336 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
b5d67f64
SB
2337}
2338
2339static void scrub_bio_end_io_worker(struct btrfs_work *work)
2340{
2341 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
d9d181c1 2342 struct scrub_ctx *sctx = sbio->sctx;
b5d67f64
SB
2343 int i;
2344
ff023aac 2345 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
b5d67f64
SB
2346 if (sbio->err) {
2347 for (i = 0; i < sbio->page_count; i++) {
2348 struct scrub_page *spage = sbio->pagev[i];
2349
2350 spage->io_error = 1;
2351 spage->sblock->no_io_error_seen = 0;
2352 }
2353 }
2354
2355 /* now complete the scrub_block items that have all pages completed */
2356 for (i = 0; i < sbio->page_count; i++) {
2357 struct scrub_page *spage = sbio->pagev[i];
2358 struct scrub_block *sblock = spage->sblock;
2359
2360 if (atomic_dec_and_test(&sblock->outstanding_pages))
2361 scrub_block_complete(sblock);
2362 scrub_block_put(sblock);
2363 }
2364
b5d67f64
SB
2365 bio_put(sbio->bio);
2366 sbio->bio = NULL;
d9d181c1
SB
2367 spin_lock(&sctx->list_lock);
2368 sbio->next_free = sctx->first_free;
2369 sctx->first_free = sbio->index;
2370 spin_unlock(&sctx->list_lock);
ff023aac
SB
2371
2372 if (sctx->is_dev_replace &&
2373 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2374 mutex_lock(&sctx->wr_ctx.wr_lock);
2375 scrub_wr_submit(sctx);
2376 mutex_unlock(&sctx->wr_ctx.wr_lock);
2377 }
2378
b6bfebc1 2379 scrub_pending_bio_dec(sctx);
b5d67f64
SB
2380}
2381
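/*
 * Mark the sectors of [start, start + len) in a per-stripe bitmap. The
 * offset is taken modulo the stripe length, so a range crossing the end
 * of the stripe wraps around to bit 0; a range of at least a full stripe
 * sets all bits.
 */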
5a6ac9ea
MX
2382static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2383 unsigned long *bitmap,
2384 u64 start, u64 len)
2385{
9d644a62 2386 u32 offset;
5a6ac9ea
MX
2387 int nsectors;
2388 int sectorsize = sparity->sctx->dev_root->sectorsize;
2389
2390 if (len >= sparity->stripe_len) {
2391 bitmap_set(bitmap, 0, sparity->nsectors);
2392 return;
2393 }
2394
2395 start -= sparity->logic_start;
47c5713f 2396 start = div_u64_rem(start, sparity->stripe_len, &offset);
5a6ac9ea
MX
2397 offset /= sectorsize;
2398 nsectors = (int)len / sectorsize;
2399
2400 if (offset + nsectors <= sparity->nsectors) {
2401 bitmap_set(bitmap, offset, nsectors);
2402 return;
2403 }
2404
2405 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2406 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2407}
2408
2409static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2410 u64 start, u64 len)
2411{
2412 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2413}
2414
2415static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2416 u64 start, u64 len)
2417{
2418 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2419}
2420
b5d67f64
SB
2421static void scrub_block_complete(struct scrub_block *sblock)
2422{
5a6ac9ea
MX
2423 int corrupted = 0;
2424
ff023aac 2425 if (!sblock->no_io_error_seen) {
5a6ac9ea 2426 corrupted = 1;
b5d67f64 2427 scrub_handle_errored_block(sblock);
ff023aac
SB
2428 } else {
2429 /*
2430 * in the dev replace case: if the block has a checksum
2431 * error, it is written via the repair mechanism; otherwise
2432 * it is written out here.
2433 */
5a6ac9ea
MX
2434 corrupted = scrub_checksum(sblock);
2435 if (!corrupted && sblock->sctx->is_dev_replace)
ff023aac
SB
2436 scrub_write_block_to_dev_replace(sblock);
2437 }
5a6ac9ea
MX
2438
2439 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2440 u64 start = sblock->pagev[0]->logical;
2441 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2442 PAGE_SIZE;
2443
2444 scrub_parity_mark_sectors_error(sblock->sparity,
2445 start, end - start);
2446 }
b5d67f64
SB
2447}
2448
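/*
 * Find the data checksum for @logical in sctx->csum_list. Sums that end
 * before @logical are discarded on the way, and a sum is freed once its
 * last sector has been consumed. Returns 1 and copies the csum on
 * success, 0 otherwise.
 */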
3b5753ec 2449static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
a2de733c
AJ
2450{
2451 struct btrfs_ordered_sum *sum = NULL;
f51a4a18 2452 unsigned long index;
a2de733c 2453 unsigned long num_sectors;
a2de733c 2454
d9d181c1
SB
2455 while (!list_empty(&sctx->csum_list)) {
2456 sum = list_first_entry(&sctx->csum_list,
a2de733c
AJ
2457 struct btrfs_ordered_sum, list);
2458 if (sum->bytenr > logical)
2459 return 0;
2460 if (sum->bytenr + sum->len > logical)
2461 break;
2462
d9d181c1 2463 ++sctx->stat.csum_discards;
a2de733c
AJ
2464 list_del(&sum->list);
2465 kfree(sum);
2466 sum = NULL;
2467 }
2468 if (!sum)
2469 return 0;
2470
f51a4a18 2471 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
d9d181c1 2472 num_sectors = sum->len / sctx->sectorsize;
f51a4a18
MX
2473 memcpy(csum, sum->sums + index, sctx->csum_size);
2474 if (index == num_sectors - 1) {
a2de733c
AJ
2475 list_del(&sum->list);
2476 kfree(sum);
2477 }
f51a4a18 2478 return 1;
a2de733c
AJ
2479}
2480
2481/* scrub extent tries to collect up to 64 kB for each bio */
d9d181c1 2482static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
a36cf8b8 2483 u64 physical, struct btrfs_device *dev, u64 flags,
ff023aac 2484 u64 gen, int mirror_num, u64 physical_for_dev_replace)
a2de733c
AJ
2485{
2486 int ret;
2487 u8 csum[BTRFS_CSUM_SIZE];
b5d67f64
SB
2488 u32 blocksize;
2489
2490 if (flags & BTRFS_EXTENT_FLAG_DATA) {
d9d181c1
SB
2491 blocksize = sctx->sectorsize;
2492 spin_lock(&sctx->stat_lock);
2493 sctx->stat.data_extents_scrubbed++;
2494 sctx->stat.data_bytes_scrubbed += len;
2495 spin_unlock(&sctx->stat_lock);
b5d67f64 2496 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
d9d181c1
SB
2497 blocksize = sctx->nodesize;
2498 spin_lock(&sctx->stat_lock);
2499 sctx->stat.tree_extents_scrubbed++;
2500 sctx->stat.tree_bytes_scrubbed += len;
2501 spin_unlock(&sctx->stat_lock);
b5d67f64 2502 } else {
d9d181c1 2503 blocksize = sctx->sectorsize;
ff023aac 2504 WARN_ON(1);
b5d67f64 2505 }
a2de733c
AJ
2506
2507 while (len) {
b5d67f64 2508 u64 l = min_t(u64, len, blocksize);
a2de733c
AJ
2509 int have_csum = 0;
2510
2511 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2512 /* push csums to sbio */
3b5753ec 2513 have_csum = scrub_find_csum(sctx, logical, csum);
a2de733c 2514 if (have_csum == 0)
d9d181c1 2515 ++sctx->stat.no_csum;
ff023aac
SB
2516 if (sctx->is_dev_replace && !have_csum) {
2517 ret = copy_nocow_pages(sctx, logical, l,
2518 mirror_num,
2519 physical_for_dev_replace);
2520 goto behind_scrub_pages;
2521 }
a2de733c 2522 }
a36cf8b8 2523 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
ff023aac
SB
2524 mirror_num, have_csum ? csum : NULL, 0,
2525 physical_for_dev_replace);
2526behind_scrub_pages:
a2de733c
AJ
2527 if (ret)
2528 return ret;
2529 len -= l;
2530 logical += l;
2531 physical += l;
ff023aac 2532 physical_for_dev_replace += l;
a2de733c
AJ
2533 }
2534 return 0;
2535}
2536
5a6ac9ea
MX
2537static int scrub_pages_for_parity(struct scrub_parity *sparity,
2538 u64 logical, u64 len,
2539 u64 physical, struct btrfs_device *dev,
2540 u64 flags, u64 gen, int mirror_num, u8 *csum)
2541{
2542 struct scrub_ctx *sctx = sparity->sctx;
2543 struct scrub_block *sblock;
2544 int index;
2545
58c4e173 2546 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
5a6ac9ea
MX
2547 if (!sblock) {
2548 spin_lock(&sctx->stat_lock);
2549 sctx->stat.malloc_errors++;
2550 spin_unlock(&sctx->stat_lock);
2551 return -ENOMEM;
2552 }
2553
2554 /* one ref inside this function, plus one for each page added to
2555 * a bio later on */
57019345 2556 atomic_set(&sblock->refs, 1);
5a6ac9ea
MX
2557 sblock->sctx = sctx;
2558 sblock->no_io_error_seen = 1;
2559 sblock->sparity = sparity;
2560 scrub_parity_get(sparity);
2561
2562 for (index = 0; len > 0; index++) {
2563 struct scrub_page *spage;
2564 u64 l = min_t(u64, len, PAGE_SIZE);
2565
58c4e173 2566 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
5a6ac9ea
MX
2567 if (!spage) {
2568leave_nomem:
2569 spin_lock(&sctx->stat_lock);
2570 sctx->stat.malloc_errors++;
2571 spin_unlock(&sctx->stat_lock);
2572 scrub_block_put(sblock);
2573 return -ENOMEM;
2574 }
2575 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2576 /* For scrub block */
2577 scrub_page_get(spage);
2578 sblock->pagev[index] = spage;
2579 /* For scrub parity */
2580 scrub_page_get(spage);
2581 list_add_tail(&spage->list, &sparity->spages);
2582 spage->sblock = sblock;
2583 spage->dev = dev;
2584 spage->flags = flags;
2585 spage->generation = gen;
2586 spage->logical = logical;
2587 spage->physical = physical;
2588 spage->mirror_num = mirror_num;
2589 if (csum) {
2590 spage->have_csum = 1;
2591 memcpy(spage->csum, csum, sctx->csum_size);
2592 } else {
2593 spage->have_csum = 0;
2594 }
2595 sblock->page_count++;
58c4e173 2596 spage->page = alloc_page(GFP_KERNEL);
5a6ac9ea
MX
2597 if (!spage->page)
2598 goto leave_nomem;
2599 len -= l;
2600 logical += l;
2601 physical += l;
2602 }
2603
2604 WARN_ON(sblock->page_count == 0);
2605 for (index = 0; index < sblock->page_count; index++) {
2606 struct scrub_page *spage = sblock->pagev[index];
2607 int ret;
2608
2609 ret = scrub_add_page_to_rd_bio(sctx, spage);
2610 if (ret) {
2611 scrub_block_put(sblock);
2612 return ret;
2613 }
2614 }
2615
2616 /* last one frees, either here or in bio completion for last page */
2617 scrub_block_put(sblock);
2618 return 0;
2619}
2620
2621static int scrub_extent_for_parity(struct scrub_parity *sparity,
2622 u64 logical, u64 len,
2623 u64 physical, struct btrfs_device *dev,
2624 u64 flags, u64 gen, int mirror_num)
2625{
2626 struct scrub_ctx *sctx = sparity->sctx;
2627 int ret;
2628 u8 csum[BTRFS_CSUM_SIZE];
2629 u32 blocksize;
2630
4a770891
OS
2631 if (dev->missing) {
2632 scrub_parity_mark_sectors_error(sparity, logical, len);
2633 return 0;
2634 }
2635
5a6ac9ea
MX
2636 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2637 blocksize = sctx->sectorsize;
2638 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2639 blocksize = sctx->nodesize;
2640 } else {
2641 blocksize = sctx->sectorsize;
2642 WARN_ON(1);
2643 }
2644
2645 while (len) {
2646 u64 l = min_t(u64, len, blocksize);
2647 int have_csum = 0;
2648
2649 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2650 /* push csums to sbio */
3b5753ec 2651 have_csum = scrub_find_csum(sctx, logical, csum);
5a6ac9ea
MX
2652 if (have_csum == 0)
2653 goto skip;
2654 }
2655 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2656 flags, gen, mirror_num,
2657 have_csum ? csum : NULL);
5a6ac9ea
MX
2658 if (ret)
2659 return ret;
6b6d24b3 2660skip:
5a6ac9ea
MX
2661 len -= l;
2662 logical += l;
2663 physical += l;
2664 }
2665 return 0;
2666}
2667
3b080b25
WS
2668/*
2669 * Given a physical address, calculate its logical offset. If this
2670 * is a parity stripe, return the leftmost data stripe's logical
2671 * offset.
2672 *
2673 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
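 *
 * Illustration (numbers made up for this comment): with 3 data stripes
 * and a 64K stripe_len, a position 64K into this device's stripe gives
 * last_offset = 64K * 3 = 192K; the loop below then tries each data
 * stripe of that row and, via the per-row rotation, finds the one that
 * actually lands on device @num.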
2674 */
2675static int get_raid56_logic_offset(u64 physical, int num,
5a6ac9ea
MX
2676 struct map_lookup *map, u64 *offset,
2677 u64 *stripe_start)
3b080b25
WS
2678{
2679 int i;
2680 int j = 0;
2681 u64 stripe_nr;
2682 u64 last_offset;
9d644a62
DS
2683 u32 stripe_index;
2684 u32 rot;
3b080b25
WS
2685
2686 last_offset = (physical - map->stripes[num].physical) *
2687 nr_data_stripes(map);
5a6ac9ea
MX
2688 if (stripe_start)
2689 *stripe_start = last_offset;
2690
3b080b25
WS
2691 *offset = last_offset;
2692 for (i = 0; i < nr_data_stripes(map); i++) {
2693 *offset = last_offset + i * map->stripe_len;
2694
b8b93add
DS
2695 stripe_nr = div_u64(*offset, map->stripe_len);
2696 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
3b080b25
WS
2697
2698 /* Work out the disk rotation on this stripe-set */
47c5713f 2699 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
3b080b25
WS
2700 /* calculate which stripe this data is located on */
2701 rot += i;
e4fbaee2 2702 stripe_index = rot % map->num_stripes;
3b080b25
WS
2703 if (stripe_index == num)
2704 return 0;
2705 if (stripe_index < num)
2706 j++;
2707 }
2708 *offset = last_offset + j * map->stripe_len;
2709 return 1;
2710}
2711
5a6ac9ea
MX
2712static void scrub_free_parity(struct scrub_parity *sparity)
2713{
2714 struct scrub_ctx *sctx = sparity->sctx;
2715 struct scrub_page *curr, *next;
2716 int nbits;
2717
2718 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2719 if (nbits) {
2720 spin_lock(&sctx->stat_lock);
2721 sctx->stat.read_errors += nbits;
2722 sctx->stat.uncorrectable_errors += nbits;
2723 spin_unlock(&sctx->stat_lock);
2724 }
2725
2726 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2727 list_del_init(&curr->list);
2728 scrub_page_put(curr);
2729 }
2730
2731 kfree(sparity);
2732}
2733
20b2e302
ZL
2734static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2735{
2736 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2737 work);
2738 struct scrub_ctx *sctx = sparity->sctx;
2739
2740 scrub_free_parity(sparity);
2741 scrub_pending_bio_dec(sctx);
2742}
2743
4246a0b6 2744static void scrub_parity_bio_endio(struct bio *bio)
5a6ac9ea
MX
2745{
2746 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
5a6ac9ea 2747
4246a0b6 2748 if (bio->bi_error)
5a6ac9ea
MX
2749 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2750 sparity->nsectors);
2751
5a6ac9ea 2752 bio_put(bio);
20b2e302
ZL
2753
2754 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2755 scrub_parity_bio_endio_worker, NULL, NULL);
2756 btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2757 &sparity->work);
5a6ac9ea
MX
2758}
2759
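/*
 * Runs when the last reference to a scrub_parity is dropped: the sectors
 * still set in dbitmap after subtracting ebitmap (the known-bad sectors)
 * are handed to the raid56 layer for a parity check and rewrite; the
 * sectors in ebitmap are accounted as uncorrectable in
 * scrub_free_parity().
 */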
2760static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2761{
2762 struct scrub_ctx *sctx = sparity->sctx;
2763 struct bio *bio;
2764 struct btrfs_raid_bio *rbio;
2765 struct scrub_page *spage;
2766 struct btrfs_bio *bbio = NULL;
5a6ac9ea
MX
2767 u64 length;
2768 int ret;
2769
2770 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2771 sparity->nsectors))
2772 goto out;
2773
a0dd59de 2774 length = sparity->logic_end - sparity->logic_start;
76035976 2775 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
5a6ac9ea 2776 sparity->logic_start,
8e5cfb55
ZL
2777 &length, &bbio, 0, 1);
2778 if (ret || !bbio || !bbio->raid_map)
5a6ac9ea
MX
2779 goto bbio_out;
2780
2781 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2782 if (!bio)
2783 goto bbio_out;
2784
2785 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2786 bio->bi_private = sparity;
2787 bio->bi_end_io = scrub_parity_bio_endio;
2788
2789 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
8e5cfb55 2790 length, sparity->scrub_dev,
5a6ac9ea
MX
2791 sparity->dbitmap,
2792 sparity->nsectors);
2793 if (!rbio)
2794 goto rbio_out;
2795
2796 list_for_each_entry(spage, &sparity->spages, list)
b4ee1782 2797 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
5a6ac9ea
MX
2798
2799 scrub_pending_bio_inc(sctx);
2800 raid56_parity_submit_scrub_rbio(rbio);
2801 return;
2802
2803rbio_out:
2804 bio_put(bio);
2805bbio_out:
6e9606d2 2806 btrfs_put_bbio(bbio);
5a6ac9ea
MX
2807 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2808 sparity->nsectors);
2809 spin_lock(&sctx->stat_lock);
2810 sctx->stat.malloc_errors++;
2811 spin_unlock(&sctx->stat_lock);
2812out:
2813 scrub_free_parity(sparity);
2814}
2815
2816static inline int scrub_calc_parity_bitmap_len(int nsectors)
2817{
bfca9a6d 2818 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
5a6ac9ea
MX
2819}
2820
2821static void scrub_parity_get(struct scrub_parity *sparity)
2822{
57019345 2823 atomic_inc(&sparity->refs);
5a6ac9ea
MX
2824}
2825
2826static void scrub_parity_put(struct scrub_parity *sparity)
2827{
57019345 2828 if (!atomic_dec_and_test(&sparity->refs))
5a6ac9ea
MX
2829 return;
2830
2831 scrub_parity_check_and_repair(sparity);
2832}
2833
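/*
 * Scrub the parity of one RAID5/6 stripe range: walk the extent tree for
 * data in [logic_start, logic_end), mark the covered sectors in dbitmap,
 * read and verify them, and let the final scrub_parity_put() trigger the
 * actual parity check once all pages have completed.
 */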
2834static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2835 struct map_lookup *map,
2836 struct btrfs_device *sdev,
2837 struct btrfs_path *path,
2838 u64 logic_start,
2839 u64 logic_end)
2840{
2841 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2842 struct btrfs_root *root = fs_info->extent_root;
2843 struct btrfs_root *csum_root = fs_info->csum_root;
2844 struct btrfs_extent_item *extent;
4a770891 2845 struct btrfs_bio *bbio = NULL;
5a6ac9ea
MX
2846 u64 flags;
2847 int ret;
2848 int slot;
2849 struct extent_buffer *l;
2850 struct btrfs_key key;
2851 u64 generation;
2852 u64 extent_logical;
2853 u64 extent_physical;
2854 u64 extent_len;
4a770891 2855 u64 mapped_length;
5a6ac9ea
MX
2856 struct btrfs_device *extent_dev;
2857 struct scrub_parity *sparity;
2858 int nsectors;
2859 int bitmap_len;
2860 int extent_mirror_num;
2861 int stop_loop = 0;
2862
3d8da678 2863 nsectors = div_u64(map->stripe_len, root->sectorsize);
5a6ac9ea
MX
2864 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2865 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2866 GFP_NOFS);
2867 if (!sparity) {
2868 spin_lock(&sctx->stat_lock);
2869 sctx->stat.malloc_errors++;
2870 spin_unlock(&sctx->stat_lock);
2871 return -ENOMEM;
2872 }
2873
2874 sparity->stripe_len = map->stripe_len;
2875 sparity->nsectors = nsectors;
2876 sparity->sctx = sctx;
2877 sparity->scrub_dev = sdev;
2878 sparity->logic_start = logic_start;
2879 sparity->logic_end = logic_end;
57019345 2880 atomic_set(&sparity->refs, 1);
5a6ac9ea
MX
2881 INIT_LIST_HEAD(&sparity->spages);
2882 sparity->dbitmap = sparity->bitmap;
2883 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2884
2885 ret = 0;
2886 while (logic_start < logic_end) {
2887 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2888 key.type = BTRFS_METADATA_ITEM_KEY;
2889 else
2890 key.type = BTRFS_EXTENT_ITEM_KEY;
2891 key.objectid = logic_start;
2892 key.offset = (u64)-1;
2893
2894 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2895 if (ret < 0)
2896 goto out;
2897
2898 if (ret > 0) {
2899 ret = btrfs_previous_extent_item(root, path, 0);
2900 if (ret < 0)
2901 goto out;
2902 if (ret > 0) {
2903 btrfs_release_path(path);
2904 ret = btrfs_search_slot(NULL, root, &key,
2905 path, 0, 0);
2906 if (ret < 0)
2907 goto out;
2908 }
2909 }
2910
2911 stop_loop = 0;
2912 while (1) {
2913 u64 bytes;
2914
2915 l = path->nodes[0];
2916 slot = path->slots[0];
2917 if (slot >= btrfs_header_nritems(l)) {
2918 ret = btrfs_next_leaf(root, path);
2919 if (ret == 0)
2920 continue;
2921 if (ret < 0)
2922 goto out;
2923
2924 stop_loop = 1;
2925 break;
2926 }
2927 btrfs_item_key_to_cpu(l, &key, slot);
2928
d7cad238
ZL
2929 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2930 key.type != BTRFS_METADATA_ITEM_KEY)
2931 goto next;
2932
5a6ac9ea
MX
2933 if (key.type == BTRFS_METADATA_ITEM_KEY)
2934 bytes = root->nodesize;
2935 else
2936 bytes = key.offset;
2937
2938 if (key.objectid + bytes <= logic_start)
2939 goto next;
2940
a0dd59de 2941 if (key.objectid >= logic_end) {
5a6ac9ea
MX
2942 stop_loop = 1;
2943 break;
2944 }
2945
2946 while (key.objectid >= logic_start + map->stripe_len)
2947 logic_start += map->stripe_len;
2948
2949 extent = btrfs_item_ptr(l, slot,
2950 struct btrfs_extent_item);
2951 flags = btrfs_extent_flags(l, extent);
2952 generation = btrfs_extent_generation(l, extent);
2953
a323e813
ZL
2954 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2955 (key.objectid < logic_start ||
2956 key.objectid + bytes >
2957 logic_start + map->stripe_len)) {
2958 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2959 key.objectid, logic_start);
9799d2c3
ZL
2960 spin_lock(&sctx->stat_lock);
2961 sctx->stat.uncorrectable_errors++;
2962 spin_unlock(&sctx->stat_lock);
5a6ac9ea
MX
2963 goto next;
2964 }
2965again:
2966 extent_logical = key.objectid;
2967 extent_len = bytes;
2968
2969 if (extent_logical < logic_start) {
2970 extent_len -= logic_start - extent_logical;
2971 extent_logical = logic_start;
2972 }
2973
2974 if (extent_logical + extent_len >
2975 logic_start + map->stripe_len)
2976 extent_len = logic_start + map->stripe_len -
2977 extent_logical;
2978
2979 scrub_parity_mark_sectors_data(sparity, extent_logical,
2980 extent_len);
2981
4a770891
OS
2982 mapped_length = extent_len;
2983 ret = btrfs_map_block(fs_info, READ, extent_logical,
2984 &mapped_length, &bbio, 0);
2985 if (!ret) {
2986 if (!bbio || mapped_length < extent_len)
2987 ret = -EIO;
2988 }
2989 if (ret) {
2990 btrfs_put_bbio(bbio);
2991 goto out;
2992 }
2993 extent_physical = bbio->stripes[0].physical;
2994 extent_mirror_num = bbio->mirror_num;
2995 extent_dev = bbio->stripes[0].dev;
2996 btrfs_put_bbio(bbio);
5a6ac9ea
MX
2997
2998 ret = btrfs_lookup_csums_range(csum_root,
2999 extent_logical,
3000 extent_logical + extent_len - 1,
3001 &sctx->csum_list, 1);
3002 if (ret)
3003 goto out;
3004
3005 ret = scrub_extent_for_parity(sparity, extent_logical,
3006 extent_len,
3007 extent_physical,
3008 extent_dev, flags,
3009 generation,
3010 extent_mirror_num);
6fa96d72
ZL
3011
3012 scrub_free_csums(sctx);
3013
5a6ac9ea
MX
3014 if (ret)
3015 goto out;
3016
5a6ac9ea
MX
3017 if (extent_logical + extent_len <
3018 key.objectid + bytes) {
3019 logic_start += map->stripe_len;
3020
3021 if (logic_start >= logic_end) {
3022 stop_loop = 1;
3023 break;
3024 }
3025
3026 if (logic_start < key.objectid + bytes) {
3027 cond_resched();
3028 goto again;
3029 }
3030 }
3031next:
3032 path->slots[0]++;
3033 }
3034
3035 btrfs_release_path(path);
3036
3037 if (stop_loop)
3038 break;
3039
3040 logic_start += map->stripe_len;
3041 }
3042out:
3043 if (ret < 0)
3044 scrub_parity_mark_sectors_error(sparity, logic_start,
a0dd59de 3045 logic_end - logic_start);
5a6ac9ea
MX
3046 scrub_parity_put(sparity);
3047 scrub_submit(sctx);
3048 mutex_lock(&sctx->wr_ctx.wr_lock);
3049 scrub_wr_submit(sctx);
3050 mutex_unlock(&sctx->wr_ctx.wr_lock);
3051
3052 btrfs_release_path(path);
3053 return ret < 0 ? ret : 0;
3054}
3055
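/*
 * Scrub all extents of one device stripe of a chunk. The RAID level
 * determines how a device-local position advances through the logical
 * address space: the per-iteration increment is stripe_len for
 * single/DUP/RAID1, stripe_len * num_stripes for RAID0,
 * stripe_len * (num_stripes / sub_stripes) for RAID10 and
 * stripe_len * nr_data_stripes for RAID5/6.
 */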
d9d181c1 3056static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
a36cf8b8
SB
3057 struct map_lookup *map,
3058 struct btrfs_device *scrub_dev,
ff023aac
SB
3059 int num, u64 base, u64 length,
3060 int is_dev_replace)
a2de733c 3061{
5a6ac9ea 3062 struct btrfs_path *path, *ppath;
a36cf8b8 3063 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
a2de733c
AJ
3064 struct btrfs_root *root = fs_info->extent_root;
3065 struct btrfs_root *csum_root = fs_info->csum_root;
3066 struct btrfs_extent_item *extent;
e7786c3a 3067 struct blk_plug plug;
a2de733c
AJ
3068 u64 flags;
3069 int ret;
3070 int slot;
a2de733c 3071 u64 nstripes;
a2de733c 3072 struct extent_buffer *l;
a2de733c
AJ
3073 u64 physical;
3074 u64 logical;
625f1c8d 3075 u64 logic_end;
3b080b25 3076 u64 physical_end;
a2de733c 3077 u64 generation;
e12fa9cd 3078 int mirror_num;
7a26285e
AJ
3079 struct reada_control *reada1;
3080 struct reada_control *reada2;
e6c11f9a 3081 struct btrfs_key key;
7a26285e 3082 struct btrfs_key key_end;
a2de733c
AJ
3083 u64 increment = map->stripe_len;
3084 u64 offset;
ff023aac
SB
3085 u64 extent_logical;
3086 u64 extent_physical;
3087 u64 extent_len;
5a6ac9ea
MX
3088 u64 stripe_logical;
3089 u64 stripe_end;
ff023aac
SB
3090 struct btrfs_device *extent_dev;
3091 int extent_mirror_num;
3b080b25 3092 int stop_loop = 0;
53b381b3 3093
3b080b25 3094 physical = map->stripes[num].physical;
a2de733c 3095 offset = 0;
b8b93add 3096 nstripes = div_u64(length, map->stripe_len);
a2de733c
AJ
3097 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3098 offset = map->stripe_len * num;
3099 increment = map->stripe_len * map->num_stripes;
193ea74b 3100 mirror_num = 1;
a2de733c
AJ
3101 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3102 int factor = map->num_stripes / map->sub_stripes;
3103 offset = map->stripe_len * (num / map->sub_stripes);
3104 increment = map->stripe_len * factor;
193ea74b 3105 mirror_num = num % map->sub_stripes + 1;
a2de733c
AJ
3106 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3107 increment = map->stripe_len;
193ea74b 3108 mirror_num = num % map->num_stripes + 1;
a2de733c
AJ
3109 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3110 increment = map->stripe_len;
193ea74b 3111 mirror_num = num % map->num_stripes + 1;
ffe2d203 3112 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5a6ac9ea 3113 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3b080b25
WS
3114 increment = map->stripe_len * nr_data_stripes(map);
3115 mirror_num = 1;
a2de733c
AJ
3116 } else {
3117 increment = map->stripe_len;
193ea74b 3118 mirror_num = 1;
a2de733c
AJ
3119 }
3120
3121 path = btrfs_alloc_path();
3122 if (!path)
3123 return -ENOMEM;
3124
5a6ac9ea
MX
3125 ppath = btrfs_alloc_path();
3126 if (!ppath) {
379d6854 3127 btrfs_free_path(path);
5a6ac9ea
MX
3128 return -ENOMEM;
3129 }
3130
b5d67f64
SB
3131 /*
3132 * work on commit root. The related disk blocks are static as
3133 * long as COW is applied. This means it is safe to rewrite
3134 * them to repair disk errors without any race conditions
3135 */
a2de733c
AJ
3136 path->search_commit_root = 1;
3137 path->skip_locking = 1;
3138
063c54dc
GH
3139 ppath->search_commit_root = 1;
3140 ppath->skip_locking = 1;
a2de733c 3141 /*
7a26285e
AJ
3142 * trigger the readahead for the extent tree and csum tree and wait
3143 * for completion. During readahead, the scrub is officially paused
3144 * so that it does not hold off transaction commits
a2de733c
AJ
3145 */
3146 logical = base + offset;
3b080b25 3147 physical_end = physical + nstripes * map->stripe_len;
ffe2d203 3148 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3b080b25 3149 get_raid56_logic_offset(physical_end, num,
5a6ac9ea 3150 map, &logic_end, NULL);
3b080b25
WS
3151 logic_end += base;
3152 } else {
3153 logic_end = logical + increment * nstripes;
3154 }
d9d181c1 3155 wait_event(sctx->list_wait,
b6bfebc1 3156 atomic_read(&sctx->bios_in_flight) == 0);
cb7ab021 3157 scrub_blocked_if_needed(fs_info);
7a26285e
AJ
3158
3159 /* FIXME it might be better to start readahead at commit root */
e6c11f9a
DS
3160 key.objectid = logical;
3161 key.type = BTRFS_EXTENT_ITEM_KEY;
3162 key.offset = (u64)0;
3b080b25 3163 key_end.objectid = logic_end;
3173a18f
JB
3164 key_end.type = BTRFS_METADATA_ITEM_KEY;
3165 key_end.offset = (u64)-1;
e6c11f9a 3166 reada1 = btrfs_reada_add(root, &key, &key_end);
7a26285e 3167
e6c11f9a
DS
3168 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3169 key.type = BTRFS_EXTENT_CSUM_KEY;
3170 key.offset = logical;
7a26285e
AJ
3171 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3172 key_end.type = BTRFS_EXTENT_CSUM_KEY;
3b080b25 3173 key_end.offset = logic_end;
e6c11f9a 3174 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
7a26285e
AJ
3175
3176 if (!IS_ERR(reada1))
3177 btrfs_reada_wait(reada1);
3178 if (!IS_ERR(reada2))
3179 btrfs_reada_wait(reada2);
3180
a2de733c
AJ
3181
3182 /*
3183 * collect all data csums for the stripe to avoid seeking during
3184 * the scrub. With crc32 this might currently end up being about 1MB
3185 */
e7786c3a 3186 blk_start_plug(&plug);
a2de733c 3187
a2de733c
AJ
3188 /*
3189 * now find all extents for each stripe and scrub them
3190 */
a2de733c 3191 ret = 0;
3b080b25 3192 while (physical < physical_end) {
a2de733c
AJ
3193 /*
3194 * canceled?
3195 */
3196 if (atomic_read(&fs_info->scrub_cancel_req) ||
d9d181c1 3197 atomic_read(&sctx->cancel_req)) {
a2de733c
AJ
3198 ret = -ECANCELED;
3199 goto out;
3200 }
3201 /*
3202 * check to see if we have to pause
3203 */
3204 if (atomic_read(&fs_info->scrub_pause_req)) {
3205 /* push queued extents */
ff023aac 3206 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
d9d181c1 3207 scrub_submit(sctx);
ff023aac
SB
3208 mutex_lock(&sctx->wr_ctx.wr_lock);
3209 scrub_wr_submit(sctx);
3210 mutex_unlock(&sctx->wr_ctx.wr_lock);
d9d181c1 3211 wait_event(sctx->list_wait,
b6bfebc1 3212 atomic_read(&sctx->bios_in_flight) == 0);
ff023aac 3213 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3cb0929a 3214 scrub_blocked_if_needed(fs_info);
a2de733c
AJ
3215 }
3216
f2f66a2f
ZL
3217 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3218 ret = get_raid56_logic_offset(physical, num, map,
3219 &logical,
3220 &stripe_logical);
3221 logical += base;
3222 if (ret) {
7955323b 3223 /* it is a parity stripe */
f2f66a2f 3224 stripe_logical += base;
a0dd59de 3225 stripe_end = stripe_logical + increment;
f2f66a2f
ZL
3226 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3227 ppath, stripe_logical,
3228 stripe_end);
3229 if (ret)
3230 goto out;
3231 goto skip;
3232 }
3233 }
3234
7c76edb7
WS
3235 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3236 key.type = BTRFS_METADATA_ITEM_KEY;
3237 else
3238 key.type = BTRFS_EXTENT_ITEM_KEY;
a2de733c 3239 key.objectid = logical;
625f1c8d 3240 key.offset = (u64)-1;
a2de733c
AJ
3241
3242 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3243 if (ret < 0)
3244 goto out;
3173a18f 3245
8c51032f 3246 if (ret > 0) {
ade2e0b3 3247 ret = btrfs_previous_extent_item(root, path, 0);
a2de733c
AJ
3248 if (ret < 0)
3249 goto out;
8c51032f
AJ
3250 if (ret > 0) {
3251 /* there's no smaller item, so stick with the
3252 * larger one */
3253 btrfs_release_path(path);
3254 ret = btrfs_search_slot(NULL, root, &key,
3255 path, 0, 0);
3256 if (ret < 0)
3257 goto out;
3258 }
a2de733c
AJ
3259 }
3260
625f1c8d 3261 stop_loop = 0;
a2de733c 3262 while (1) {
3173a18f
JB
3263 u64 bytes;
3264
a2de733c
AJ
3265 l = path->nodes[0];
3266 slot = path->slots[0];
3267 if (slot >= btrfs_header_nritems(l)) {
3268 ret = btrfs_next_leaf(root, path);
3269 if (ret == 0)
3270 continue;
3271 if (ret < 0)
3272 goto out;
3273
625f1c8d 3274 stop_loop = 1;
a2de733c
AJ
3275 break;
3276 }
3277 btrfs_item_key_to_cpu(l, &key, slot);
3278
d7cad238
ZL
3279 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3280 key.type != BTRFS_METADATA_ITEM_KEY)
3281 goto next;
3282
3173a18f 3283 if (key.type == BTRFS_METADATA_ITEM_KEY)
707e8a07 3284 bytes = root->nodesize;
3173a18f
JB
3285 else
3286 bytes = key.offset;
3287
3288 if (key.objectid + bytes <= logical)
a2de733c
AJ
3289 goto next;
3290
625f1c8d
LB
3291 if (key.objectid >= logical + map->stripe_len) {
3292 /* out of this device extent */
3293 if (key.objectid >= logic_end)
3294 stop_loop = 1;
3295 break;
3296 }
a2de733c
AJ
3297
3298 extent = btrfs_item_ptr(l, slot,
3299 struct btrfs_extent_item);
3300 flags = btrfs_extent_flags(l, extent);
3301 generation = btrfs_extent_generation(l, extent);
3302
a323e813
ZL
3303 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3304 (key.objectid < logical ||
3305 key.objectid + bytes >
3306 logical + map->stripe_len)) {
efe120a0
FH
3307 btrfs_err(fs_info,
3308 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
c1c9ff7c 3310 key.objectid, logical);
9799d2c3
ZL
3311 spin_lock(&sctx->stat_lock);
3312 sctx->stat.uncorrectable_errors++;
3313 spin_unlock(&sctx->stat_lock);
a2de733c
AJ
3314 goto next;
3315 }
3316
625f1c8d
LB
3317again:
3318 extent_logical = key.objectid;
3319 extent_len = bytes;
3320
a2de733c
AJ
3321 /*
3322 * trim extent to this stripe
3323 */
625f1c8d
LB
3324 if (extent_logical < logical) {
3325 extent_len -= logical - extent_logical;
3326 extent_logical = logical;
a2de733c 3327 }
625f1c8d 3328 if (extent_logical + extent_len >
a2de733c 3329 logical + map->stripe_len) {
625f1c8d
LB
3330 extent_len = logical + map->stripe_len -
3331 extent_logical;
a2de733c
AJ
3332 }
3333
625f1c8d 3334 extent_physical = extent_logical - logical + physical;
ff023aac
SB
3335 extent_dev = scrub_dev;
3336 extent_mirror_num = mirror_num;
3337 if (is_dev_replace)
3338 scrub_remap_extent(fs_info, extent_logical,
3339 extent_len, &extent_physical,
3340 &extent_dev,
3341 &extent_mirror_num);
625f1c8d 3342
fe8cf654
ZL
3343 ret = btrfs_lookup_csums_range(csum_root,
3344 extent_logical,
3345 extent_logical +
3346 extent_len - 1,
3347 &sctx->csum_list, 1);
625f1c8d
LB
3348 if (ret)
3349 goto out;
3350
ff023aac
SB
3351 ret = scrub_extent(sctx, extent_logical, extent_len,
3352 extent_physical, extent_dev, flags,
3353 generation, extent_mirror_num,
115930cb 3354 extent_logical - logical + physical);
6fa96d72
ZL
3355
3356 scrub_free_csums(sctx);
3357
a2de733c
AJ
3358 if (ret)
3359 goto out;
3360
625f1c8d
LB
3361 if (extent_logical + extent_len <
3362 key.objectid + bytes) {
ffe2d203 3363 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3b080b25
WS
3364 /*
3365 * loop until we find next data stripe
3366 * or we have finished all stripes.
3367 */
5a6ac9ea
MX
3368loop:
3369 physical += map->stripe_len;
3370 ret = get_raid56_logic_offset(physical,
3371 num, map, &logical,
3372 &stripe_logical);
3373 logical += base;
3374
3375 if (ret && physical < physical_end) {
3376 stripe_logical += base;
3377 stripe_end = stripe_logical +
a0dd59de 3378 increment;
5a6ac9ea
MX
3379 ret = scrub_raid56_parity(sctx,
3380 map, scrub_dev, ppath,
3381 stripe_logical,
3382 stripe_end);
3383 if (ret)
3384 goto out;
3385 goto loop;
3386 }
3b080b25
WS
3387 } else {
3388 physical += map->stripe_len;
3389 logical += increment;
3390 }
625f1c8d
LB
3391 if (logical < key.objectid + bytes) {
3392 cond_resched();
3393 goto again;
3394 }
3395
3b080b25 3396 if (physical >= physical_end) {
625f1c8d
LB
3397 stop_loop = 1;
3398 break;
3399 }
3400 }
a2de733c
AJ
3401next:
3402 path->slots[0]++;
3403 }
71267333 3404 btrfs_release_path(path);
3b080b25 3405skip:
a2de733c
AJ
3406 logical += increment;
3407 physical += map->stripe_len;
d9d181c1 3408 spin_lock(&sctx->stat_lock);
625f1c8d
LB
3409 if (stop_loop)
3410 sctx->stat.last_physical = map->stripes[num].physical +
3411 length;
3412 else
3413 sctx->stat.last_physical = physical;
d9d181c1 3414 spin_unlock(&sctx->stat_lock);
625f1c8d
LB
3415 if (stop_loop)
3416 break;
a2de733c 3417 }
ff023aac 3418out:
a2de733c 3419 /* push queued extents */
d9d181c1 3420 scrub_submit(sctx);
ff023aac
SB
3421 mutex_lock(&sctx->wr_ctx.wr_lock);
3422 scrub_wr_submit(sctx);
3423 mutex_unlock(&sctx->wr_ctx.wr_lock);
a2de733c 3424
e7786c3a 3425 blk_finish_plug(&plug);
a2de733c 3426 btrfs_free_path(path);
5a6ac9ea 3427 btrfs_free_path(ppath);
a2de733c
AJ
3428 return ret < 0 ? ret : 0;
3429}
3430
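/*
 * Map a chunk back to its extent_map and scrub the stripe of it that
 * lives on @scrub_dev at @dev_offset. A missing extent_map is tolerated
 * only when the block group has already been removed.
 */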
d9d181c1 3431static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
a36cf8b8 3432 struct btrfs_device *scrub_dev,
a36cf8b8 3433 u64 chunk_offset, u64 length,
020d5b73
FM
3434 u64 dev_offset,
3435 struct btrfs_block_group_cache *cache,
3436 int is_dev_replace)
a2de733c
AJ
3437{
3438 struct btrfs_mapping_tree *map_tree =
a36cf8b8 3439 &sctx->dev_root->fs_info->mapping_tree;
a2de733c
AJ
3440 struct map_lookup *map;
3441 struct extent_map *em;
3442 int i;
ff023aac 3443 int ret = 0;
a2de733c
AJ
3444
3445 read_lock(&map_tree->map_tree.lock);
3446 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3447 read_unlock(&map_tree->map_tree.lock);
3448
020d5b73
FM
3449 if (!em) {
3450 /*
3451 * Might have been an unused block group deleted by the cleaner
3452 * kthread or relocation.
3453 */
3454 spin_lock(&cache->lock);
3455 if (!cache->removed)
3456 ret = -EINVAL;
3457 spin_unlock(&cache->lock);
3458
3459 return ret;
3460 }
a2de733c 3461
95617d69 3462 map = em->map_lookup;
a2de733c
AJ
3463 if (em->start != chunk_offset)
3464 goto out;
3465
3466 if (em->len < length)
3467 goto out;
3468
3469 for (i = 0; i < map->num_stripes; ++i) {
a36cf8b8 3470 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
859acaf1 3471 map->stripes[i].physical == dev_offset) {
a36cf8b8 3472 ret = scrub_stripe(sctx, map, scrub_dev, i,
ff023aac
SB
3473 chunk_offset, length,
3474 is_dev_replace);
a2de733c
AJ
3475 if (ret)
3476 goto out;
3477 }
3478 }
3479out:
3480 free_extent_map(em);
3481
3482 return ret;
3483}
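
One detail worth noting in scrub_chunk(): lookup_extent_mapping() returns a referenced extent_map, taken under the tree's read lock, and every exit path must drop that reference. The pattern in isolation:

	struct extent_map *em;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (em) {
		/* ... use em->map_lookup, em->start, em->len ... */
		free_extent_map(em);	/* drop the lookup reference */
	}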
3484
3485static noinline_for_stack
a36cf8b8 3486int scrub_enumerate_chunks(struct scrub_ctx *sctx,
ff023aac
SB
3487 struct btrfs_device *scrub_dev, u64 start, u64 end,
3488 int is_dev_replace)
a2de733c
AJ
3489{
3490 struct btrfs_dev_extent *dev_extent = NULL;
3491 struct btrfs_path *path;
a36cf8b8 3492 struct btrfs_root *root = sctx->dev_root;
a2de733c
AJ
3493 struct btrfs_fs_info *fs_info = root->fs_info;
3494 u64 length;
a2de733c 3495 u64 chunk_offset;
55e3a601 3496 int ret = 0;
76a8efa1 3497 int ro_set;
a2de733c
AJ
3498 int slot;
3499 struct extent_buffer *l;
3500 struct btrfs_key key;
3501 struct btrfs_key found_key;
3502 struct btrfs_block_group_cache *cache;
ff023aac 3503 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
a2de733c
AJ
3504
3505 path = btrfs_alloc_path();
3506 if (!path)
3507 return -ENOMEM;
3508
e4058b54 3509 path->reada = READA_FORWARD;
a2de733c
AJ
3510 path->search_commit_root = 1;
3511 path->skip_locking = 1;
3512
a36cf8b8 3513 key.objectid = scrub_dev->devid;
a2de733c
AJ
3514 key.offset = 0ull;
3515 key.type = BTRFS_DEV_EXTENT_KEY;
3516
a2de733c
AJ
3517 while (1) {
3518 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3519 if (ret < 0)
8c51032f
AJ
3520 break;
3521 if (ret > 0) {
3522 if (path->slots[0] >=
3523 btrfs_header_nritems(path->nodes[0])) {
3524 ret = btrfs_next_leaf(root, path);
55e3a601
Z
3525 if (ret < 0)
3526 break;
3527 if (ret > 0) {
3528 ret = 0;
8c51032f 3529 break;
55e3a601
Z
3530 }
3531 } else {
3532 ret = 0;
8c51032f
AJ
3533 }
3534 }
a2de733c
AJ
3535
3536 l = path->nodes[0];
3537 slot = path->slots[0];
3538
3539 btrfs_item_key_to_cpu(l, &found_key, slot);
3540
a36cf8b8 3541 if (found_key.objectid != scrub_dev->devid)
a2de733c
AJ
3542 break;
3543
962a298f 3544 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
a2de733c
AJ
3545 break;
3546
3547 if (found_key.offset >= end)
3548 break;
3549
3550 if (found_key.offset < key.offset)
3551 break;
3552
3553 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3554 length = btrfs_dev_extent_length(l, dev_extent);
3555
ced96edc
QW
3556 if (found_key.offset + length <= start)
3557 goto skip;
a2de733c 3558
a2de733c
AJ
3559 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3560
3561 /*
3562 * get a reference on the corresponding block group to prevent
3563 * the chunk from going away while we scrub it
3564 */
3565 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
ced96edc
QW
3566
3567		/* some chunks are removed but not yet committed to disk;
3568		 * continue scrubbing */
3569 if (!cache)
3570 goto skip;
3571
55e3a601
Z
3572 /*
3573		 * we need to call btrfs_inc_block_group_ro() with scrubs paused
3574		 * to avoid a deadlock caused by:
3575 * btrfs_inc_block_group_ro()
3576 * -> btrfs_wait_for_commit()
3577 * -> btrfs_commit_transaction()
3578 * -> btrfs_scrub_pause()
3579 */
3580 scrub_pause_on(fs_info);
3581 ret = btrfs_inc_block_group_ro(root, cache);
3582 scrub_pause_off(fs_info);
76a8efa1
Z
3583
3584 if (ret == 0) {
3585 ro_set = 1;
3586 } else if (ret == -ENOSPC) {
3587 /*
3588			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
3589			 * fails to create a new chunk for metadata.
3590			 * That is not a problem for scrub/replace, because
3591			 * metadata is always COWed, and our scrub pauses
3592			 * transaction commits.
3593 */
3594 ro_set = 0;
3595 } else {
3596			btrfs_warn(fs_info, "failed setting block group ro, ret=%d",
3597 ret);
55e3a601
Z
3598 btrfs_put_block_group(cache);
3599 break;
3600 }
3601
ff023aac
SB
3602 dev_replace->cursor_right = found_key.offset + length;
3603 dev_replace->cursor_left = found_key.offset;
3604 dev_replace->item_needs_writeback = 1;
8c204c96 3605 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
020d5b73 3606 found_key.offset, cache, is_dev_replace);
ff023aac
SB
3607
3608 /*
3609		 * Flush and submit all pending read and write bios, then
3610		 * wait for them.
3611		 * Note that in the dev replace case, a read request causes
3612		 * write requests that are submitted in the read completion
3613		 * worker. Therefore it is required
3614 * that all write requests are flushed, so that all read and
3615 * write requests are really completed when bios_in_flight
3616 * changes to 0.
3617 */
3618 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3619 scrub_submit(sctx);
3620 mutex_lock(&sctx->wr_ctx.wr_lock);
3621 scrub_wr_submit(sctx);
3622 mutex_unlock(&sctx->wr_ctx.wr_lock);
3623
3624 wait_event(sctx->list_wait,
3625 atomic_read(&sctx->bios_in_flight) == 0);
b708ce96
Z
3626
3627 scrub_pause_on(fs_info);
12cf9372
WS
3628
3629 /*
3630		 * must be called before we decrease @scrub_paused.
3631		 * make sure we don't block transaction commit while
3632		 * we are waiting for pending workers to finish.
3633 */
ff023aac
SB
3634 wait_event(sctx->list_wait,
3635 atomic_read(&sctx->workers_pending) == 0);
12cf9372
WS
3636 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3637
b708ce96 3638 scrub_pause_off(fs_info);
ff023aac 3639
76a8efa1
Z
3640 if (ro_set)
3641 btrfs_dec_block_group_ro(root, cache);
ff023aac 3642
758f2dfc
FM
3643 /*
3644 * We might have prevented the cleaner kthread from deleting
3645 * this block group if it was already unused because we raced
3646 * and set it to RO mode first. So add it back to the unused
3647		 * list, otherwise it might never be deleted unless a manual
3648 * balance is triggered or it becomes used and unused again.
3649 */
3650 spin_lock(&cache->lock);
3651 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3652 btrfs_block_group_used(&cache->item) == 0) {
3653 spin_unlock(&cache->lock);
3654 spin_lock(&fs_info->unused_bgs_lock);
3655 if (list_empty(&cache->bg_list)) {
3656 btrfs_get_block_group(cache);
3657 list_add_tail(&cache->bg_list,
3658 &fs_info->unused_bgs);
3659 }
3660 spin_unlock(&fs_info->unused_bgs_lock);
3661 } else {
3662 spin_unlock(&cache->lock);
3663 }
3664
a2de733c
AJ
3665 btrfs_put_block_group(cache);
3666 if (ret)
3667 break;
af1be4f8
SB
3668 if (is_dev_replace &&
3669 atomic64_read(&dev_replace->num_write_errors) > 0) {
ff023aac
SB
3670 ret = -EIO;
3671 break;
3672 }
3673 if (sctx->stat.malloc_errors > 0) {
3674 ret = -ENOMEM;
3675 break;
3676 }
a2de733c 3677
539f358a
ID
3678 dev_replace->cursor_left = dev_replace->cursor_right;
3679 dev_replace->item_needs_writeback = 1;
ced96edc 3680skip:
a2de733c 3681 key.offset = found_key.offset + length;
71267333 3682 btrfs_release_path(path);
a2de733c
AJ
3683 }
3684
a2de733c 3685 btrfs_free_path(path);
8c51032f 3686
55e3a601 3687 return ret;
a2de733c
AJ
3688}
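
The walk above is driven entirely by the dev-extent key layout: dev extents live in the device tree keyed as (devid, BTRFS_DEV_EXTENT_KEY, byte offset on the device), so seeding the search as below and resuming at found_key.offset + length visits every extent of one device exactly once, in physical order ('devid' here is illustrative):

	struct btrfs_key key = {
		.objectid = devid,			/* device to walk */
		.type	  = BTRFS_DEV_EXTENT_KEY,
		.offset	  = 0,				/* device byte offset */
	};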
3689
a36cf8b8
SB
3690static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3691 struct btrfs_device *scrub_dev)
a2de733c
AJ
3692{
3693 int i;
3694 u64 bytenr;
3695 u64 gen;
3696 int ret;
a36cf8b8 3697 struct btrfs_root *root = sctx->dev_root;
a2de733c 3698
87533c47 3699 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
79787eaa
JM
3700 return -EIO;
3701
5f546063
MX
3702	/* Seed devices of a new filesystem have their own generation. */
3703 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3704 gen = scrub_dev->generation;
3705 else
3706 gen = root->fs_info->last_trans_committed;
a2de733c
AJ
3707
3708 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3709 bytenr = btrfs_sb_offset(i);
935e5cc9
MX
3710 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3711 scrub_dev->commit_total_bytes)
a2de733c
AJ
3712 break;
3713
d9d181c1 3714 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
a36cf8b8 3715 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
ff023aac 3716 NULL, 1, bytenr);
a2de733c
AJ
3717 if (ret)
3718 return ret;
3719 }
b6bfebc1 3720 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
a2de733c
AJ
3721
3722 return 0;
3723}
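
For reference, the placement that btrfs_sb_offset() implements: the primary superblock sits at 64KiB, and mirror i (i > 0) at 16KiB << (12 * i), i.e. 64MiB and 256GiB. A minimal userspace model:

#include <stdint.h>

/* minimal model of btrfs_sb_offset(); mirrors past the device's
 * commit_total_bytes simply don't exist, hence the break above */
static uint64_t sb_offset_model(int mirror)
{
	const uint64_t start = 16 * 1024;	/* 16KiB */

	if (mirror)
		return start << (12 * mirror);	/* 64MiB, 256GiB */
	return 64 * 1024;			/* primary copy at 64KiB */
}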
3724
3725/*
3726 * get a reference on fs_info->scrub_workers. Start the workers if necessary.
3727 */
ff023aac
SB
3728static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3729 int is_dev_replace)
a2de733c 3730{
6f011058 3731 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
0339ef2f 3732 int max_active = fs_info->thread_pool_size;
a2de733c 3733
632dd772 3734 if (fs_info->scrub_workers_refcnt == 0) {
ff023aac 3735 if (is_dev_replace)
0339ef2f 3736 fs_info->scrub_workers =
0de270fa 3737 btrfs_alloc_workqueue("scrub", flags,
0339ef2f 3738 1, 4);
ff023aac 3739 else
0339ef2f 3740 fs_info->scrub_workers =
0de270fa 3741 btrfs_alloc_workqueue("scrub", flags,
0339ef2f 3742 max_active, 4);
e82afc52
ZL
3743 if (!fs_info->scrub_workers)
3744 goto fail_scrub_workers;
3745
0339ef2f 3746 fs_info->scrub_wr_completion_workers =
0de270fa 3747 btrfs_alloc_workqueue("scrubwrc", flags,
0339ef2f 3748 max_active, 2);
e82afc52
ZL
3749 if (!fs_info->scrub_wr_completion_workers)
3750 goto fail_scrub_wr_completion_workers;
3751
0339ef2f 3752 fs_info->scrub_nocow_workers =
0de270fa 3753 btrfs_alloc_workqueue("scrubnc", flags, 1, 0);
e82afc52
ZL
3754 if (!fs_info->scrub_nocow_workers)
3755 goto fail_scrub_nocow_workers;
20b2e302 3756 fs_info->scrub_parity_workers =
0de270fa 3757 btrfs_alloc_workqueue("scrubparity", flags,
20b2e302 3758 max_active, 2);
e82afc52
ZL
3759 if (!fs_info->scrub_parity_workers)
3760 goto fail_scrub_parity_workers;
632dd772 3761 }
a2de733c 3762 ++fs_info->scrub_workers_refcnt;
e82afc52
ZL
3763 return 0;
3764
3765fail_scrub_parity_workers:
3766 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3767fail_scrub_nocow_workers:
3768 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3769fail_scrub_wr_completion_workers:
3770 btrfs_destroy_workqueue(fs_info->scrub_workers);
3771fail_scrub_workers:
3772 return -ENOMEM;
a2de733c
AJ
3773}
3774
aa1b8cd4 3775static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
a2de733c 3776{
ff023aac 3777 if (--fs_info->scrub_workers_refcnt == 0) {
0339ef2f
QW
3778 btrfs_destroy_workqueue(fs_info->scrub_workers);
3779 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3780 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
20b2e302 3781 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
ff023aac 3782 }
a2de733c 3783 WARN_ON(fs_info->scrub_workers_refcnt < 0);
a2de733c
AJ
3784}
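
The refcount lets several concurrent scrubs share one set of workqueues; callers pair get/put under scrub_lock. A simplified sketch of the pairing (the real caller, btrfs_scrub_dev() below, drops the lock while the scrub actually runs):

	mutex_lock(&fs_info->scrub_lock);
	ret = scrub_workers_get(fs_info, is_dev_replace);
	/* ... run the scrub ... */
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);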
3785
aa1b8cd4
SB
3786int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3787 u64 end, struct btrfs_scrub_progress *progress,
63a212ab 3788 int readonly, int is_dev_replace)
a2de733c 3789{
d9d181c1 3790 struct scrub_ctx *sctx;
a2de733c
AJ
3791 int ret;
3792 struct btrfs_device *dev;
5d68da3b 3793 struct rcu_string *name;
a2de733c 3794
aa1b8cd4 3795 if (btrfs_fs_closing(fs_info))
a2de733c
AJ
3796 return -EINVAL;
3797
aa1b8cd4 3798 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
b5d67f64
SB
3799 /*
3800		 * in this case scrub cannot calculate the checksum,
3801		 * given the way scrub is implemented. Do not handle this
3802		 * situation at all because it won't ever happen.
3803 */
efe120a0
FH
3804 btrfs_err(fs_info,
3805 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
aa1b8cd4 3806 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
b5d67f64
SB
3807 return -EINVAL;
3808 }
3809
aa1b8cd4 3810 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
b5d67f64 3811 /* not supported for data w/o checksums */
efe120a0
FH
3812 btrfs_err(fs_info,
3813 "scrub: size assumption sectorsize != PAGE_SIZE "
3814 "(%d != %lu) fails",
27f9f023 3815 fs_info->chunk_root->sectorsize, PAGE_SIZE);
a2de733c
AJ
3816 return -EINVAL;
3817 }
3818
7a9e9987
SB
3819 if (fs_info->chunk_root->nodesize >
3820 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3821 fs_info->chunk_root->sectorsize >
3822 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3823 /*
3824 * would exhaust the array bounds of pagev member in
3825 * struct scrub_block
3826 */
efe120a0
FH
3827 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3828 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
7a9e9987
SB
3829 fs_info->chunk_root->nodesize,
3830 SCRUB_MAX_PAGES_PER_BLOCK,
3831 fs_info->chunk_root->sectorsize,
3832 SCRUB_MAX_PAGES_PER_BLOCK);
3833 return -EINVAL;
3834 }
3835
a2de733c 3836
aa1b8cd4
SB
3837 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3838 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
63a212ab 3839 if (!dev || (dev->missing && !is_dev_replace)) {
aa1b8cd4 3840 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c
AJ
3841 return -ENODEV;
3842 }
a2de733c 3843
5d68da3b
MX
3844 if (!is_dev_replace && !readonly && !dev->writeable) {
3845 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3846 rcu_read_lock();
3847 name = rcu_dereference(dev->name);
3848 btrfs_err(fs_info, "scrub: device %s is not writable",
3849 name->str);
3850 rcu_read_unlock();
3851 return -EROFS;
3852 }
3853
3b7a016f 3854 mutex_lock(&fs_info->scrub_lock);
63a212ab 3855 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
a2de733c 3856 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 3857 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
aa1b8cd4 3858 return -EIO;
a2de733c
AJ
3859 }
3860
73beece9 3861 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
8dabb742
SB
3862 if (dev->scrub_device ||
3863 (!is_dev_replace &&
3864 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
73beece9 3865 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
a2de733c 3866 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 3867 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c
AJ
3868 return -EINPROGRESS;
3869 }
73beece9 3870 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3b7a016f
WS
3871
3872 ret = scrub_workers_get(fs_info, is_dev_replace);
3873 if (ret) {
3874 mutex_unlock(&fs_info->scrub_lock);
3875 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3876 return ret;
3877 }
3878
63a212ab 3879 sctx = scrub_setup_ctx(dev, is_dev_replace);
d9d181c1 3880 if (IS_ERR(sctx)) {
a2de733c 3881 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4
SB
3882 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3883 scrub_workers_put(fs_info);
d9d181c1 3884 return PTR_ERR(sctx);
a2de733c 3885 }
d9d181c1
SB
3886 sctx->readonly = readonly;
3887 dev->scrub_device = sctx;
3cb0929a 3888 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c 3889
3cb0929a
WS
3890 /*
3891	 * by checking @scrub_pause_req here, we can avoid a
3892	 * race between committing a transaction and scrubbing.
3893 */
cb7ab021 3894 __scrub_blocked_if_needed(fs_info);
a2de733c
AJ
3895 atomic_inc(&fs_info->scrubs_running);
3896 mutex_unlock(&fs_info->scrub_lock);
a2de733c 3897
ff023aac 3898 if (!is_dev_replace) {
9b011adf
WS
3899 /*
3900		 * by holding the device list mutex, we cannot race
3901		 * with super block writes kicked off by log tree sync.
3902 */
3cb0929a 3903 mutex_lock(&fs_info->fs_devices->device_list_mutex);
ff023aac 3904 ret = scrub_supers(sctx, dev);
3cb0929a 3905 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ff023aac 3906 }
a2de733c
AJ
3907
3908 if (!ret)
ff023aac
SB
3909 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3910 is_dev_replace);
a2de733c 3911
b6bfebc1 3912 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
a2de733c
AJ
3913 atomic_dec(&fs_info->scrubs_running);
3914 wake_up(&fs_info->scrub_pause_wait);
3915
b6bfebc1 3916 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
0ef8e451 3917
a2de733c 3918 if (progress)
d9d181c1 3919 memcpy(progress, &sctx->stat, sizeof(*progress));
a2de733c
AJ
3920
3921 mutex_lock(&fs_info->scrub_lock);
3922 dev->scrub_device = NULL;
3b7a016f 3923 scrub_workers_put(fs_info);
a2de733c
AJ
3924 mutex_unlock(&fs_info->scrub_lock);
3925
f55985f4 3926 scrub_put_ctx(sctx);
a2de733c
AJ
3927
3928 return ret;
3929}
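
A hypothetical caller, modelled on the BTRFS_IOC_SCRUB ioctl path: scrub one device end to end, read-only, and report the checksum error count:

	struct btrfs_scrub_progress progress = {0};
	int err;

	/* start == 0, end == (u64)-1 covers the whole device */
	err = btrfs_scrub_dev(fs_info, devid, 0, (u64)-1, &progress,
			      1 /* readonly */, 0 /* not dev-replace */);
	if (!err)
		printk("scrub done, csum errors: %llu\n",
		       (unsigned long long)progress.csum_errors);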
3930
143bede5 3931void btrfs_scrub_pause(struct btrfs_root *root)
a2de733c
AJ
3932{
3933 struct btrfs_fs_info *fs_info = root->fs_info;
3934
3935 mutex_lock(&fs_info->scrub_lock);
3936 atomic_inc(&fs_info->scrub_pause_req);
3937 while (atomic_read(&fs_info->scrubs_paused) !=
3938 atomic_read(&fs_info->scrubs_running)) {
3939 mutex_unlock(&fs_info->scrub_lock);
3940 wait_event(fs_info->scrub_pause_wait,
3941 atomic_read(&fs_info->scrubs_paused) ==
3942 atomic_read(&fs_info->scrubs_running));
3943 mutex_lock(&fs_info->scrub_lock);
3944 }
3945 mutex_unlock(&fs_info->scrub_lock);
a2de733c
AJ
3946}
3947
143bede5 3948void btrfs_scrub_continue(struct btrfs_root *root)
a2de733c
AJ
3949{
3950 struct btrfs_fs_info *fs_info = root->fs_info;
3951
3952 atomic_dec(&fs_info->scrub_pause_req);
3953 wake_up(&fs_info->scrub_pause_wait);
a2de733c
AJ
3954}
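
These two functions bracket transaction commit: the commit path parks every running scrub, writes out the transaction, then lets the scrubs resume. Roughly (the real call sites are in the transaction commit code):

	btrfs_scrub_pause(root);	/* wait until all scrubs are parked */
	/* ... write out the transaction ... */
	btrfs_scrub_continue(root);	/* wake the parked scrub workers */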
3955
aa1b8cd4 3956int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
a2de733c 3957{
a2de733c
AJ
3958 mutex_lock(&fs_info->scrub_lock);
3959 if (!atomic_read(&fs_info->scrubs_running)) {
3960 mutex_unlock(&fs_info->scrub_lock);
3961 return -ENOTCONN;
3962 }
3963
3964 atomic_inc(&fs_info->scrub_cancel_req);
3965 while (atomic_read(&fs_info->scrubs_running)) {
3966 mutex_unlock(&fs_info->scrub_lock);
3967 wait_event(fs_info->scrub_pause_wait,
3968 atomic_read(&fs_info->scrubs_running) == 0);
3969 mutex_lock(&fs_info->scrub_lock);
3970 }
3971 atomic_dec(&fs_info->scrub_cancel_req);
3972 mutex_unlock(&fs_info->scrub_lock);
3973
3974 return 0;
3975}
3976
aa1b8cd4
SB
3977int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3978 struct btrfs_device *dev)
49b25e05 3979{
d9d181c1 3980 struct scrub_ctx *sctx;
a2de733c
AJ
3981
3982 mutex_lock(&fs_info->scrub_lock);
d9d181c1
SB
3983 sctx = dev->scrub_device;
3984 if (!sctx) {
a2de733c
AJ
3985 mutex_unlock(&fs_info->scrub_lock);
3986 return -ENOTCONN;
3987 }
d9d181c1 3988 atomic_inc(&sctx->cancel_req);
a2de733c
AJ
3989 while (dev->scrub_device) {
3990 mutex_unlock(&fs_info->scrub_lock);
3991 wait_event(fs_info->scrub_pause_wait,
3992 dev->scrub_device == NULL);
3993 mutex_lock(&fs_info->scrub_lock);
3994 }
3995 mutex_unlock(&fs_info->scrub_lock);
3996
3997 return 0;
3998}
1623edeb 3999
a2de733c
AJ
4000int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
4001 struct btrfs_scrub_progress *progress)
4002{
4003 struct btrfs_device *dev;
d9d181c1 4004 struct scrub_ctx *sctx = NULL;
a2de733c
AJ
4005
4006 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
aa1b8cd4 4007 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
a2de733c 4008 if (dev)
d9d181c1
SB
4009 sctx = dev->scrub_device;
4010 if (sctx)
4011 memcpy(progress, &sctx->stat, sizeof(*progress));
a2de733c
AJ
4012 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
4013
d9d181c1 4014 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
a2de733c 4015}
ff023aac
SB
4016
4017static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4018 u64 extent_logical, u64 extent_len,
4019 u64 *extent_physical,
4020 struct btrfs_device **extent_dev,
4021 int *extent_mirror_num)
4022{
4023 u64 mapped_length;
4024 struct btrfs_bio *bbio = NULL;
4025 int ret;
4026
4027 mapped_length = extent_len;
4028 ret = btrfs_map_block(fs_info, READ, extent_logical,
4029 &mapped_length, &bbio, 0);
4030 if (ret || !bbio || mapped_length < extent_len ||
4031 !bbio->stripes[0].dev->bdev) {
6e9606d2 4032 btrfs_put_bbio(bbio);
ff023aac
SB
4033 return;
4034 }
4035
4036 *extent_physical = bbio->stripes[0].physical;
4037 *extent_mirror_num = bbio->mirror_num;
4038 *extent_dev = bbio->stripes[0].dev;
6e9606d2 4039 btrfs_put_bbio(bbio);
ff023aac
SB
4040}
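
The helper follows the usual btrfs_map_block() contract: the returned btrfs_bio is refcounted and must be released with btrfs_put_bbio() on every path, including errors. In isolation (use_stripe() is a hypothetical consumer):

	u64 mapped_length = extent_len;
	struct btrfs_bio *bbio = NULL;

	if (!btrfs_map_block(fs_info, READ, extent_logical,
			     &mapped_length, &bbio, 0) && bbio) {
		/* stripes[0] is the copy that would be read first */
		use_stripe(bbio->stripes[0].physical);
		btrfs_put_bbio(bbio);	/* drop the mapping reference */
	}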
4041
4042static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
4043 struct scrub_wr_ctx *wr_ctx,
4044 struct btrfs_fs_info *fs_info,
4045 struct btrfs_device *dev,
4046 int is_dev_replace)
4047{
4048 WARN_ON(wr_ctx->wr_curr_bio != NULL);
4049
4050 mutex_init(&wr_ctx->wr_lock);
4051 wr_ctx->wr_curr_bio = NULL;
4052 if (!is_dev_replace)
4053 return 0;
4054
4055 WARN_ON(!dev->bdev);
b54ffb73 4056 wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
ff023aac
SB
4057 wr_ctx->tgtdev = dev;
4058 atomic_set(&wr_ctx->flush_all_writes, 0);
4059 return 0;
4060}
4061
4062static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4063{
4064 mutex_lock(&wr_ctx->wr_lock);
4065 kfree(wr_ctx->wr_curr_bio);
4066 wr_ctx->wr_curr_bio = NULL;
4067 mutex_unlock(&wr_ctx->wr_lock);
4068}
4069
4070static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4071 int mirror_num, u64 physical_for_dev_replace)
4072{
4073 struct scrub_copy_nocow_ctx *nocow_ctx;
4074 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
4075
4076 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4077 if (!nocow_ctx) {
4078 spin_lock(&sctx->stat_lock);
4079 sctx->stat.malloc_errors++;
4080 spin_unlock(&sctx->stat_lock);
4081 return -ENOMEM;
4082 }
4083
4084 scrub_pending_trans_workers_inc(sctx);
4085
4086 nocow_ctx->sctx = sctx;
4087 nocow_ctx->logical = logical;
4088 nocow_ctx->len = len;
4089 nocow_ctx->mirror_num = mirror_num;
4090 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
9e0af237
LB
4091 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4092 copy_nocow_pages_worker, NULL, NULL);
652f25a2 4093 INIT_LIST_HEAD(&nocow_ctx->inodes);
0339ef2f
QW
4094 btrfs_queue_work(fs_info->scrub_nocow_workers,
4095 &nocow_ctx->work);
ff023aac
SB
4096
4097 return 0;
4098}
4099
652f25a2
JB
4100static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4101{
4102 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4103 struct scrub_nocow_inode *nocow_inode;
4104
4105 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4106 if (!nocow_inode)
4107 return -ENOMEM;
4108 nocow_inode->inum = inum;
4109 nocow_inode->offset = offset;
4110 nocow_inode->root = root;
4111 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4112 return 0;
4113}
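
record_inode_for_nocow() is the backref callback handed to iterate_inodes_from_logical(): it runs once per (inode number, file offset, subvolume root) triple that references the logical extent, and returning nonzero aborts the walk. A trivial alternative callback, purely for illustration:

	/* hypothetical callback, counts the referencing inodes */
	static int count_refs(u64 inum, u64 offset, u64 root, void *ctx)
	{
		(*(unsigned long *)ctx)++;
		return 0;	/* 0 == keep iterating */
	}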
4114
4115#define COPY_COMPLETE 1
4116
ff023aac
SB
4117static void copy_nocow_pages_worker(struct btrfs_work *work)
4118{
4119 struct scrub_copy_nocow_ctx *nocow_ctx =
4120 container_of(work, struct scrub_copy_nocow_ctx, work);
4121 struct scrub_ctx *sctx = nocow_ctx->sctx;
4122 u64 logical = nocow_ctx->logical;
4123 u64 len = nocow_ctx->len;
4124 int mirror_num = nocow_ctx->mirror_num;
4125 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4126 int ret;
4127 struct btrfs_trans_handle *trans = NULL;
4128 struct btrfs_fs_info *fs_info;
4129 struct btrfs_path *path;
4130 struct btrfs_root *root;
4131 int not_written = 0;
4132
4133 fs_info = sctx->dev_root->fs_info;
4134 root = fs_info->extent_root;
4135
4136 path = btrfs_alloc_path();
4137 if (!path) {
4138 spin_lock(&sctx->stat_lock);
4139 sctx->stat.malloc_errors++;
4140 spin_unlock(&sctx->stat_lock);
4141 not_written = 1;
4142 goto out;
4143 }
4144
4145 trans = btrfs_join_transaction(root);
4146 if (IS_ERR(trans)) {
4147 not_written = 1;
4148 goto out;
4149 }
4150
4151 ret = iterate_inodes_from_logical(logical, fs_info, path,
652f25a2 4152 record_inode_for_nocow, nocow_ctx);
ff023aac 4153 if (ret != 0 && ret != -ENOENT) {
efe120a0
FH
4154 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
4155 "phys %llu, len %llu, mir %u, ret %d",
118a0a25
GU
4156 logical, physical_for_dev_replace, len, mirror_num,
4157 ret);
ff023aac
SB
4158 not_written = 1;
4159 goto out;
4160 }
4161
652f25a2
JB
4162 btrfs_end_transaction(trans, root);
4163 trans = NULL;
4164 while (!list_empty(&nocow_ctx->inodes)) {
4165 struct scrub_nocow_inode *entry;
4166 entry = list_first_entry(&nocow_ctx->inodes,
4167 struct scrub_nocow_inode,
4168 list);
4169 list_del_init(&entry->list);
4170 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4171 entry->root, nocow_ctx);
4172 kfree(entry);
4173 if (ret == COPY_COMPLETE) {
4174 ret = 0;
4175 break;
4176 } else if (ret) {
4177 break;
4178 }
4179 }
ff023aac 4180out:
652f25a2
JB
4181 while (!list_empty(&nocow_ctx->inodes)) {
4182 struct scrub_nocow_inode *entry;
4183 entry = list_first_entry(&nocow_ctx->inodes,
4184 struct scrub_nocow_inode,
4185 list);
4186 list_del_init(&entry->list);
4187 kfree(entry);
4188 }
ff023aac
SB
4189 if (trans && !IS_ERR(trans))
4190 btrfs_end_transaction(trans, root);
4191 if (not_written)
4192 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4193 num_uncorrectable_read_errors);
4194
4195 btrfs_free_path(path);
4196 kfree(nocow_ctx);
4197
4198 scrub_pending_trans_workers_dec(sctx);
4199}
4200
32159242
GH
4201static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4202 u64 logical)
4203{
4204 struct extent_state *cached_state = NULL;
4205 struct btrfs_ordered_extent *ordered;
4206 struct extent_io_tree *io_tree;
4207 struct extent_map *em;
4208 u64 lockstart = start, lockend = start + len - 1;
4209 int ret = 0;
4210
4211 io_tree = &BTRFS_I(inode)->io_tree;
4212
ff13db41 4213 lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
32159242
GH
4214 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4215 if (ordered) {
4216 btrfs_put_ordered_extent(ordered);
4217 ret = 1;
4218 goto out_unlock;
4219 }
4220
4221 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4222 if (IS_ERR(em)) {
4223 ret = PTR_ERR(em);
4224 goto out_unlock;
4225 }
4226
4227 /*
4228	 * This extent does not actually cover the logical extent anymore;
4229 * move on to the next inode.
4230 */
4231 if (em->block_start > logical ||
4232 em->block_start + em->block_len < logical + len) {
4233 free_extent_map(em);
4234 ret = 1;
4235 goto out_unlock;
4236 }
4237 free_extent_map(em);
4238
4239out_unlock:
4240 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4241 GFP_NOFS);
4242 return ret;
4243}
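
The return convention matters at both call sites: negative is a hard error, 1 means the range is busy or the extent has moved (skip it quietly), 0 means the copy may proceed. Hence the callers fold positive returns to 0:

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;	/* >0: skip, <0: real error */
		goto out;
	}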
4244
652f25a2
JB
4245static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4246 struct scrub_copy_nocow_ctx *nocow_ctx)
ff023aac 4247{
826aa0a8 4248 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
ff023aac 4249 struct btrfs_key key;
826aa0a8
MX
4250 struct inode *inode;
4251 struct page *page;
ff023aac 4252 struct btrfs_root *local_root;
652f25a2 4253 struct extent_io_tree *io_tree;
ff023aac 4254 u64 physical_for_dev_replace;
32159242 4255 u64 nocow_ctx_logical;
652f25a2 4256 u64 len = nocow_ctx->len;
826aa0a8 4257 unsigned long index;
6f1c3605 4258 int srcu_index;
652f25a2
JB
4259 int ret = 0;
4260 int err = 0;
ff023aac
SB
4261
4262 key.objectid = root;
4263 key.type = BTRFS_ROOT_ITEM_KEY;
4264 key.offset = (u64)-1;
6f1c3605
LB
4265
4266 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4267
ff023aac 4268 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
6f1c3605
LB
4269 if (IS_ERR(local_root)) {
4270 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
ff023aac 4271 return PTR_ERR(local_root);
6f1c3605 4272 }
ff023aac
SB
4273
4274 key.type = BTRFS_INODE_ITEM_KEY;
4275 key.objectid = inum;
4276 key.offset = 0;
4277 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
6f1c3605 4278 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
ff023aac
SB
4279 if (IS_ERR(inode))
4280 return PTR_ERR(inode);
4281
edd1400b 4282	/* Avoid truncate/dio/punch hole. */
5955102c 4283 inode_lock(inode);
edd1400b
MX
4284 inode_dio_wait(inode);
4285
ff023aac 4286 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
652f25a2 4287 io_tree = &BTRFS_I(inode)->io_tree;
32159242 4288 nocow_ctx_logical = nocow_ctx->logical;
652f25a2 4289
32159242
GH
4290 ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4291 if (ret) {
4292 ret = ret > 0 ? 0 : ret;
4293 goto out;
652f25a2 4294 }
652f25a2 4295
09cbfeaf
KS
4296 while (len >= PAGE_SIZE) {
4297 index = offset >> PAGE_SHIFT;
edd1400b 4298again:
ff023aac
SB
4299 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4300 if (!page) {
efe120a0 4301 btrfs_err(fs_info, "find_or_create_page() failed");
ff023aac 4302 ret = -ENOMEM;
826aa0a8 4303 goto out;
ff023aac
SB
4304 }
4305
4306 if (PageUptodate(page)) {
4307 if (PageDirty(page))
4308 goto next_page;
4309 } else {
4310 ClearPageError(page);
32159242 4311 err = extent_read_full_page(io_tree, page,
652f25a2
JB
4312 btrfs_get_extent,
4313 nocow_ctx->mirror_num);
826aa0a8
MX
4314 if (err) {
4315 ret = err;
ff023aac
SB
4316 goto next_page;
4317 }
edd1400b 4318
26b25891 4319 lock_page(page);
edd1400b
MX
4320 /*
4321			 * If the page has been removed from the page cache,
4322			 * the data in it is meaningless, because it may be
4323			 * stale; the new data may have been written into a new
4324			 * page in the page cache.
4325 */
4326 if (page->mapping != inode->i_mapping) {
652f25a2 4327 unlock_page(page);
09cbfeaf 4328 put_page(page);
edd1400b
MX
4329 goto again;
4330 }
ff023aac
SB
4331 if (!PageUptodate(page)) {
4332 ret = -EIO;
4333 goto next_page;
4334 }
4335 }
32159242
GH
4336
4337 ret = check_extent_to_block(inode, offset, len,
4338 nocow_ctx_logical);
4339 if (ret) {
4340 ret = ret > 0 ? 0 : ret;
4341 goto next_page;
4342 }
4343
826aa0a8
MX
4344 err = write_page_nocow(nocow_ctx->sctx,
4345 physical_for_dev_replace, page);
4346 if (err)
4347 ret = err;
ff023aac 4348next_page:
826aa0a8 4349 unlock_page(page);
09cbfeaf 4350 put_page(page);
826aa0a8
MX
4351
4352 if (ret)
4353 break;
4354
09cbfeaf
KS
4355 offset += PAGE_SIZE;
4356 physical_for_dev_replace += PAGE_SIZE;
4357 nocow_ctx_logical += PAGE_SIZE;
4358 len -= PAGE_SIZE;
ff023aac 4359 }
652f25a2 4360 ret = COPY_COMPLETE;
826aa0a8 4361out:
5955102c 4362 inode_unlock(inode);
826aa0a8 4363 iput(inode);
ff023aac
SB
4364 return ret;
4365}
4366
4367static int write_page_nocow(struct scrub_ctx *sctx,
4368 u64 physical_for_dev_replace, struct page *page)
4369{
4370 struct bio *bio;
4371 struct btrfs_device *dev;
4372 int ret;
ff023aac
SB
4373
4374 dev = sctx->wr_ctx.tgtdev;
4375 if (!dev)
4376 return -EIO;
4377 if (!dev->bdev) {
94647322
DS
4378 btrfs_warn_rl(dev->dev_root->fs_info,
4379 "scrub write_page_nocow(bdev == NULL) is unexpected");
ff023aac
SB
4380 return -EIO;
4381 }
9be3395b 4382 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
ff023aac
SB
4383 if (!bio) {
4384 spin_lock(&sctx->stat_lock);
4385 sctx->stat.malloc_errors++;
4386 spin_unlock(&sctx->stat_lock);
4387 return -ENOMEM;
4388 }
4f024f37
KO
4389 bio->bi_iter.bi_size = 0;
4390 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
ff023aac 4391 bio->bi_bdev = dev->bdev;
09cbfeaf
KS
4392 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4393 if (ret != PAGE_SIZE) {
ff023aac
SB
4394leave_with_eio:
4395 bio_put(bio);
4396 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4397 return -EIO;
4398 }
ff023aac 4399
33879d45 4400 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
ff023aac
SB
4401 goto leave_with_eio;
4402
4403 bio_put(bio);
4404 return 0;
4405}
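
One detail worth calling out above: bi_sector is counted in 512-byte units regardless of the filesystem sector size, hence the >> 9. A minimal sketch of the same one-page synchronous write, assuming the allocation succeeds and the target bdev is valid:

	struct bio *bio = btrfs_io_bio_alloc(GFP_NOFS, 1);

	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical >> 9;	/* bytes -> 512B sectors */
	bio->bi_bdev = bdev;
	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE ||
	    btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		ret = -EIO;		/* short add or failed write */
	bio_put(bio);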