/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1;		/* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3;		/* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */

extern int zfs_txg_timeout;

static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
	NULL,
	dsl_pool_scrub_clean_cb
};

#define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
{							\
	(zb)->zb_objset = (objset);			\
	(zb)->zb_object = (object);			\
	(zb)->zb_level = (level);			\
	(zb)->zb_blkid = (blkid);			\
}
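
/*
 * Added commentary: a zbookmark_t names a block by
 * (objset, object, level, blkid).  For example,
 *
 *	zbookmark_t zb;
 *	SET_BOOKMARK(&zb, dsobj, 0, -1, 0);
 *
 * is how scrub_visit_rootbp() below identifies a dataset's root block
 * pointer: level -1 denotes the root bp (ZIL blocks also use -1), and
 * object 0 here stands for the objset itself.
 */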

/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;
	boolean_t complete = B_FALSE;

	dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
	ASSERT(*funcp > SCRUB_FUNC_NONE);
	ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		dp->dp_spa->spa_scrub_started = B_TRUE;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	/* older pools don't know about DMU_OT_SCRUB_QUEUE; use a plain ZAP */
	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
	    &dp->dp_scrub_queue_obj, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_min_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_max_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}
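
/*
 * Added commentary: the six ZAP entries written above persist the scrub
 * state in the MOS pool directory, so an in-progress scrub survives
 * export/import and reboot; dsl_pool_open() is expected to read them
 * back and resume from dp_scrub_bookmark.
 */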

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_setup_sync, dp, &func, 0));
}

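/*
 * Added commentary: this sync task serves double duty.  It is called
 * with *completep == B_TRUE from dsl_pool_scrub_sync() when a scrub
 * finishes, and with *completep == B_FALSE from dsl_pool_scrub_cancel()
 * and from dsl_pool_scrub_setup_sync() to clear any previous scrub's
 * on-disk and in-core state.
 */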
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	boolean_t *completep = arg2;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	mutex_enter(&dp->dp_scrub_cancel_lock);

	if (dp->dp_scrub_restart) {
		dp->dp_scrub_restart = B_FALSE;
		*completep = B_FALSE;
	}

	/* XXX this is scrub-clean specific */
	mutex_enter(&dp->dp_spa->spa_scrub_lock);
	while (dp->dp_spa->spa_scrub_inflight > 0) {
		cv_wait(&dp->dp_spa->spa_scrub_io_cv,
		    &dp->dp_spa->spa_scrub_lock);
	}
	mutex_exit(&dp->dp_spa->spa_scrub_lock);
	dp->dp_spa->spa_scrub_started = B_FALSE;
	dp->dp_spa->spa_scrub_active = B_FALSE;

	dp->dp_scrub_func = SCRUB_FUNC_NONE;
	VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
	    dp->dp_scrub_queue_obj, tx));
	dp->dp_scrub_queue_obj = 0;
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, tx));

	spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
	    "complete=%u", *completep);

	/* below is scrub-clean specific */
	vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
	    *completep);
	/*
	 * If the scrub/resilver completed, update all DTLs to reflect this.
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
	    *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
	if (dp->dp_scrub_min_txg && *completep)
		spa_event_notify(dp->dp_spa, NULL, ESC_ZFS_RESILVER_FINISH);
	spa_errlog_rotate(dp->dp_spa);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

	dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
	boolean_t complete = B_FALSE;

	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	/*
	 * This function will be used by the bp-rewrite wad to
	 * intercept frees.
	 */
	return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
	    done, private, arc_flags));
}

static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
	return (zb->zb_objset == 0 && zb->zb_object == 0 &&
	    zb->zb_level == 0 && zb->zb_blkid == 0);
}

/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
	uint64_t zb1nextL0, zb2thisobj;

	ASSERT(zb1->zb_objset == zb2->zb_objset);
	ASSERT(zb1->zb_object != -1ULL);
	ASSERT(zb2->zb_level == 0);

	/*
	 * A bookmark in the deadlist is considered to be after
	 * everything else.
	 */
	if (zb2->zb_object == -1ULL)
		return (B_TRUE);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	zb1nextL0 = (zb1->zb_blkid + 1) <<
	    ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

	zb2thisobj = zb2->zb_object ? zb2->zb_object :
	    zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

	if (zb1->zb_object == 0) {
		uint64_t nextobj = zb1nextL0 *
		    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
		return (nextobj <= zb2thisobj);
	}

	if (zb1->zb_object < zb2thisobj)
		return (B_TRUE);
	if (zb1->zb_object > zb2thisobj)
		return (B_FALSE);
	if (zb2->zb_object == 0)
		return (B_FALSE);
	return (zb1nextL0 <= zb2->zb_blkid);
}
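
/*
 * Worked example (added commentary, not in the original source): with
 * dn_indblkshift = 14 and SPA_BLKPTRSHIFT = 7, each indirect block holds
 * 2^(14 - 7) = 128 block pointers.  So for zb1 = (level 1, blkid 3),
 *
 *	zb1nextL0 = (3 + 1) << (1 * 7) = 512,
 *
 * i.e. zb1 covers L0 blkids 384..511, and zb1 is "before" any level-0
 * bookmark in the same object with zb_blkid >= 512.
 */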

static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
	int elapsed_ticks;
	int mintime;

	if (dp->dp_scrub_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb->zb_level != 0)
		return (B_FALSE);

	mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
	    zfs_scrub_min_time;
	elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
	if (elapsed_ticks > hz * zfs_txg_timeout ||
	    (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
		dprintf("pausing at %llx/%llx/%llx/%llx\n",
		    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
		    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
		dp->dp_scrub_pausing = B_TRUE;
		dp->dp_scrub_bookmark = *zb;
		return (B_TRUE);
	}
	return (B_FALSE);
}
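
/*
 * Added commentary on the pause policy above: within each txg sync the
 * scrub runs for at least `mintime` seconds (zfs_scrub_min_time or
 * zfs_resilver_min_time), then yields as soon as another thread is
 * waiting for the txg to sync, and unconditionally yields once
 * zfs_txg_timeout seconds have elapsed.  The bookmark saved here is what
 * scrub_visitbp() uses to skip already-visited blocks when resuming.
 */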

typedef struct zil_traverse_arg {
	dsl_pool_t *zta_dp;
	zil_header_t *zta_zh;
} zil_traverse_arg_t;

/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_traverse_arg_t *zta = arg;
	dsl_pool_t *dp = zta->zta_dp;
	zil_header_t *zh = zta->zta_zh;
	zbookmark_t zb;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return;

	zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
	VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_traverse_arg_t *zta = arg;
		dsl_pool_t *dp = zta->zta_dp;
		zil_header_t *zh = zta->zta_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth <= dp->dp_scrub_min_txg)
			return;

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return;

		zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
		zb.zb_object = lr->lr_foid;
		zb.zb_level = BP_GET_LEVEL(bp);
		zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
		VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
	}
}

static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_traverse_arg_t zta = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && (spa_mode & FWRITE))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
	    claim_txg);

	zil_free(zilog);
}
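
/*
 * Added commentary: ZIL blocks are not reachable from a dataset's block
 * tree, so they are visited here via zil_parse() instead.  Log blocks
 * get a bookmark with zb_level == -1 and zb_blkid set to the block's ZIL
 * sequence number; TX_WRITE records contribute the data blocks they
 * reference.  Because an unreplayed log can be freed at any time, the
 * scrub consumer marks these I/Os ZIO_FLAG_SPECULATIVE so that their
 * checksum errors are not counted (see dsl_pool_scrub_clean_cb() and
 * dsl_pool_scrub_clean_done()).
 */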

static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
	int err;
	arc_buf_t *buf = NULL;

	if (bp->blk_birth == 0)
		return;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	if (scrub_pause(dp, zb))
		return;

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg), don't bother doing it again.
		 */
		if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
			return;

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
		}
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		cbp = buf->b_data;

		for (i = 0; i < epb; i++, cbp++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			scrub_visitbp(dp, dnp, buf, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *child_dnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		child_dnp = buf->b_data;

		for (i = 0; i < epb; i++, child_dnp++) {
			for (j = 0; j < child_dnp->dn_nblkptr; j++) {
				zbookmark_t czb;

				SET_BOOKMARK(&czb, zb->zb_objset,
				    zb->zb_blkid * epb + i,
				    child_dnp->dn_nlevels - 1, j);
				scrub_visitbp(dp, child_dnp, buf,
				    &child_dnp->dn_blkptr[j], &czb);
			}
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		int j;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}

		osp = buf->b_data;

		traverse_zil(dp, &osp->os_zil_header);

		for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, 0,
			    osp->os_meta_dnode.dn_nlevels - 1, j);
			scrub_visitbp(dp, &osp->os_meta_dnode, buf,
			    &osp->os_meta_dnode.dn_blkptr[j], &czb);
		}
	}

	(void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}
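
/*
 * Added commentary: the recursion above performs a depth-first, in-order
 * walk of a dataset, e.g.
 *
 *	objset_phys_t (level -1)
 *	  -> meta-dnode indirects -> dnode blocks (DMU_OT_DNODE)
 *	       -> each object's indirects -> level-0 data blocks
 *
 * invoking the scrub callback on each block pointer after its children,
 * so a bookmark saved at any level-0 block cleanly partitions the walk
 * into "already visited" and "not yet visited" (see bookmark_is_before()).
 */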

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
	scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) != 0) {
		return;
	}

	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		dp->dp_scrub_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
	}
}

void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
		dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
	} else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
		dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
	}

	if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds1->ds_object, tx) == 0) {
		int err = zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int(dp->dp_meta_objset,
			    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
		}
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds2->ds_object, tx) == 0) {
		VERIFY(0 == zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
	}
}
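
/*
 * Added commentary: the swap logic above exchanges the two datasets'
 * scrub-queue membership, since their object numbers have effectively
 * traded contents:
 *
 *	in queue before: { ds1 }      -> after: { ds2 }
 *	in queue before: { ds2 }      -> after: { ds1 }
 *	in queue before: { ds1, ds2 } -> after: { ds1, ds2 }
 *	in queue before: { }          -> after: { }
 */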

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);
	dp = ds->ds_dir->dd_pool;

	if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
		while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
			dsl_dataset_t *prev;
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

			dsl_dataset_rele(ds, FTAG);
			if (err)
				return (err);
			ds = prev;
		}
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_object, eca->tx) == 0);
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_dataset_t *ds;
	uint64_t min_txg_save;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	/*
	 * Iterate over the bps in this ds.
	 */
	min_txg_save = dp->dp_scrub_min_txg;
	dp->dp_scrub_min_txg =
	    MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
	scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
	dp->dp_scrub_min_txg = min_txg_save;

	if (dp->dp_scrub_pausing)
		goto out;

	/*
	 * Add descendant datasets to the work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			(void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
			    NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
		} else {
			VERIFY(zap_join(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    dp->dp_scrub_queue_obj, tx) == 0);
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);

	dp = ds->ds_dir->dd_pool;

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

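/*
 * Added commentary: dsl_pool_scrub_sync() is the per-txg driver.  Each
 * sync it (1) visits the MOS and the $ORIGIN snapshot (or, on old pools,
 * enqueues every filesystem's oldest snapshot via enqueue_cb), (2)
 * resumes whatever dataset it was paused in, and (3) drains the ZAP work
 * queue, pausing when scrub_pause() says time is up and persisting the
 * bookmark and error count so the next txg (or next import) can pick up
 * where it left off.
 */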
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	boolean_t complete = B_TRUE;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	/* If the spa is not fully loaded, don't bother. */
	if (dp->dp_spa->spa_load_state != SPA_LOAD_NONE)
		return;

	if (dp->dp_scrub_restart) {
		enum scrub_func func = dp->dp_scrub_func;
		dp->dp_scrub_restart = B_FALSE;
		dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
	}

	if (dp->dp_spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
		/*
		 * We must have resumed after rebooting; reset the vdev
		 * stats to know that we're doing a scrub (although it
		 * will think we're just starting now).
		 */
		vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);
	}

	dp->dp_scrub_pausing = B_FALSE;
	dp->dp_scrub_start_time = lbolt64;
	dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
	dp->dp_spa->spa_scrub_active = B_TRUE;

	if (dp->dp_scrub_bookmark.zb_objset == 0) {
		/* First do the MOS & ORIGIN */
		scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
		if (dp->dp_scrub_pausing)
			goto out;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY(0 == dmu_objset_find_spa(dp->dp_spa,
			    NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!dp->dp_scrub_pausing);
	} else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset will
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
		if (dp->dp_scrub_pausing)
			goto out;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		VERIFY(0 == zap_remove(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, za.za_name, tx));
		scrub_visitds(dp, za.za_first_integer, tx);
		if (dp->dp_scrub_pausing)
			break;
		zap_cursor_fini(&zc);
	}
	zap_cursor_fini(&zc);
	if (dp->dp_scrub_pausing)
		goto out;

	/* done. */

	dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
	return;
out:
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	/* XXX this is scrub-clean specific */
	mutex_enter(&dp->dp_spa->spa_scrub_lock);
	while (dp->dp_spa->spa_scrub_inflight > 0) {
		cv_wait(&dp->dp_spa->spa_scrub_io_cv,
		    &dp->dp_spa->spa_scrub_lock);
	}
	mutex_exit(&dp->dp_spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
	mutex_enter(&dp->dp_scrub_cancel_lock);
	dp->dp_scrub_restart = B_TRUE;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}
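
/*
 * Added commentary: the four loop iterations above tally each block into
 * the (level, type), (level, DMU_OT_TOTAL), (DN_MAX_LEVELS, type) and
 * (DN_MAX_LEVELS, DMU_OT_TOTAL) buckets, so zab_type carries per-level,
 * per-type, and grand totals in a single pass.  For the ditto counts,
 * `equal` sums three pairwise vdev comparisons among the block's three
 * DVAs: equal == 1 means exactly two copies share a vdev, equal == 3
 * means all three do (equal == 2 is impossible, since vdev equality is
 * transitive).
 */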

static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
		spa->spa_scrub_errors++;
	mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	size_t size = BP_GET_LSIZE(bp);
	int d;
	spa_t *spa = dp->dp_spa;
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
	int zio_priority;

	count_block(dp->dp_blkstats, bp);

	if (dp->dp_scrub_isresilver == 0) {
		/* It's a scrub */
		zio_flags |= ZIO_FLAG_SCRUB;
		zio_priority = ZIO_PRIORITY_SCRUB;
		needs_io = B_TRUE;
	} else {
		/* It's a resilver */
		zio_flags |= ZIO_FLAG_RESILVER;
		zio_priority = ZIO_PRIORITY_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_examined +=
		    DVA_GET_ASIZE(&bp->blk_dva[d]);
		mutex_exit(&vd->vdev_stat_lock);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best we can do is look at the
				 * pool-wide DTL.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that this can't
				 * happen.
				 */
				vd = spa->spa_root_vdev;
			}
			needs_io = vdev_dtl_contains(&vd->vdev_dtl_map,
			    bp->blk_birth, 1);
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		void *data = zio_data_buf_alloc(size);

		/* throttle: cap the number of scrub I/Os in flight */
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_pool_scrub_clean_done, NULL, zio_priority,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
	/*
	 * Purge all vdev caches.  We do this here rather than in sync
	 * context because this requires a writer lock on the spa_config
	 * lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
	dp->dp_spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(dp->dp_spa->spa_root_vdev);
	dp->dp_spa->spa_scrub_reopen = B_FALSE;
	spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

	return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}