/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1;		/* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3;		/* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */

extern int zfs_txg_timeout;

/* Indexed by enum scrub_func; SCRUB_FUNC_NONE has no callback. */
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
	NULL,
	dsl_pool_scrub_clean_cb
};

#define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
{							\
	(zb)->zb_objset = objset;			\
	(zb)->zb_object = object;			\
	(zb)->zb_level = level;				\
	(zb)->zb_blkid = blkid;				\
}

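/*
 * Set up a new scrub or resilver in syncing context: cancel any scrub
 * already in progress, compute the txg range to examine (for a
 * resilver, vdev_resilver_needed() supplies it), and persist the new
 * scrub state in the MOS pool directory so that it survives export
 * and reboot.
 */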
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;
	boolean_t complete = B_FALSE;

	dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
	ASSERT(*funcp > SCRUB_FUNC_NONE);
	ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		} else {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_SCRUB_START);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		dp->dp_spa->spa_scrub_started = B_TRUE;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
	    &dp->dp_scrub_queue_obj, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_min_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_max_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_setup_sync, dp, &func, 0));
}

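/*
 * Tear down the current scrub in syncing context, on normal
 * completion as well as cancellation (*arg2 says which): drain
 * in-flight scrub i/o, free the work queue object, remove the
 * persistent scrub state from the MOS, and reassess the vdev DTLs.
 */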
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	boolean_t *completep = arg2;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	mutex_enter(&dp->dp_scrub_cancel_lock);

	if (dp->dp_scrub_restart) {
		dp->dp_scrub_restart = B_FALSE;
		*completep = B_FALSE;
	}

	/* XXX this is scrub-clean specific */
	mutex_enter(&dp->dp_spa->spa_scrub_lock);
	while (dp->dp_spa->spa_scrub_inflight > 0) {
		cv_wait(&dp->dp_spa->spa_scrub_io_cv,
		    &dp->dp_spa->spa_scrub_lock);
	}
	mutex_exit(&dp->dp_spa->spa_scrub_lock);
	dp->dp_spa->spa_scrub_started = B_FALSE;
	dp->dp_spa->spa_scrub_active = B_FALSE;

	dp->dp_scrub_func = SCRUB_FUNC_NONE;
	VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
	    dp->dp_scrub_queue_obj, tx));
	dp->dp_scrub_queue_obj = 0;
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, tx));

	spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
	    "complete=%u", *completep);

	/* below is scrub-clean specific */
	vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
	    *completep);

	/*
	 * If the scrub/resilver completed, update all DTLs to reflect this.
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
	    *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
	if (*completep)
		spa_event_notify(dp->dp_spa, NULL, dp->dp_scrub_min_txg ?
		    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
	spa_errlog_rotate(dp->dp_spa);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

	dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
	boolean_t complete = B_FALSE;

	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	/*
	 * This function will be used by bp-rewrite wad to intercept frees.
	 */
	return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
	    done, private, arc_flags));
}

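/*
 * Bookmark helpers for pause/resume.  A zeroed bookmark means no
 * pause is pending; while resuming, bookmark_is_before() decides
 * whether a block was already visited before the scrub paused.
 */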
static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
	return (zb->zb_objset == 0 && zb->zb_object == 0 &&
	    zb->zb_level == 0 && zb->zb_blkid == 0);
}

/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
	uint64_t zb1nextL0, zb2thisobj;

	ASSERT(zb1->zb_objset == zb2->zb_objset);
	ASSERT(zb1->zb_object != -1ULL);
	ASSERT(zb2->zb_level == 0);

	/*
	 * A bookmark in the deadlist is considered to be after
	 * everything else.
	 */
	if (zb2->zb_object == -1ULL)
		return (B_TRUE);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	zb1nextL0 = (zb1->zb_blkid + 1) <<
	    ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

	zb2thisobj = zb2->zb_object ? zb2->zb_object :
	    zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

	if (zb1->zb_object == 0) {
		uint64_t nextobj = zb1nextL0 *
		    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
		return (nextobj <= zb2thisobj);
	}

	if (zb1->zb_object < zb2thisobj)
		return (B_TRUE);
	if (zb1->zb_object > zb2thisobj)
		return (B_FALSE);
	if (zb2->zb_object == 0)
		return (B_FALSE);
	return (zb1nextL0 <= zb2->zb_blkid);
}

static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
	int64_t elapsed_ticks;
	int mintime;

	if (dp->dp_scrub_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb->zb_level != 0)
		return (B_FALSE);

	mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
	    zfs_scrub_min_time;
	elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
	if (elapsed_ticks > hz * zfs_txg_timeout ||
	    (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
		dprintf("pausing at %llx/%llx/%llx/%llx\n",
		    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
		    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
		dp->dp_scrub_pausing = B_TRUE;
		dp->dp_scrub_bookmark = *zb;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_traverse_arg {
	dsl_pool_t	*zta_dp;
	zil_header_t	*zta_zh;
} zil_traverse_arg_t;

/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_traverse_arg_t *zta = arg;
	dsl_pool_t *dp = zta->zta_dp;
	zil_header_t *zh = zta->zta_zh;
	zbookmark_t zb;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return;

	zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
	VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_traverse_arg_t *zta = arg;
		dsl_pool_t *dp = zta->zta_dp;
		zil_header_t *zh = zta->zta_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth <= dp->dp_scrub_min_txg)
			return;

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return;

		zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
		zb.zb_object = lr->lr_foid;
		zb.zb_level = BP_GET_LEVEL(bp);
		zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
		VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
	}
}

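/*
 * Scrub the blocks referenced from this objset's intent log.  ZIL
 * blocks matter only between claim and replay, so they are walked
 * with zil_parse() rather than through the regular block-pointer
 * traversal in scrub_visitbp().
 */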
static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_traverse_arg_t zta = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
	    claim_txg);

	zil_free(zilog);
}

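/*
 * The heart of the traversal: recursively visit bp and everything it
 * points to.  Indirect blocks descend into their child block pointers,
 * dnode blocks into each dnode's block pointers, and objset blocks
 * into the intent log and the meta-dnode; the scrub callback is
 * invoked on the block itself last.  scrub_pause() is consulted on the
 * way down, so a long traversal can stop at a level-0 bookmark and
 * resume in a later txg.
 */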
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
	int err;
	arc_buf_t *buf = NULL;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	if (scrub_pause(dp, zb))
		return;

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg), don't bother doing it again.
		 */
		if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
			return;

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
		}
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		blkptr_t *cbp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		cbp = buf->b_data;

		for (i = 0; i < epb; i++, cbp++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			scrub_visitbp(dp, dnp, buf, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *child_dnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		child_dnp = buf->b_data;

		for (i = 0; i < epb; i++, child_dnp++) {
			for (j = 0; j < child_dnp->dn_nblkptr; j++) {
				zbookmark_t czb;

				SET_BOOKMARK(&czb, zb->zb_objset,
				    zb->zb_blkid * epb + i,
				    child_dnp->dn_nlevels - 1, j);
				scrub_visitbp(dp, child_dnp, buf,
				    &child_dnp->dn_blkptr[j], &czb);
			}
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		int j;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}

		osp = buf->b_data;

		traverse_zil(dp, &osp->os_zil_header);

		for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, 0,
			    osp->os_meta_dnode.dn_nlevels - 1, j);
			scrub_visitbp(dp, &osp->os_meta_dnode, buf,
			    &osp->os_meta_dnode.dn_blkptr[j], &czb);
		}
	}

	(void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
	scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) != 0) {
		return;
	}

	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		dp->dp_scrub_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
	}
}

void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
		dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
	} else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
		dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
	}

	if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds1->ds_object, tx) == 0) {
		int err = zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int(dp->dp_meta_objset,
			    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
		}
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds2->ds_object, tx) == 0) {
		VERIFY(0 == zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
	}
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

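/*
 * dmu_objset_find_spa() callback used on pools that predate
 * SPA_VERSION_DSL_SCRUB (which have no next-clones list): for each
 * dataset cloned off the origin snapshot, walk back through its
 * snapshots to the one born immediately after the origin and enqueue
 * that.
 */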
/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);
	dp = ds->ds_dir->dd_pool;

	if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
		while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
			dsl_dataset_t *prev;
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

			dsl_dataset_rele(ds, FTAG);
			if (err)
				return (err);
			ds = prev;
		}
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_object, eca->tx) == 0);
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_dataset_t *ds;
	uint64_t min_txg_save;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	/*
	 * Iterate over the bps in this ds.
	 */
	min_txg_save = dp->dp_scrub_min_txg;
	dp->dp_scrub_min_txg =
	    MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
	scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
	dp->dp_scrub_min_txg = min_txg_save;

	if (dp->dp_scrub_pausing)
		goto out;

	/*
	 * Add descendent datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			(void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
			    NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
		} else {
			VERIFY(zap_join(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    dp->dp_scrub_queue_obj, tx) == 0);
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

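/*
 * dmu_objset_find_spa() callback used to seed the work queue on
 * pre-SPA_VERSION_DSL_SCRUB pools: walk each filesystem back to its
 * oldest snapshot and enqueue that; clones are recognized by their
 * parent snapshot pointing at a different next-snapshot, and are
 * skipped.
 */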
/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);

	dp = ds->ds_dir->dd_pool;

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

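/*
 * Called from the pool's sync path once per txg while a scrub is
 * active: handle any requested restart, visit the MOS first, seed the
 * work queue (on old pools by scanning every filesystem, otherwise by
 * starting from the origin snapshot), then keep pulling datasets off
 * the queue until the traversal finishes or pauses.  On pause the
 * bookmark and error count are persisted; on completion
 * dsl_pool_scrub_cancel_sync() retires the scrub.
 */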
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_t *spa = dp->dp_spa;
	zap_cursor_t zc;
	zap_attribute_t za;
	boolean_t complete = B_TRUE;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	/*
	 * If the pool is not loaded, or is trying to unload, leave it alone.
	 */
	if (spa->spa_load_state != SPA_LOAD_NONE || spa_shutting_down(spa))
		return;

	if (dp->dp_scrub_restart) {
		enum scrub_func func = dp->dp_scrub_func;
		dp->dp_scrub_restart = B_FALSE;
		dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
	}

	if (spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
		/*
		 * We must have resumed after rebooting; reset the vdev
		 * stats to know that we're doing a scrub (although it
		 * will think we're just starting now).
		 */
		vdev_scrub_stat_update(spa->spa_root_vdev,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);
	}

	dp->dp_scrub_pausing = B_FALSE;
	dp->dp_scrub_start_time = lbolt64;
	dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
	spa->spa_scrub_active = B_TRUE;

	if (dp->dp_scrub_bookmark.zb_objset == 0) {
		/* First do the MOS & ORIGIN */
		scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
		if (dp->dp_scrub_pausing)
			goto out;

		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY(0 == dmu_objset_find_spa(spa,
			    NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!dp->dp_scrub_pausing);
	} else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
		/*
		 * If we were paused, continue from here. Note if the
		 * ds we were paused on was deleted, the zb_objset will
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
		if (dp->dp_scrub_pausing)
			goto out;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		VERIFY(0 == zap_remove(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, za.za_name, tx));
		scrub_visitds(dp, za.za_first_integer, tx);
		if (dp->dp_scrub_pausing)
			break;
		zap_cursor_fini(&zc);
	}
	zap_cursor_fini(&zc);
	if (dp->dp_scrub_pausing)
		goto out;

	/* done. */

	dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
	return;
out:
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &spa->spa_scrub_errors, tx));

	/* XXX this is scrub-clean specific */
	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight > 0)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	mutex_exit(&spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
	mutex_enter(&dp->dp_scrub_cancel_lock);
	dp->dp_scrub_restart = B_TRUE;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}

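/*
 * scrub consumers
 */

/*
 * Tally one block into the pool-wide block statistics.  Each block is
 * counted four ways: by (level, type), (level, DMU_OT_TOTAL), and the
 * same two with the level pinned to DN_MAX_LEVELS, which serves as the
 * all-levels row.  The ditto counters record how many DVAs of a block
 * landed on the same top-level vdev.
 */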
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

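/*
 * Completion callback for the scrub/resilver reads issued in
 * dsl_pool_scrub_clean_cb(): free the data buffer, drop the inflight
 * count (waking any throttled issuer), and count every error except a
 * checksum error on a speculative (intent log) read.
 */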
static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
		spa->spa_scrub_errors++;
	mutex_exit(&spa->spa_scrub_lock);
}

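/*
 * The SCRUB_FUNC_CLEAN callback, invoked on every block the traversal
 * visits: record block statistics, then decide whether to reread the
 * block.  A scrub rereads everything; a resilver rereads only blocks
 * that a vdev's DTL says may need repair, plus gang blocks, whose
 * members may live on several vdevs.  Reads are throttled against
 * spa_scrub_maxinflight and complete in dsl_pool_scrub_clean_done().
 */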
static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int zio_priority;

	ASSERT(bp->blk_birth > dp->dp_scrub_min_txg);

	if (bp->blk_birth >= dp->dp_scrub_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (dp->dp_scrub_isresilver == 0) {
		/* It's a scrub */
		zio_flags |= ZIO_FLAG_SCRUB;
		zio_priority = ZIO_PRIORITY_SCRUB;
		needs_io = B_TRUE;
	} else {
		/* It's a resilver */
		zio_flags |= ZIO_FLAG_RESILVER;
		zio_priority = ZIO_PRIORITY_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_examined +=
		    DVA_GET_ASIZE(&bp->blk_dva[d]);
		mutex_exit(&vd->vdev_stat_lock);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    bp->blk_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_pool_scrub_clean_done, NULL, zio_priority,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
	/*
	 * Purge all vdev caches.  We do this here rather than in sync
	 * context because this requires a writer lock on the spa_config
	 * lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
	dp->dp_spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(dp->dp_spa->spa_root_vdev);
	dp->dp_spa->spa_scrub_reopen = B_FALSE;
	spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

	return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}