* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
+#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
+#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
+#include <sys/dbuf.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
-static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
+static uint64_t dsl_scan_count_data_disks(spa_t *spa);
+static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);
-extern int zfs_vdev_async_write_active_min_dirty_percent;
+extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
+static int zfs_scan_blkstats = 0;
+
+/*
+ * 'zpool status' uses bytes processed per pass to report throughput and
+ * estimate time remaining. We define a pass to start when the scanning
+ * phase completes for a sequential resilver. Optionally, this value
+ * may be used to reset the pass statistics every N txgs to provide an
+ * estimated completion time based on currently observed performance.
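+ *
+ * For example (illustrative): setting zfs_scan_report_txgs=100 would
+ * reset the throughput averaging window every 100 txgs, while the
+ * default of 0 keeps per-pass statistics for the whole pass.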
+ */
+static uint_t zfs_scan_report_txgs = 0;
/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
*/
-int zfs_scan_strict_mem_lim = B_FALSE;
+static int zfs_scan_strict_mem_lim = B_FALSE;
/*
 * Maximum number of parallelly executed bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues to cause long latency,
 * which would cause long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
*/
-unsigned long zfs_scan_vdev_limit = 4 << 20;
+static uint64_t zfs_scan_vdev_limit = 16 << 20;
+
-int zfs_scan_issue_strategy = 0;
+static uint_t zfs_scan_issue_strategy = 0;
-int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
-unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
+/* don't queue & sort zios, go direct */
+static int zfs_scan_legacy = B_FALSE;
+static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
-int zfs_scan_fill_weight = 3;
+static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
-uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
-uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
-int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */
-int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */
-
-int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
-int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */
-int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
-int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
-int zfs_scan_checkpoint_intval = 7200; /* in seconds */
+static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
+static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
+
+/* fraction of physmem */
+static uint_t zfs_scan_mem_lim_fact = 20;
+
+/* fraction of mem lim above */
+static uint_t zfs_scan_mem_lim_soft_fact = 20;
+
+/* minimum milliseconds to scrub per txg */
+static uint_t zfs_scrub_min_time_ms = 1000;
+
+/* minimum milliseconds to obsolete per txg */
+static uint_t zfs_obsolete_min_time_ms = 500;
+
+/* minimum milliseconds to free per txg */
+static uint_t zfs_free_min_time_ms = 1000;
+
+/* minimum milliseconds to resilver per txg */
+static uint_t zfs_resilver_min_time_ms = 3000;
+
+static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
-int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
-int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
-enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
+static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
+static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
+static const ddt_class_t zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
-unsigned long zfs_async_block_max_blocks = ULONG_MAX;
+static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
-unsigned long zfs_max_async_dedup_frees = 100000;
+static uint64_t zfs_max_async_dedup_frees = 100000;
-int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */
+/* set to disable resilver deferring */
+static int zfs_resilver_disable_defer = B_FALSE;
/*
* We wait a few txgs after importing a pool to begin scanning so that
/*
* Enable/disable the processing of the free_bpobj object.
*/
-int zfs_free_bpobj_enabled = 1;
+static int zfs_free_bpobj_enabled = 1;
+
+/* Error blocks to be scrubbed in one txg. */
+static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
/*
* This controls what conditions are placed on dsl_scan_sync_state():
- * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
- * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
- * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
+ * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
+ * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
+ * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
* See dsl_scan_sync_state for details.
*/
* event of an error. This array must go at the end of the
* struct to allow this for the variable number of elements.
*/
- dva_t sio_dva[0];
+ dva_t sio_dva[];
} scan_io_t;
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
+ zio_t *q_zio; /* scn_zio_root child for waiting on IO */
/* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr;
	zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused;
+ uint64_t q_last_ext_addr;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
- bzero(bp, sizeof (*bp));
+ memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
- bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
+ memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
/*
* Calculate the max number of in-flight bytes for pool-wide
- * scanning operations (minimum 1MB). Limits for the issuing
- * phase are done per top-level vdev and are handled separately.
+ * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
+ * Limits for the issuing phase are done per top-level vdev and
+ * are handled separately.
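+ *
+ * For example (illustrative): with 8 data disks and zfs_scan_vdev_limit
+ * of 16MB this yields 128MB of in-flight scan I/O, clamped to at least
+ * 1MB and at most arc_c_max / 4.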
*/
- scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
- dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
+ scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
+ zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
+ err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
+ ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);
+
+ if (err != 0 && err != ENOENT)
+ return (err);
+
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
+
/*
* Detect if the pool contains the signature of #2094. If it
* does properly update the scn->scn_phys structure and notify
return (EOVERFLOW);
}
- bcopy(zaptmp, &scn->scn_phys,
+ memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
- scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
+ scn->scn_issued_before_pass = scn->scn_phys.scn_examined -
+ scn->scn_phys.scn_skipped;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
}
}
- bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
+ memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
}
spa_scan_stat_init(spa);
+ vdev_scan_stat_init(spa->spa_root_vdev);
+
return (0);
}
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
+boolean_t
+dsl_errorscrubbing(const dsl_pool_t *dp)
+{
+ dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;
+
+ return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
+ errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
+}
+
+boolean_t
+dsl_errorscrub_is_paused(const dsl_scan_t *scn)
+{
+ return (dsl_errorscrubbing(scn->scn_dp) &&
+ scn->errorscrub_phys.dep_paused_flags);
+}
+
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
+static void
+dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
+{
+ scn->errorscrub_phys.dep_cursor =
+ zap_cursor_serialize(&scn->errorscrub_cursor);
+
+ VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
+ &scn->errorscrub_phys, tx));
+}
+
+static void
+dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
+{
+ dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
+ pool_scan_func_t *funcp = arg;
+ dsl_pool_t *dp = scn->scn_dp;
+ spa_t *spa = dp->dp_spa;
+
+ ASSERT(!dsl_scan_is_running(scn));
+ ASSERT(!dsl_errorscrubbing(scn->scn_dp));
+ ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
+
+ memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
+ scn->errorscrub_phys.dep_func = *funcp;
+ scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
+ scn->errorscrub_phys.dep_start_time = gethrestime_sec();
+ scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
+ scn->errorscrub_phys.dep_examined = 0;
+ scn->errorscrub_phys.dep_errors = 0;
+ scn->errorscrub_phys.dep_cursor = 0;
+ zap_cursor_init_serialized(&scn->errorscrub_cursor,
+ spa->spa_meta_objset, spa->spa_errlog_last,
+ scn->errorscrub_phys.dep_cursor);
+
+ vdev_config_dirty(spa->spa_root_vdev);
+ spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);
+
+ dsl_errorscrub_sync_state(scn, tx);
+
+ spa_history_log_internal(spa, "error scrub setup", tx,
+ "func=%u mintxg=%u maxtxg=%llu",
+ *funcp, 0, (u_longlong_t)tx->tx_txg);
+}
+
+static int
+dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
+{
+ (void) arg;
+ dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
+
+ if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
+ return (SET_ERROR(EBUSY));
+ }
+
+ if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
+ return (ECANCELED);
+ }
+ return (0);
+}
+
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we can be running in the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
- * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
+ * (scn_queues_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
int i;
spa_t *spa = scn->scn_dp->dp_spa;
- ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
- if (scn->scn_bytes_pending == 0) {
+ ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
+ if (scn->scn_queues_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
- bcopy(&scn->scn_phys, &scn->scn_phys_cached,
+ memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
- if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd))
+ if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
+ dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(EBUSY));
return (0);
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
- bzero(&scn->scn_phys, sizeof (scn->scn_phys));
+ memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
+
+ /*
+ * If we are starting a fresh scrub, we erase the error scrub
+ * information from disk.
+ */
+ memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
+ dsl_errorscrub_sync_state(scn, tx);
+
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
+ vdev_scan_stat_init(spa->spa_root_vdev);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* back to the generic stuff */
- if (dp->dp_blkstats == NULL) {
- dp->dp_blkstats =
- vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
- mutex_init(&dp->dp_blkstats->zab_lock, NULL,
- MUTEX_DEFAULT, NULL);
+ if (zfs_scan_blkstats) {
+ if (dp->dp_blkstats == NULL) {
+ dp->dp_blkstats =
+ vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
+ }
+ memset(&dp->dp_blkstats->zab_type, 0,
+ sizeof (dp->dp_blkstats->zab_type));
+ } else {
+ if (dp->dp_blkstats) {
+ vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
+ dp->dp_blkstats = NULL;
+ }
}
- bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
- bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
+ memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
}
/*
- * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
- * Can also be called to resume a paused scrub.
+ * Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
+ * error scrub or resilver. Can also be called to resume a paused scrub or
+ * error scrub.
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
return (0);
}
+ if (func == POOL_SCAN_ERRORSCRUB) {
+ if (dsl_errorscrub_is_paused(dp->dp_scan)) {
+ /*
+ * got error scrub start cmd, resume paused error scrub.
+ */
+ int err = dsl_scrub_set_pause_resume(scn->scn_dp,
+ POOL_SCRUB_NORMAL);
+ if (err == 0) {
+ spa_event_notify(spa, NULL, NULL,
+ ESC_ZFS_ERRORSCRUB_RESUME);
+ return (ECANCELED);
+ }
+ return (SET_ERROR(err));
+ }
+
+ return (dsl_sync_task(spa_name(dp->dp_spa),
+ dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
+ &func, 0, ZFS_SPACE_CHECK_RESERVED));
+ }
+
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (SET_ERROR(ECANCELED));
}
-
return (SET_ERROR(err));
}
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
+static void
+dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
+{
+ dsl_pool_t *dp = scn->scn_dp;
+ spa_t *spa = dp->dp_spa;
+
+ if (complete) {
+ spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
+ spa_history_log_internal(spa, "error scrub done", tx,
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
+ } else {
+ spa_history_log_internal(spa, "error scrub canceled", tx,
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
+ }
+
+ scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
+ spa->spa_scrub_active = B_FALSE;
+ spa_errlog_rotate(spa);
+ scn->errorscrub_phys.dep_end_time = gethrestime_sec();
+ zap_cursor_fini(&scn->errorscrub_cursor);
+
+ if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
+ spa->spa_errata = 0;
+
+ ASSERT(!dsl_errorscrubbing(scn->scn_dp));
+}
+
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
- "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
- "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
- "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
- (u_longlong_t)spa_get_errlog_size(spa));
+ (u_longlong_t)spa_approx_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
ASSERT(!dsl_scan_is_running(scn));
}
+static int
+dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
+{
+ pool_scrub_cmd_t *cmd = arg;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+ dsl_scan_t *scn = dp->dp_scan;
+
+ if (*cmd == POOL_SCRUB_PAUSE) {
+ /*
+		 * can't pause an error scrub when there is no error scrub
+		 * in progress.
+ */
+ if (!dsl_errorscrubbing(dp))
+ return (SET_ERROR(ENOENT));
+
+ /* can't pause a paused error scrub */
+ if (dsl_errorscrub_is_paused(scn))
+ return (SET_ERROR(EBUSY));
+ } else if (*cmd != POOL_SCRUB_NORMAL) {
+ return (SET_ERROR(ENOTSUP));
+ }
+
+ return (0);
+}
+
+static void
+dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
+{
+ pool_scrub_cmd_t *cmd = arg;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+ spa_t *spa = dp->dp_spa;
+ dsl_scan_t *scn = dp->dp_scan;
+
+ if (*cmd == POOL_SCRUB_PAUSE) {
+ spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
+ scn->errorscrub_phys.dep_paused_flags = B_TRUE;
+ dsl_errorscrub_sync_state(scn, tx);
+ spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
+ } else {
+ ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
+ if (dsl_errorscrub_is_paused(scn)) {
+ /*
+ * We need to keep track of how much time we spend
+ * paused per pass so that we can adjust the error scrub
+ * rate shown in the output of 'zpool status'.
+ */
+ spa->spa_scan_pass_errorscrub_spent_paused +=
+ gethrestime_sec() -
+ spa->spa_scan_pass_errorscrub_pause;
+
+ spa->spa_scan_pass_errorscrub_pause = 0;
+ scn->errorscrub_phys.dep_paused_flags = B_FALSE;
+
+ zap_cursor_init_serialized(
+ &scn->errorscrub_cursor,
+ spa->spa_meta_objset, spa->spa_errlog_last,
+ scn->errorscrub_phys.dep_cursor);
+
+ dsl_errorscrub_sync_state(scn, tx);
+ }
+ }
+}
+
+static int
+dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
+{
+ (void) arg;
+ dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
+	/* can't cancel an error scrub when there is none in progress */
+ if (!dsl_errorscrubbing(scn->scn_dp))
+ return (SET_ERROR(ENOENT));
+ return (0);
+}
+
+static void
+dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
+{
+ (void) arg;
+ dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
+
+ dsl_errorscrub_done(scn, B_FALSE, tx);
+ dsl_errorscrub_sync_state(scn, tx);
+ spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
+ ESC_ZFS_ERRORSCRUB_ABORT);
+}
+
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
int
dsl_scan_cancel(dsl_pool_t *dp)
{
+ if (dsl_errorscrubbing(dp)) {
+ return (dsl_sync_task(spa_name(dp->dp_spa),
+ dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
+ NULL, 3, ZFS_SPACE_CHECK_RESERVED));
+ }
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
+ if (dsl_errorscrubbing(dp)) {
+ return (dsl_sync_task(spa_name(dp->dp_spa),
+ dsl_errorscrub_pause_resume_check,
+ dsl_errorscrub_pause_resume_sync, &cmd, 3,
+ ZFS_SPACE_CHECK_RESERVED));
+ }
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
- ASSERT0(scn->scn_bytes_pending);
+ ASSERT0(scn->scn_queues_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
- /* # extents in exts_by_size = # in exts_by_addr */
+ /*
+ * # of extents in exts_by_addr = # in exts_by_size.
+ * B-tree efficiency is ~75%, but can be as low as 50%.
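+		 * The 3/2 factor below pads each element's memory for
+		 * that B-tree slack.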
+ */
mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
- sizeof (range_seg_gap_t) + queue->q_sio_memused;
+ ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
+ 3 / 2) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
- ASSERT0(scn->scn_bytes_pending);
+ ASSERT0(scn->scn_queues_pending);
/*
* If we are above our hard limit, we need to clear out memory.
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
- int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
- int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
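+	/*
+	 * E.g. (illustrative): with zfs_dirty_data_max = 4GB and the default
+	 * zfs_vdev_async_write_active_min_dirty_percent = 30, the scan yields
+	 * once 1.2GB of dirty data is outstanding and the minimum scan time
+	 * has elapsed.
+	 */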
+ uint64_t dirty_min_bytes = zfs_dirty_data_max *
+ zfs_vdev_async_write_active_min_dirty_percent / 100;
+ uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
- (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
+ (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
return (B_FALSE);
}
+static boolean_t
+dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
+{
+ /*
+ * We suspend if:
+ * - we have scrubbed for at least the minimum time (default 1 sec
+ * for error scrub), someone is explicitly waiting for this txg
+ * to complete, or we have used up all of the time in the txg
+ * timeout (default 5 sec).
+ * or
+ * - the spa is shutting down because this pool is being exported
+ * or the machine is rebooting.
+ */
+ uint64_t curr_time_ns = gethrtime();
+ uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
+ uint64_t sync_time_ns = curr_time_ns -
+ scn->scn_dp->dp_spa->spa_sync_starttime;
+ int mintime = zfs_scrub_min_time_ms;
+
+ if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
+ (txg_sync_waiting(scn->scn_dp) ||
+ NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
+ spa_shutting_down(scn->scn_dp->dp_spa)) {
+ if (zb) {
+ dprintf("error scrub suspending at bookmark "
+ "%llx/%llx/%llx/%llx\n",
+ (longlong_t)zb->zb_objset,
+ (longlong_t)zb->zb_object,
+ (longlong_t)zb->zb_level,
+ (longlong_t)zb->zb_blkid);
+ }
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
+ ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
}
static void
-scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
+scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
{
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
}
static scan_prefetch_ctx_t *
-scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
+scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
{
scan_prefetch_ctx_t *spc;
}
static void
-scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
+scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
{
zfs_refcount_add(&spc->spc_refcnt, tag);
}
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
+ if (OBJSET_BUF_HAS_PROJECTUSED(buf)) {
+ dsl_scan_prefetch_dnode(scn,
+ &osp->os_projectused_dnode, zb->zb_objset,
+ DMU_PROJECTUSED_OBJECT);
+ }
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
zio_flags |= ZIO_FLAG_RAW;
}
+ /* We don't need data L1 buffer since we do not prefetch L0. */
+ blkptr_t *bp = &spic->spic_bp;
+ if (BP_GET_LEVEL(bp) == 1 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
+ BP_GET_TYPE(bp) != DMU_OT_OBJSET)
+ flags |= ARC_FLAG_NO_BUF;
+
/* issue the prefetch asynchronously */
- (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
- &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
- ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
+ (void) arc_read(scn->scn_zio_root, spa, bp,
+ dsl_scan_prefetch_cb, spic->spic_spc, ZIO_PRIORITY_SCRUB,
+ zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
/*
* If we found the block we're trying to resume from, or
- * we went past it to a different object, zero it out to
- * indicate that it's OK to start checking for suspending
- * again.
+ * we went past it, zero it out to indicate that it's OK
+ * to start checking for suspending again.
*/
- if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
- zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
+ if (zbookmark_subtree_tbd(dnp, zb,
+ &scn->scn_phys.scn_bookmark)) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
- bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
+ memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
}
-static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
+static void dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
+ spa_t *spa = dp->dp_spa;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
ASSERT(!BP_IS_REDACTED(bp));
+ /*
+	 * There is an unlikely case of encountering dnodes with contradicting
+	 * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag in files created or
+	 * modified before commit 4254acb was merged. As it is not possible
+ * to know which of the two is correct, report an error.
+ */
+ if (dnp != NULL &&
+ dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
+ scn->scn_phys.scn_errors++;
+ spa_log_error(spa, zb, &bp->blk_birth);
+ return (SET_ERROR(EINVAL));
+ }
+
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
- err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
+ err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
zio_flags |= ZIO_FLAG_RAW;
}
- err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
+ err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
objset_phys_t *osp;
arc_buf_t *buf;
- err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
+ err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
+ } else if (!zfs_blkptr_verify(spa, bp,
+ BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
+ /*
+ * Sanity check the block pointer contents, this is handled
+ * by arc_read() for the cases above.
+ */
+ scn->scn_phys.scn_errors++;
+ spa_log_error(spa, zb, &bp->blk_birth);
+ return (SET_ERROR(EINVAL));
}
return (0);
* first 5; we want them to be useful.
*/
static void
-dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
+dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
- blkptr_t *bp_toread = NULL;
if (dsl_scan_check_suspend(scn, zb))
return;
scn->scn_visited_this_txg++;
- /*
- * This debugging is commented out to conserve stack space. This
- * function is called recursively and the debugging adds several
- * bytes to the stack for each call. It can be commented back in
- * if required to debug an issue in dsl_scan_visitbp().
- *
- * dprintf_bp(bp,
- * "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
- * ds, ds ? ds->ds_object : 0,
- * zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
- * bp);
- */
-
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
return;
}
+ /*
+ * Check if this block contradicts any filesystem flags.
+ */
+ spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
+ if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
+ ASSERT(dsl_dataset_feature_is_active(ds, f));
+
+ f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
+ if (f != SPA_FEATURE_NONE)
+ ASSERT(dsl_dataset_feature_is_active(ds, f));
+
+ f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
+ if (f != SPA_FEATURE_NONE)
+ ASSERT(dsl_dataset_feature_is_active(ds, f));
+
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
- bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
- *bp_toread = *bp;
-
- if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
- goto out;
+ if (dsl_scan_recurse(scn, ds, ostype, dnp, bp, zb, tx) != 0)
+ return;
/*
* If dsl_scan_ddt() has already visited this block, it will have
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
- goto out;
+ return;
}
/*
*/
if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
- goto out;
+ return;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
-
-out:
- kmem_free(bp_toread, sizeof (blkptr_t));
}
static void
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
- * We leverage the fact that the dde's replication class (enum ddt_class)
+ * We leverage the fact that the dde's replication class (ddt_class_t)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
- ddt_entry_t dde;
+ ddt_entry_t dde = {{{{0}}}};
int error;
uint64_t n = 0;
- bzero(&dde, sizeof (ddt_entry_t));
-
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
- spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
if (scn->scn_suspending)
return;
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
- bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
+ memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
}
static uint64_t
-dsl_scan_count_data_disks(vdev_t *rvd)
+dsl_scan_count_data_disks(spa_t *spa)
{
+ vdev_t *rvd = spa->spa_root_vdev;
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
- int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
- int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
+ uint64_t dirty_min_bytes = zfs_dirty_data_max *
+ zfs_vdev_async_write_active_min_dirty_percent / 100;
+ uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
- (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
+ (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
- int64_t bytes_issued = 0;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
}
sio2bp(sio, &bp);
- bytes_issued += SIO_GET_ASIZE(sio);
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
sio_free(sio);
}
-
- atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);
-
return (suspended);
}
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
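+	/* If this was the last sio, the queue is no longer pending. */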
+ if (avl_is_empty(&queue->q_sios_by_addr))
+ atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
bytes_issued += SIO_GET_ASIZE(sio);
range_tree_resize_segment(queue->q_exts_by_addr, rs,
SIO_GET_OFFSET(sio), rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
-
+ queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
return (B_TRUE);
} else {
uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
+ queue->q_last_ext_addr = -1;
return (B_FALSE);
}
}
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
- /* handle tunable overrides */
- if (scn->scn_checkpointing || scn->scn_clearing) {
- if (zfs_scan_issue_strategy == 1) {
- return (range_tree_first(rt));
- } else if (zfs_scan_issue_strategy == 2) {
- /*
- * We need to get the original entry in the by_addr
- * tree so we can modify it.
- */
- range_seg_t *size_rs =
- zfs_btree_first(&queue->q_exts_by_size, NULL);
- if (size_rs == NULL)
- return (NULL);
- uint64_t start = rs_get_start(size_rs, rt);
- uint64_t size = rs_get_end(size_rs, rt) - start;
- range_seg_t *addr_rs = range_tree_find(rt, start,
- size);
- ASSERT3P(addr_rs, !=, NULL);
- ASSERT3U(rs_get_start(size_rs, rt), ==,
- rs_get_start(addr_rs, rt));
- ASSERT3U(rs_get_end(size_rs, rt), ==,
- rs_get_end(addr_rs, rt));
- return (addr_rs);
- }
- }
+ if (!scn->scn_checkpointing && !scn->scn_clearing)
+ return (NULL);
/*
* During normal clearing, we want to issue our largest segments
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
- if (scn->scn_checkpointing) {
+ if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
+ zfs_scan_issue_strategy == 1)
return (range_tree_first(rt));
- } else if (scn->scn_clearing) {
- /*
- * We need to get the original entry in the by_addr
- * tree so we can modify it.
- */
- range_seg_t *size_rs = zfs_btree_first(&queue->q_exts_by_size,
- NULL);
- if (size_rs == NULL)
- return (NULL);
- uint64_t start = rs_get_start(size_rs, rt);
- uint64_t size = rs_get_end(size_rs, rt) - start;
- range_seg_t *addr_rs = range_tree_find(rt, start, size);
- ASSERT3P(addr_rs, !=, NULL);
- ASSERT3U(rs_get_start(size_rs, rt), ==, rs_get_start(addr_rs,
- rt));
- ASSERT3U(rs_get_end(size_rs, rt), ==, rs_get_end(addr_rs, rt));
- return (addr_rs);
- } else {
- return (NULL);
+
+ /*
+ * Try to continue previous extent if it is not completed yet. After
+ * shrink in scan_io_queue_gather() it may no longer be the best, but
+ * otherwise we leave shorter remnant every txg.
+ */
+ uint64_t start;
+ uint64_t size = 1ULL << rt->rt_shift;
+ range_seg_t *addr_rs;
+ if (queue->q_last_ext_addr != -1) {
+ start = queue->q_last_ext_addr;
+ addr_rs = range_tree_find(rt, start, size);
+ if (addr_rs != NULL)
+ return (addr_rs);
}
+
+ /*
+ * Nothing to continue, so find new best extent.
+ */
+ uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
+ if (v == NULL)
+ return (NULL);
+ queue->q_last_ext_addr = start = *v << rt->rt_shift;
+
+ /*
+ * We need to get the original entry in the by_addr tree so we can
+ * modify it.
+ */
+ addr_rs = range_tree_find(rt, start, size);
+ ASSERT3P(addr_rs, !=, NULL);
+ ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
+ ASSERT3U(rs_get_end(addr_rs, rt), >, start);
+ return (addr_rs);
}
static void
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
- range_seg_t *rs = NULL;
- scan_io_t *sio = NULL;
+ range_seg_t *rs;
+ scan_io_t *sio;
+ zio_t *zio;
list_t sio_list;
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
+ zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
+ NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
mutex_enter(q_lock);
+ queue->q_zio = zio;
/* Calculate maximum in-flight bytes for this vdev. */
queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
- boolean_t more_left = B_TRUE;
+ boolean_t more_left;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
- while (more_left) {
+ do {
scan_io_t *first_sio, *last_sio;
/*
if (suspended)
break;
- }
+ } while (more_left);
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
- while ((sio = list_head(&sio_list)) != NULL) {
- list_remove(&sio_list, sio);
+ while ((sio = list_remove_head(&sio_list)) != NULL)
scan_io_queue_insert_impl(queue, sio);
- }
+ queue->q_zio = NULL;
mutex_exit(q_lock);
+ zio_nowait(zio);
list_destroy(&sio_list);
}
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
- if (scn->scn_bytes_pending == 0)
+ if (scn->scn_queues_pending == 0)
return;
if (scn->scn_taskq == NULL) {
return ((used != 0) || (clones_left));
}
+boolean_t
+dsl_errorscrub_active(dsl_scan_t *scn)
+{
+ spa_t *spa = scn->scn_dp->dp_spa;
+ if (spa->spa_load_state != SPA_LOAD_NONE)
+ return (B_FALSE);
+ if (spa_shutting_down(spa))
+ return (B_FALSE);
+ if (dsl_errorscrubbing(scn->scn_dp))
+ return (B_TRUE);
+ return (B_FALSE);
+}
+
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
scn->scn_dedup_frees_this_txg = 0;
/*
- * Write out changes to the DDT that may be required as a
- * result of the blocks freed. This ensures that the DDT
- * is clean when a scrub/resilver runs.
+ * Write out changes to the DDT and the BRT that may be required
+ * as a result of the blocks freed. This ensures that the DDT
+ * and the BRT are clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
+ brt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
return (0);
}
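+
+/*
+ * Parse a colon-separated error log entry name of the form
+ * "<objset>:<object>:<level>:<blkid>" into a zbookmark_phys_t.
+ */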
+static void
+name_to_bookmark(char *buf, zbookmark_phys_t *zb)
+{
+ zb->zb_objset = zfs_strtonum(buf, &buf);
+ ASSERT(*buf == ':');
+ zb->zb_object = zfs_strtonum(buf + 1, &buf);
+ ASSERT(*buf == ':');
+ zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
+ ASSERT(*buf == ':');
+ zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
+ ASSERT(*buf == '\0');
+}
+
+static void
+name_to_object(char *buf, uint64_t *obj)
+{
+ *obj = zfs_strtonum(buf, &buf);
+ ASSERT(*buf == '\0');
+}
+
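+/*
+ * Resolve the block pointer at the given bookmark via dbuf_dnode_findbp()
+ * and issue a scrub read for it. Holes, datasets with unloaded keys and
+ * failed lookups are silently skipped.
+ */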
+static void
+read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
+{
+ dsl_pool_t *dp = scn->scn_dp;
+ dsl_dataset_t *ds;
+ objset_t *os;
+ if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
+ return;
+
+ if (dmu_objset_from_ds(ds, &os) != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ return;
+ }
+
+ /*
+ * If the key is not loaded dbuf_dnode_findbp() will error out with
+ * EACCES. However in that case dnode_hold() will eventually call
+ * dbuf_read()->zio_wait() which may call spa_log_error(). This will
+ * lead to a deadlock due to us holding the mutex spa_errlist_lock.
+ * Avoid this by checking here if the keys are loaded, if not return.
+ * If the keys are not loaded the head_errlog feature is meaningless
+ * as we cannot figure out the birth txg of the block pointer.
+ */
+ if (dsl_dataset_get_keystatus(ds->ds_dir) ==
+ ZFS_KEYSTATUS_UNAVAILABLE) {
+ dsl_dataset_rele(ds, FTAG);
+ return;
+ }
+
+ dnode_t *dn;
+ blkptr_t bp;
+
+ if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ return;
+ }
+
+ rw_enter(&dn->dn_struct_rwlock, RW_READER);
+ int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
+ NULL);
+
+ if (error) {
+ rw_exit(&dn->dn_struct_rwlock);
+ dnode_rele(dn, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+ return;
+ }
+
+	if (BP_IS_HOLE(&bp)) {
+ rw_exit(&dn->dn_struct_rwlock);
+ dnode_rele(dn, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+ return;
+ }
+
+ int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;
+
+ /* If it's an intent log block, failure is expected. */
+ if (zb.zb_level == ZB_ZIL_LEVEL)
+ zio_flags |= ZIO_FLAG_SPECULATIVE;
+
+ ASSERT(!BP_IS_EMBEDDED(&bp));
+ scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
+ rw_exit(&dn->dn_struct_rwlock);
+ dnode_rele(dn, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+}
+
+/*
+ * We keep track of the scrubbed error blocks in "count". This will be used
+ * when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This
+ * function is modelled after check_filesystem().
+ */
+static int
+scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep,
+ int *count)
+{
+ dsl_dataset_t *ds;
+ dsl_pool_t *dp = spa->spa_dsl_pool;
+ dsl_scan_t *scn = dp->dp_scan;
+
+ int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds);
+ if (error != 0)
+ return (error);
+
+ uint64_t latest_txg;
+ uint64_t txg_to_consider = spa->spa_syncing_txg;
+ boolean_t check_snapshot = B_TRUE;
+
+ error = find_birth_txg(ds, zep, &latest_txg);
+
+ /*
+	 * If find_birth_txg() errors out, err on the side of caution and
+	 * proceed; in the worst case we scrub all objects. If zep->zb_birth
+	 * is 0 (e.g. in case of encryption with unloaded keys) we also
+	 * proceed to scrub all objects.
+ */
+ if (error == 0 && zep->zb_birth == latest_txg) {
+		/* Block was neither freed nor rewritten. */
+ zbookmark_phys_t zb;
+ zep_to_zb(fs, zep, &zb);
+ scn->scn_zio_root = zio_root(spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL);
+ /* We have already acquired the config lock for spa */
+ read_by_block_level(scn, zb);
+
+ (void) zio_wait(scn->scn_zio_root);
+ scn->scn_zio_root = NULL;
+
+ scn->errorscrub_phys.dep_examined++;
+ scn->errorscrub_phys.dep_to_examine--;
+ (*count)++;
+ if ((*count) == zfs_scrub_error_blocks_per_txg ||
+ dsl_error_scrub_check_suspend(scn, &zb)) {
+ dsl_dataset_rele(ds, FTAG);
+ return (SET_ERROR(EFAULT));
+ }
+
+ check_snapshot = B_FALSE;
+ } else if (error == 0) {
+ txg_to_consider = latest_txg;
+ }
+
+ /*
+ * Retrieve the number of snapshots if the dataset is not a snapshot.
+ */
+ uint64_t snap_count = 0;
+ if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
+
+ error = zap_count(spa->spa_meta_objset,
+ dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
+
+ if (error != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ return (error);
+ }
+ }
+
+ if (snap_count == 0) {
+ /* Filesystem without snapshots. */
+ dsl_dataset_rele(ds, FTAG);
+ return (0);
+ }
+
+ uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
+ uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
+
+ dsl_dataset_rele(ds, FTAG);
+
+ /* Check only snapshots created from this file system. */
+ while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
+ snap_obj_txg <= txg_to_consider) {
+
+ error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
+ if (error != 0)
+ return (error);
+
+ if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
+ snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
+ snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
+ dsl_dataset_rele(ds, FTAG);
+ continue;
+ }
+
+ boolean_t affected = B_TRUE;
+ if (check_snapshot) {
+ uint64_t blk_txg;
+ error = find_birth_txg(ds, zep, &blk_txg);
+
+ /*
+ * Scrub the snapshot also when zb_birth == 0 or when
+ * find_birth_txg() returns an error.
+ */
+ affected = (error == 0 && zep->zb_birth == blk_txg) ||
+ (error != 0) || (zep->zb_birth == 0);
+ }
+
+ /* Scrub snapshots. */
+ if (affected) {
+ zbookmark_phys_t zb;
+ zep_to_zb(snap_obj, zep, &zb);
+ scn->scn_zio_root = zio_root(spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL);
+ /* We have already acquired the config lock for spa */
+ read_by_block_level(scn, zb);
+
+ (void) zio_wait(scn->scn_zio_root);
+ scn->scn_zio_root = NULL;
+
+ scn->errorscrub_phys.dep_examined++;
+ scn->errorscrub_phys.dep_to_examine--;
+ (*count)++;
+ if ((*count) == zfs_scrub_error_blocks_per_txg ||
+ dsl_error_scrub_check_suspend(scn, &zb)) {
+ dsl_dataset_rele(ds, FTAG);
+				return (SET_ERROR(EFAULT));
+ }
+ }
+ snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
+ snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
+ dsl_dataset_rele(ds, FTAG);
+ }
+ return (0);
+}
+
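+/*
+ * Error scrub entry point, called from spa_sync() context. Walks the
+ * persisted error log(s) and re-reads each recorded block, resuming
+ * from the serialized ZAP cursor across txgs.
+ */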
+void
+dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
+{
+ spa_t *spa = dp->dp_spa;
+ dsl_scan_t *scn = dp->dp_scan;
+
+ /*
+ * Only process scans in sync pass 1.
+ */
+
+ if (spa_sync_pass(spa) > 1)
+ return;
+
+ /*
+ * If the spa is shutting down, then stop scanning. This will
+ * ensure that the scan does not dirty any new data during the
+ * shutdown phase.
+ */
+ if (spa_shutting_down(spa))
+ return;
+
+ if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
+ return;
+ }
+
+ if (dsl_scan_resilvering(scn->scn_dp)) {
+ /* cancel the error scrub if resilver started */
+ dsl_scan_cancel(scn->scn_dp);
+ return;
+ }
+
+ spa->spa_scrub_active = B_TRUE;
+ scn->scn_sync_start_time = gethrtime();
+
+ /*
+ * zfs_scan_suspend_progress can be set to disable scrub progress.
+ * See more detailed comment in dsl_scan_sync().
+ */
+ if (zfs_scan_suspend_progress) {
+ uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
+ int mintime = zfs_scrub_min_time_ms;
+
+ while (zfs_scan_suspend_progress &&
+ !txg_sync_waiting(scn->scn_dp) &&
+ !spa_shutting_down(scn->scn_dp->dp_spa) &&
+ NSEC2MSEC(scan_time_ns) < mintime) {
+ delay(hz);
+ scan_time_ns = gethrtime() - scn->scn_sync_start_time;
+ }
+ return;
+ }
+
+ int i = 0;
+ zap_attribute_t *za;
+ zbookmark_phys_t *zb;
+ boolean_t limit_exceeded = B_FALSE;
+
+ za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
+ zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);
+
+ if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
+ for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
+ zap_cursor_advance(&scn->errorscrub_cursor)) {
+ name_to_bookmark(za->za_name, zb);
+
+ scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
+ NULL, ZIO_FLAG_CANFAIL);
+ dsl_pool_config_enter(dp, FTAG);
+ read_by_block_level(scn, *zb);
+ dsl_pool_config_exit(dp, FTAG);
+
+ (void) zio_wait(scn->scn_zio_root);
+ scn->scn_zio_root = NULL;
+
+ scn->errorscrub_phys.dep_examined += 1;
+ scn->errorscrub_phys.dep_to_examine -= 1;
+ i++;
+ if (i == zfs_scrub_error_blocks_per_txg ||
+ dsl_error_scrub_check_suspend(scn, zb)) {
+ limit_exceeded = B_TRUE;
+ break;
+ }
+ }
+
+ if (!limit_exceeded)
+ dsl_errorscrub_done(scn, B_TRUE, tx);
+
+ dsl_errorscrub_sync_state(scn, tx);
+ kmem_free(za, sizeof (*za));
+ kmem_free(zb, sizeof (*zb));
+ return;
+ }
+
+ int error = 0;
+ for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
+ zap_cursor_advance(&scn->errorscrub_cursor)) {
+
+ zap_cursor_t *head_ds_cursor;
+ zap_attribute_t *head_ds_attr;
+ zbookmark_err_phys_t head_ds_block;
+
+ head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
+ head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
+
+ uint64_t head_ds_err_obj = za->za_first_integer;
+ uint64_t head_ds;
+ name_to_object(za->za_name, &head_ds);
+ boolean_t config_held = B_FALSE;
+ uint64_t top_affected_fs;
+
+ for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
+ head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
+ head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {
+
+ name_to_errphys(head_ds_attr->za_name, &head_ds_block);
+
+ /*
+ * In case we are called from spa_sync the pool
+ * config is already held.
+ */
+ if (!dsl_pool_config_held(dp)) {
+ dsl_pool_config_enter(dp, FTAG);
+ config_held = B_TRUE;
+ }
+
+ error = find_top_affected_fs(spa,
+ head_ds, &head_ds_block, &top_affected_fs);
+ if (error)
+ break;
+
+ error = scrub_filesystem(spa, top_affected_fs,
+ &head_ds_block, &i);
+
+ if (error == SET_ERROR(EFAULT)) {
+ limit_exceeded = B_TRUE;
+ break;
+ }
+ }
+
+ zap_cursor_fini(head_ds_cursor);
+ kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
+ kmem_free(head_ds_attr, sizeof (*head_ds_attr));
+
+ if (config_held)
+ dsl_pool_config_exit(dp, FTAG);
+ }
+
+ kmem_free(za, sizeof (*za));
+ kmem_free(zb, sizeof (*zb));
+ if (!limit_exceeded)
+ dsl_errorscrub_done(scn, B_TRUE, tx);
+
+ dsl_errorscrub_sync_state(scn, tx);
+}
+
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
- int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
- zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
+ uint_t mintime = (scn->scn_phys.scn_func ==
+ POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
+ zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
return;
}
+ /*
+ * Disabled by default, set zfs_scan_report_txgs to report
+ * average performance over the last zfs_scan_report_txgs TXGs.
+ */
+ if (zfs_scan_report_txgs != 0 &&
+ tx->tx_txg % zfs_scan_report_txgs == 0) {
+ scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
+ spa_scan_stat_init(spa);
+ }
+
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
taskqid_t prefetch_tqid;
/*
- * Recalculate the max number of in-flight bytes for pool-wide
- * scanning operations (minimum 1MB). Limits for the issuing
- * phase are done per top-level vdev and are handled separately.
+ * Calculate the max number of in-flight bytes for pool-wide
+ * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
+ * Limits for the issuing phase are done per top-level vdev and
+ * are handled separately.
*/
- scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
- dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
+ scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
+ zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
+ scn->scn_issued_before_pass +=
+ spa->spa_scan_pass_issued;
+ spa_scan_stat_init(spa);
}
zfs_dbgmsg("scan complete for %s txg %llu",
spa->spa_name,
(longlong_t)tx->tx_txg);
}
- } else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
+ } else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
ASSERT(scn->scn_clearing);
/* need to issue scrubbing IOs from per-vdev queues */
spa->spa_name);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
- ASSERT0(scn->scn_bytes_pending);
+ ASSERT0(scn->scn_queues_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
}
static void
-count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
+count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
- int i;
-
/*
* Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block.
* zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs.
*/
- if (scn->scn_is_sorted) {
- atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
- DVA_GET_ASIZE(&bp->blk_dva[0]));
- } else {
- spa_t *spa = scn->scn_dp->dp_spa;
+ atomic_add_64(&spa->spa_scan_pass_issued,
+ all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
+}
- for (i = 0; i < BP_GET_NDVAS(bp); i++) {
- atomic_add_64(&spa->spa_scan_pass_issued,
- DVA_GET_ASIZE(&bp->blk_dva[i]));
- }
- }
+static void
+count_block_skipped(dsl_scan_t *scn, const blkptr_t *bp, boolean_t all)
+{
+ if (BP_IS_EMBEDDED(bp))
+ return;
+ atomic_add_64(&scn->scn_phys.scn_skipped,
+ all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
+}
+static void
+count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
+{
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
if (zab == NULL)
return;
- mutex_enter(&zab->zab_lock);
-
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
break;
}
}
-
- mutex_exit(&zab->zab_lock);
}
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
- int64_t asize = SIO_GET_ASIZE(sio);
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
+ if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
+ atomic_add_64(&scn->scn_queues_pending, 1);
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
- atomic_add_64(&scn->scn_bytes_pending, -asize);
sio_free(sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio);
- range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize);
+ range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
+ SIO_GET_ASIZE(sio));
}
/*
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
- dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
ASSERT0(BP_IS_GANG(bp));
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
- /*
- * Increment the bytes pending counter now so that we can't
- * get an integer underflow in case the worker processes the
- * zio before we get to incrementing this counter.
- */
- atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio));
-
+ queue->q_last_ext_addr = -1;
scan_io_queue_insert_impl(queue, sio);
}
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
-
+ count_block(dp->dp_blkstats, bp);
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) {
- count_block(scn, dp->dp_blkstats, bp);
+ count_block_skipped(scn, bp, B_TRUE);
return (0);
}
* Keep track of how much data we've examined so that
* zpool(8) status can make useful progress reports.
*/
- scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
- spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);
+ uint64_t asize = DVA_GET_ASIZE(dva);
+ scn->scn_phys.scn_examined += asize;
+ spa->spa_scan_pass_exam += asize;
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
- count_block(scn, dp->dp_blkstats, bp);
+ count_block_skipped(scn, bp, B_TRUE);
}
/* do not relocate this block */
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
- atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
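+		/*
+		 * Credit the error to the error scrub if one is actively
+		 * running; otherwise to the regular scan.
+		 */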
+ if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
+ !dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
+ atomic_inc_64(&spa->spa_dsl_pool->dp_scan
+ ->errorscrub_phys.dep_errors);
+ } else {
+ atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
+ .scn_errors);
+ }
}
}
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
+ zio_t *pio;
if (queue == NULL) {
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
+ pio = scn->scn_zio_root;
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
+ pio = queue->q_zio;
mutex_exit(q_lock);
}
- count_block(scn, dp->dp_blkstats, bp);
- zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size,
- dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
+ ASSERT(pio != NULL);
+ count_block_issued(spa, bp, queue == NULL);
+ zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
+ queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
/*
* extents that are more completely filled (in a 3:2 ratio) vs just larger.
* Note that as an optimization, we replace multiplication and division by
* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
+ *
+ * Since we do not care if one extent is only a few percent better than
+ * another, we compress the score into 6 bits via binary logarithm (AKA
+ * highbit64()) and store it in the high bits of the offset, which are
+ * otherwise unused due to ashift. This reduces q_exts_by_size B-tree
+ * elements to only 64 bits, so they can be compared with a single
+ * operation. It also makes scrubs more sequential and reduces the
+ * chance that a minor change to an extent moves it within the B-tree.
*/
+__attribute__((always_inline)) inline
static int
ext_size_compare(const void *x, const void *y)
{
- const range_seg_gap_t *rsa = x, *rsb = y;
+ const uint64_t *a = x, *b = y;
- uint64_t sa = rsa->rs_end - rsa->rs_start;
- uint64_t sb = rsb->rs_end - rsb->rs_start;
- uint64_t score_a, score_b;
+ return (TREE_CMP(*a, *b));
+}
- score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
- fill_weight * rsa->rs_fill) >> 7);
- score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
- fill_weight * rsb->rs_fill) >> 7);
+ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
+ ext_size_compare)
- if (score_a > score_b)
- return (-1);
- if (score_a == score_b) {
- if (rsa->rs_start < rsb->rs_start)
- return (-1);
- if (rsa->rs_start == rsb->rs_start)
- return (0);
- return (1);
- }
- return (1);
+static void
+ext_size_create(range_tree_t *rt, void *arg)
+{
+ (void) rt;
+ zfs_btree_t *size_tree = arg;
+
+ zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf,
+ sizeof (uint64_t));
}
+static void
+ext_size_destroy(range_tree_t *rt, void *arg)
+{
+ (void) rt;
+ zfs_btree_t *size_tree = arg;
+ ASSERT0(zfs_btree_numnodes(size_tree));
+
+ zfs_btree_destroy(size_tree);
+}
+
+static uint64_t
+ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
+{
+ (void) rt;
+ uint64_t size = rsg->rs_end - rsg->rs_start;
+ uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
+ fill_weight * rsg->rs_fill) >> 7);
+ ASSERT3U(rt->rt_shift, >=, 8);
+ return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
+}
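+
+/*
+ * E.g., with the default fill_weight of 3, a fully filled extent
+ * (rs_fill == size) has score == 4 * rs_fill; for a 1M extent that is
+ * 1 << 22, highbit64() == 23, so the key is (41ULL << 56) | rs_start.
+ * A half-filled 1M extent scores ~1.25M, giving key byte 43, so
+ * better-filled extents get smaller keys and sort first.
+ */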
+
+static void
+ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
+{
+ zfs_btree_t *size_tree = arg;
+ ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
+ uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
+ zfs_btree_add(size_tree, &v);
+}
+
+static void
+ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
+{
+ zfs_btree_t *size_tree = arg;
+ ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
+ uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
+ zfs_btree_remove(size_tree, &v);
+}
+
+static void
+ext_size_vacate(range_tree_t *rt, void *arg)
+{
+ zfs_btree_t *size_tree = arg;
+ zfs_btree_clear(size_tree);
+ zfs_btree_destroy(size_tree);
+
+ ext_size_create(rt, arg);
+}
+
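+/*
+ * These callbacks keep q_exts_by_size in sync with the q_exts_by_addr
+ * range tree: every segment added to or removed from the address tree
+ * has its packed score/offset key mirrored in the size B-tree.
+ */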
+static const range_tree_ops_t ext_size_ops = {
+ .rtop_create = ext_size_create,
+ .rtop_destroy = ext_size_destroy,
+ .rtop_add = ext_size_add,
+ .rtop_remove = ext_size_remove,
+ .rtop_vacate = ext_size_vacate
+};
+
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
q->q_scn = scn;
q->q_vd = vd;
q->q_sio_memused = 0;
+ q->q_last_ext_addr = -1;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
- q->q_exts_by_addr = range_tree_create_impl(&rt_btree_ops, RANGE_SEG_GAP,
- &q->q_exts_by_size, 0, 0, ext_size_compare, zfs_scan_max_ext_gap);
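+	/* Extents closer than zfs_scan_max_ext_gap merge into one I/O range. */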
+ q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
+ &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
- int64_t bytes_dequeued = 0;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
+ if (!avl_is_empty(&queue->q_sios_by_addr))
+ atomic_add_64(&scn->scn_queues_pending, -1);
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
- bytes_dequeued += SIO_GET_ASIZE(sio);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio);
}
ASSERT0(queue->q_sio_memused);
- atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
sio_free(srch_sio);
if (sio != NULL) {
- int64_t asize = SIO_GET_ASIZE(sio);
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
- ASSERT3U(size, ==, asize);
+ ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
avl_remove(&queue->q_sios_by_addr, sio);
+ if (avl_is_empty(&queue->q_sios_by_addr))
+ atomic_add_64(&scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
range_tree_remove_fill(queue->q_exts_by_addr, start, size);
- /*
- * We only update scn_bytes_pending in the cold path,
- * otherwise it will already have been accounted for as
- * part of the zio's execution.
- */
- atomic_add_64(&scn->scn_bytes_pending, -asize);
-
- /* count the block as though we issued it */
+ /* count the block as though we skipped it */
sio2bp(sio, &tmpbp);
- count_block(scn, dp->dp_blkstats, &tmpbp);
+ count_block_skipped(scn, &tmpbp, B_FALSE);
sio_free(sio);
}
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
-/* BEGIN CSTYLED */
-ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
-ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to scrub per txg");
-ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to obsolete per txg");
-ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to free per txg");
-ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
-ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
"Max number of blocks freed in one txg");
-ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
+ "Enable block statistics calculation during scrub");
+
+ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
- "IO issuing strategy during scrubbing. "
- "0 = default, 1 = LBA, 2 = size");
+ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
+ "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
+ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
+ "Tunable to report resilver performance over the last N txgs");
+
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");
+
+ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
+ "Error blocks to be scrubbed in one txg");
-/* END CSTYLED */