#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
+#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
+#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
-static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
+static uint64_t dsl_scan_count_data_disks(spa_t *spa);
-extern int zfs_vdev_async_write_active_min_dirty_percent;
+extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
+/*
+ * 'zpool status' uses bytes processed per pass to report throughput and
+ * estimate time remaining. We define a pass to start when the scanning
+ * phase completes for a sequential resilver. Optionally, this value
+ * may be used to reset the pass statistics every N txgs to provide an
+ * estimated completion time based on currently observed performance.
+ */
+static uint_t zfs_scan_report_txgs = 0;
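+
+/*
+ * For example, setting zfs_scan_report_txgs to 30 resets the pass
+ * statistics every 30 txgs, so the scan rate and completion estimate
+ * shown by 'zpool status' reflect recently observed performance rather
+ * than the average since the pass began.
+ */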
+
/*
 * Maximum number of in-flight bytes per leaf vdev. We attempt to
 * strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues, which would cause long
 * latency and thus long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
-static unsigned long zfs_scan_vdev_limit = 4 << 20;
+static uint64_t zfs_scan_vdev_limit = 16 << 20;
-static int zfs_scan_issue_strategy = 0;
-static int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
-static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
+static uint_t zfs_scan_issue_strategy = 0;
+
+/* don't queue & sort zios, go direct */
+static int zfs_scan_legacy = B_FALSE;
+static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
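+
+/*
+ * For example, with the default 2 MiB gap, queued extents separated by
+ * less than 2 MiB are merged and their I/Os issued together, keeping
+ * scrub and resilver reads close to sequential.
+ */
+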
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
-static int zfs_scan_fill_weight = 3;
+static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
-static int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */
-static int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */
-
-static int zfs_scrub_min_time_ms = 1000; /* min millis to scrub per txg */
-static int zfs_obsolete_min_time_ms = 500; /* min millis to obsolete per txg */
-static int zfs_free_min_time_ms = 1000; /* min millis to free per txg */
-static int zfs_resilver_min_time_ms = 3000; /* min millis to resilver per txg */
-static int zfs_scan_checkpoint_intval = 7200; /* in seconds */
+
+/* fraction of physmem */
+static uint_t zfs_scan_mem_lim_fact = 20;
+
+/* fraction of mem lim above */
+static uint_t zfs_scan_mem_lim_soft_fact = 20;
+
+/* minimum milliseconds to scrub per txg */
+static uint_t zfs_scrub_min_time_ms = 1000;
+
+/* minimum milliseconds to obsolete per txg */
+static uint_t zfs_obsolete_min_time_ms = 500;
+
+/* minimum milliseconds to free per txg */
+static uint_t zfs_free_min_time_ms = 1000;
+
+/* minimum milliseconds to resilver per txg */
+static uint_t zfs_resilver_min_time_ms = 3000;
+
+static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
-static unsigned long zfs_async_block_max_blocks = ULONG_MAX;
+static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
-static unsigned long zfs_max_async_dedup_frees = 100000;
+static uint64_t zfs_max_async_dedup_frees = 100000;
/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;
* event of an error. This array must go at the end of the
* struct to allow for a variable number of elements.
*/
- dva_t sio_dva[0];
+ dva_t sio_dva[];
} scan_io_t;
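+/*
+ * Note: sio_dva is now a C99 flexible array member (replacing the GNU
+ * zero-length array idiom), so an allocation is sized along the lines
+ * of offsetof(scan_io_t, sio_dva) + ndvas * sizeof (dva_t), where
+ * ndvas stands for the number of DVAs the scan_io_t carries.
+ */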
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
/*
* Calculate the max number of in-flight bytes for pool-wide
- * scanning operations (minimum 1MB). Limits for the issuing
- * phase are done per top-level vdev and are handled separately.
+ * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
+ * Limits for the issuing phase are done per top-level vdev and
+ * are handled separately.
*/
- scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
- dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
+ scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
+ zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
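+ /*
+ * For example, with the default 16 MiB zfs_scan_vdev_limit and eight
+ * data disks this evaluates to MIN(arc_c_max / 4, MAX(1 MiB, 128 MiB)),
+ * i.e. 128 MiB unless arc_c_max is smaller than 512 MiB.
+ */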
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
}
spa_scan_stat_init(spa);
+ vdev_scan_stat_init(spa->spa_root_vdev);
+
return (0);
}
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
+ vdev_scan_stat_init(spa->spa_root_vdev);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
- "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
- "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
- "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
+ "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
- (u_longlong_t)spa_get_errlog_size(spa));
+ (u_longlong_t)spa_approx_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
- int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
+ uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
+ ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
if (dnp != NULL &&
dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
scn->scn_phys.scn_errors++;
- spa_log_error(spa, zb);
+ spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EINVAL));
}
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
- } else if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
+ } else if (!zfs_blkptr_verify(spa, bp,
+ BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
/*
* Sanity check the block pointer contents; this is handled
* by arc_read() for the cases above.
*/
scn->scn_phys.scn_errors++;
- spa_log_error(spa, zb);
+ spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EINVAL));
}
return;
}
+ /*
+ * Check if this block contradicts any filesystem flags.
+ */
+ spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
+ if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
+ ASSERT(dsl_dataset_feature_is_active(ds, f));
+
+ f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
+ if (f != SPA_FEATURE_NONE)
+ ASSERT(dsl_dataset_feature_is_active(ds, f));
+
+ f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
+ if (f != SPA_FEATURE_NONE)
+ ASSERT(dsl_dataset_feature_is_active(ds, f));
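+ /*
+ * For example, a zstd-compressed block requires
+ * SPA_FEATURE_ZSTD_COMPRESS to be active on the dataset, and a block
+ * checksummed with SHA-512 requires SPA_FEATURE_SHA512.
+ */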
+
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
static uint64_t
-dsl_scan_count_data_disks(vdev_t *rvd)
+dsl_scan_count_data_disks(spa_t *spa)
{
+ vdev_t *rvd = spa->spa_root_vdev;
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
- int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
+ uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
scn->scn_dedup_frees_this_txg = 0;
/*
- * Write out changes to the DDT that may be required as a
- * result of the blocks freed. This ensures that the DDT
- * is clean when a scrub/resilver runs.
+ * Write out changes to the DDT and the BRT that may be required
+ * as a result of the blocks freed. This ensures that the DDT
+ * and the BRT are clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
+ brt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
- int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
- zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
+ uint_t mintime = (scn->scn_phys.scn_func ==
+ POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
+ zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
return;
}
+ /*
+ * Disabled by default; set zfs_scan_report_txgs to report
+ * average performance over the last zfs_scan_report_txgs TXGs.
+ */
+ if (!dsl_scan_is_paused_scrub(scn) && zfs_scan_report_txgs != 0 &&
+ tx->tx_txg % zfs_scan_report_txgs == 0) {
+ scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
+ spa_scan_stat_init(spa);
+ }
+
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
taskqid_t prefetch_tqid;
/*
- * Recalculate the max number of in-flight bytes for pool-wide
- * scanning operations (minimum 1MB). Limits for the issuing
- * phase are done per top-level vdev and are handled separately.
+ * Calculate the max number of in-flight bytes for pool-wide
+ * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
+ * Limits for the issuing phase are done per top-level vdev and
+ * are handled separately.
*/
- scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
- dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
+ scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
+ zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
+ scn->scn_issued_before_pass +=
+ spa->spa_scan_pass_issued;
+ spa_scan_stat_init(spa);
}
zfs_dbgmsg("scan complete for %s txg %llu",
spa->spa_name,
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
-ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
-ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to scrub per txg");
-ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to obsolete per txg");
-ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to free per txg");
-ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
-ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
"Max number of blocks freed in one txg");
-ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
"Enable block statistics calculation during scrub");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
-ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
+ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
+ "Tunable to report resilver performance over the last N txgs");
+
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");