NULL
};
-/* maximum scrub/resilver I/O queue per leaf vdev */
-int zfs_scrub_limit = 10;
-
/*
* Given a vdev type, return the appropriate ops vector.
*/
metaslab_group_destroy(vd->vdev_mg);
}
- ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
- ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
- ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
+ ASSERT0(vd->vdev_stat.vs_space);
+ ASSERT0(vd->vdev_stat.vs_dspace);
+ ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
- * For testing purposes, a higher ashift can be requested.
+ * For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
- vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
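+ /*
+ * Only adopt the computed ashift when none was set before
+ * open; a value already requested (e.g. from the label or
+ * at vdev creation) is preserved.
+ */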
+ if (vd->vdev_ashift == 0)
+ vd->vdev_ashift = ashift;
} else {
/*
* Detect if the alignment requirement has increased.
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
uint64_t aux_guid = 0;
nvlist_t *nvl;
- uint64_t txg = strict ? spa->spa_config_txg : -1ULL;
+ uint64_t txg = spa_last_synced_txg(spa) != 0 ?
+ spa_last_synced_txg(spa) : -1ULL;
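+ /*
+ * Bound the label search by the last synced txg when one
+ * exists; before anything has synced, -1ULL places no
+ * restriction on the label's txg.
+ */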
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
!l2arc_vdev_present(vd))
l2arc_add_vdev(spa, vd);
} else {
- (void) vdev_validate(vd, spa_last_synced_txg(spa));
+ (void) vdev_validate(vd, B_TRUE);
}
/*
if (vd->vdev_detached) {
if (smo->smo_object != 0) {
- VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
+ VERIFY0(dmu_object_free(mos, smo->smo_object, tx));
smo->smo_object = 0;
}
dmu_tx_commit(tx);
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
if (vd->vdev_dtl_smo.smo_object) {
- ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
+ ASSERT0(vd->vdev_dtl_smo.smo_alloc);
(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
vd->vdev_dtl_smo.smo_object = 0;
}
if (msp == NULL || msp->ms_smo.smo_object == 0)
continue;
- ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
+ ASSERT0(msp->ms_smo.smo_alloc);
(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
msp->ms_smo.smo_object = 0;
}
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
- ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0);
+ ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
vdev_propagate_state(cvd);
}
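+/*
+ * Walk the vdev tree and check the head of each leaf's pending queue
+ * for an I/O that has been outstanding longer than the deadman
+ * threshold; if one is found, post a delay event.
+ */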
+void
+vdev_deadman(vdev_t *vd)
+{
+ int c;
+
+ for (c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+
+ vdev_deadman(cvd);
+ }
+
+ if (vd->vdev_ops->vdev_op_leaf) {
+ vdev_queue_t *vq = &vd->vdev_queue;
+
+ mutex_enter(&vq->vq_lock);
+ if (avl_numnodes(&vq->vq_pending_tree) > 0) {
+ spa_t *spa = vd->vdev_spa;
+ zio_t *fio;
+ uint64_t delta;
+
+ /*
+ * Look at the head of all the pending queues;
+ * if any I/O has been outstanding for longer than
+ * the spa_deadman_synctime, log a zevent.
+ */
+ fio = avl_first(&vq->vq_pending_tree);
+ delta = ddi_get_lbolt64() - fio->io_timestamp;
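+ /*
+ * delta is in clock ticks while spa_deadman_synctime()
+ * returns nanoseconds, hence the NSEC_TO_TICK()
+ * conversion before the comparison.
+ */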
+ if (delta > NSEC_TO_TICK(spa_deadman_synctime(spa))) {
+ zfs_dbgmsg("SLOW IO: zio timestamp %llu, "
+ "delta %llu, last io %llu",
+ fio->io_timestamp, delta,
+ vq->vq_io_complete_ts);
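+ /*
+ * Post a delay ereport so the slow I/O is also
+ * visible as a zevent, not just in the debug log.
+ */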
+ zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
+ spa, vd, fio, 0, 0);
+ }
+ }
+ mutex_exit(&vq->vq_lock);
+ }
+}
+
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
-
-module_param(zfs_scrub_limit, int, 0644);
-MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev");
#endif