/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
+ * Copyright 2017 Nexenta Systems, Inc.
+ * Copyright (c) 2014 Integros [integros.com]
+ * Copyright 2016 Toomas Soome <tsoome@me.com>
+ * Copyright 2017 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
+#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
+#include <sys/abd.h>
+#include <sys/zvol.h>
+#include <sys/zfs_ratelimit.h>
+
+/*
+ * When a vdev is added, it will be divided into approximately (but no
+ * more than) this number of metaslabs.
+ */
+int metaslabs_per_vdev = 200;
/*
* Virtual device management.
NULL
};
-/* maximum scrub/resilver I/O queue per leaf vdev */
-int zfs_scrub_limit = 10;
-
/*
* Given a vdev type, return the appropriate ops vector.
*/
vdev_t *pvd = vd->vdev_parent;
/*
- * The our parent is NULL (inactive spare or cache) or is the root,
+ * If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
* so each child must provide at least 1/Nth of its asize.
*/
if (pvd->vdev_ops == &vdev_raidz_ops)
- return (pvd->vdev_min_asize / pvd->vdev_children);
+ return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
+ pvd->vdev_children);
return (pvd->vdev_min_asize);
}
return (NULL);
}
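+/*
+ * Count the leaf vdevs in the tree rooted at 'vd'; a leaf counts as one.
+ */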
+static int
+vdev_count_leaves_impl(vdev_t *vd)
+{
+ int n = 0;
+ int c;
+
+ if (vd->vdev_ops->vdev_op_leaf)
+ return (1);
+
+ for (c = 0; c < vd->vdev_children; c++)
+ n += vdev_count_leaves_impl(vd->vdev_child[c]);
+
+ return (n);
+}
+
+int
+vdev_count_leaves(spa_t *spa)
+{
+ return (vdev_count_leaves_impl(spa->spa_root_vdev));
+}
+
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
- newchild = kmem_zalloc(newsize, KM_SLEEP);
+ newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
bcopy(pvd->vdev_child, newchild, oldsize);
kmem_free(pvd->vdev_child, oldsize);
if (pvd->vdev_child[c])
newc++;
- newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+ newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
+ spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
- mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
+ /*
+ * Initialize rate limit structs for events. We rate limit ZIO delay
+ * and checksum events so that we don't overwhelm ZED with thousands
+ * of events when a disk is acting up.
+ */
+ zfs_ratelimit_init(&vd->vdev_delay_rl, DELAYS_PER_SECOND, 1);
+ zfs_ratelimit_init(&vd->vdev_checksum_rl, CHECKSUMS_PER_SECOND, 1);
+
+ list_link_init(&vd->vdev_config_dirty_node);
+ list_link_init(&vd->vdev_state_dirty_node);
+ mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
+
for (t = 0; t < DTL_TYPES; t++) {
- space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
+ vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
&vd->vdev_dtl_lock);
}
- txg_list_create(&vd->vdev_ms_list,
+ txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
- txg_list_create(&vd->vdev_dtl_list,
+ txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
char *type;
uint64_t guid = 0, islog, nparity;
vdev_t *vd;
+ char *tmp = NULL;
+ int rc;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
- return (ENOTSUP);
+ return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
- return (ENOTSUP);
+ return (SET_ERROR(ENOTSUP));
/*
* Set the nparity property for RAID-Z vdevs.
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* devices.
*/
if (nparity > 1 &&
spa_version(spa) < SPA_VERSION_RAIDZ2)
- return (ENOTSUP);
+ return (SET_ERROR(ENOTSUP));
if (nparity > 2 &&
spa_version(spa) < SPA_VERSION_RAIDZ3)
- return (ENOTSUP);
+ return (SET_ERROR(ENOTSUP));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
vd->vdev_path = spa_strdup(vd->vdev_path);
+
+ /*
+ * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
+ * fault on a vdev and want it to persist across imports (like with
+ * zpool offline -f).
+ */
+ rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
+ if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
+ vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
+ vd->vdev_faulted = 1;
+ vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
+ }
+
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
vd->vdev_devid = spa_strdup(vd->vdev_devid);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
&vd->vdev_physpath) == 0)
vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
+
+ if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
+ &vd->vdev_enc_sysfs_path) == 0)
+ vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);
+
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
vd->vdev_fru = spa_strdup(vd->vdev_fru);
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
+ &vd->vdev_top_zap);
+ } else {
+ ASSERT0(vd->vdev_top_zap);
}
- if (parent && !parent->vdev_parent) {
+ if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
spa_log_class(spa) : spa_normal_class(spa), vd);
}
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
+ (void) nvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
+ } else {
+ ASSERT0(vd->vdev_leaf_zap);
+ }
+
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
+
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
- &vd->vdev_dtl_smo.smo_object);
+ &vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
- (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVERING,
- &vd->vdev_resilvering);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
+ &vd->vdev_resilver_txg);
/*
- * When importing a pool, we want to ignore the persistent fault
- * state, as the diagnosis made on another system may not be
- * valid in the current context. Local vdevs will
- * remain in the faulted state.
+ * In general, when importing a pool we want to ignore the
+ * persistent fault state, as the diagnosis made on another
+ * system may not be valid in the current context. The only
+ * exception is if we forced a vdev to a persistently faulted
+ * state with 'zpool offline -f'. The persistent fault will
+ * remain across imports until cleared.
+ *
+ * Local vdevs will remain in the faulted state.
*/
- if (spa_load_state(spa) == SPA_LOAD_OPEN) {
+ if (spa_load_state(spa) == SPA_LOAD_OPEN ||
+ spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
metaslab_group_destroy(vd->vdev_mg);
}
- ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
- ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
- ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
+ ASSERT0(vd->vdev_stat.vs_space);
+ ASSERT0(vd->vdev_stat.vs_dspace);
+ ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
+
+ if (vd->vdev_enc_sysfs_path)
+ spa_strfree(vd->vdev_enc_sysfs_path);
+
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
+ space_map_close(vd->vdev_dtl_sm);
for (t = 0; t < DTL_TYPES; t++) {
- space_map_unload(&vd->vdev_dtl[t]);
- space_map_destroy(&vd->vdev_dtl[t]);
+ range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
+ range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
+ mutex_destroy(&vd->vdev_queue_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
+ zfs_ratelimit_fini(&vd->vdev_delay_rl);
+ zfs_ratelimit_fini(&vd->vdev_checksum_rl);
+
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
ASSERT(tvd == tvd->vdev_top);
+ tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
+ tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
+ svd->vdev_top_zap = 0;
+ if (tvd->vdev_mg)
+ ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_ms = svd->vdev_ms;
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
+ mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
+
+ /*
+	 * If the pool is not set to autoexpand, we also need to preserve
+	 * mvd's asize to prevent automatic expansion of cvd.
+	 * Otherwise, if we are adjusting the mirror by attaching and
+	 * detaching children of non-uniform sizes, the mirror could
+	 * autoexpand, unexpectedly requiring larger devices to
+	 * re-establish the mirror.
+ */
+ if (!cvd->vdev_spa->spa_autoexpand)
+ cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
/*
* Compute the raidz-deflation ratio. Note, we hard-code
- * in 128k (1 << 17) because it is the current "typical" blocksize.
- * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
- * or we will inconsistently account for existing bp's.
+ * in 128k (1 << 17) because it is the "typical" blocksize.
+	 * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
+ * otherwise it would inconsistently account for existing bp's.
*/
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
ASSERT(oldc <= newc);
- mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+ mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (oldc != 0) {
bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
- kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
+ vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (m = oldc; m < newc; m++) {
- space_map_obj_t smo = { 0, 0, 0 };
+ uint64_t object = 0;
+
if (txg == 0) {
- uint64_t object = 0;
error = dmu_read(mos, vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error)
return (error);
- if (object != 0) {
- dmu_buf_t *db;
- error = dmu_bonus_hold(mos, object, FTAG, &db);
- if (error)
- return (error);
- ASSERT3U(db->db_size, >=, sizeof (smo));
- bcopy(db->db_data, &smo, sizeof (smo));
- ASSERT3U(smo.smo_object, ==, object);
- dmu_buf_rele(db, FTAG);
- }
}
- vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
- m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
+
+ error = metaslab_init(vd->vdev_mg, m, object, txg,
+ &(vd->vdev_ms[m]));
+ if (error)
+ return (error);
}
if (txg == 0)
if (vd->vdev_ms != NULL) {
metaslab_group_passivate(vd->vdev_mg);
- for (m = 0; m < count; m++)
- if (vd->vdev_ms[m] != NULL)
- metaslab_fini(vd->vdev_ms[m]);
- kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
+ for (m = 0; m < count; m++) {
+ metaslab_t *msp = vd->vdev_ms[m];
+
+ if (msp != NULL)
+ metaslab_fini(msp);
+ }
+ vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
}
+
+ ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
- zio->io_offset, zio->io_size, zio->io_data,
+ zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
- zio_buf_free(zio->io_data, zio->io_size);
+ abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
- zio_buf_free(zio->io_data, zio->io_size);
+ abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
+ zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
ASSERT(zio->io_error != 0);
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, 0, 0);
- zio->io_error = ENXIO;
+ zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
- while ((pio = zio_walk_parents(zio)) != NULL)
+ zl = NULL;
+ while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
- pio->io_error = ENXIO;
+ pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
- * Determine whether this device is accessible by reading and writing
- * to several known locations: the pad regions of each vdev label
- * but the first (which we leave alone in case it contains a VTOC).
+ * Determine whether this device is accessible.
+ *
+ * Read and write to several known locations: the pad regions of each
+ * vdev label but the first, which we leave alone in case it contains
+ * a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
for (l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
- offsetof(vdev_label_t, vl_pad2)),
- VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
+ offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
+ abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
vd->vdev_open_thread = NULL;
}
-boolean_t
+static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
int c;
- if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
- strlen(ZVOL_DIR)) == 0)
+#ifdef _KERNEL
+ if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
+#endif
+
for (c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
+
return (B_FALSE);
}
* spa_namespace_lock
*/
if (vdev_uses_zvols(vd)) {
+retry_sync:
for (c = 0; c < children; c++)
vd->vdev_child[c]->vdev_open_error =
vdev_open(vd->vdev_child[c]);
- return;
+ } else {
+ tq = taskq_create("vdev_open", children, minclsyspri,
+ children, children, TASKQ_PREPOPULATE);
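+		/*
+		 * If the taskq can't be created, fall back to opening the
+		 * children synchronously.
+		 */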
+ if (tq == NULL)
+ goto retry_sync;
+
+ for (c = 0; c < children; c++)
+ VERIFY(taskq_dispatch(tq, vdev_open_child,
+ vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);
+
+ taskq_destroy(tq);
}
- tq = taskq_create("vdev_open", children, minclsyspri,
- children, children, TASKQ_PREPOPULATE);
- for (c = 0; c < children; c++)
- VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
- TQ_SLEEP) != 0);
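+	/* A parent vdev is non-rotational only if all of its children are. */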
+ vd->vdev_nonrot = B_TRUE;
- taskq_destroy(tq);
+ for (c = 0; c < children; c++)
+ vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
}
/*
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
- uint64_t asize, psize;
+ uint64_t max_osize = 0;
+ uint64_t asize, max_asize, psize;
uint64_t ashift = 0;
int c;
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
- return (ENXIO);
+ return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
- return (ENXIO);
+ return (SET_ERROR(ENXIO));
}
- error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
+ error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
/*
* Reset the vdev_reopening flag so that we actually close
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
- return (ENXIO);
+ return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
+ max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
- return (EOVERFLOW);
+ return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
+ max_asize = max_osize - (VDEV_LABEL_START_SIZE +
+ VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
- return (EOVERFLOW);
+ return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
+ max_asize = max_osize;
}
+ /*
+ * If the vdev was expanded, record this so that we can re-create the
+	 * uberblock rings in labels {2,3} during the next sync.
+ */
+ if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
+ vd->vdev_copy_uberblocks = B_TRUE;
+
vd->vdev_psize = psize;
/*
- * Make sure the allocatable size hasn't shrunk.
+ * Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
}
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
- * For testing purposes, a higher ashift can be requested.
+ * For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
- vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
+ vd->vdev_max_asize = max_asize;
+ if (vd->vdev_ashift == 0) {
+ vd->vdev_ashift = ashift; /* use detected value */
+ }
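+		/* Reject ashift values outside the supported range. */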
+ if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
+ vd->vdev_ashift > ASHIFT_MAX)) {
+ vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_BAD_ASHIFT);
+ return (SET_ERROR(EDOM));
+ }
} else {
/*
- * Make sure the alignment requirement hasn't increased.
+ * Detect if the alignment requirement has increased.
+		 * We don't want to make the pool unavailable, so just
+ * post an event instead.
*/
- if (ashift > vd->vdev_top->vdev_ashift) {
- vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_BAD_LABEL);
- return (EINVAL);
+ if (ashift > vd->vdev_top->vdev_ashift &&
+ vd->vdev_ops->vdev_op_leaf) {
+ zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
+ spa, vd, NULL, 0, 0);
}
+
+ vd->vdev_max_asize = max_asize;
}
/*
- * If all children are healthy and the asize has increased,
- * then we've experienced dynamic LUN growth. If automatic
- * expansion is enabled then use the additional space.
+ * If all children are healthy we update asize if either:
+ * The asize has increased, due to a device expansion caused by dynamic
+ * LUN growth or vdev replacement, and automatic expansion is enabled;
+ * making the additional space available.
+ *
+ * The asize has decreased, due to a device shrink usually caused by a
+ * vdev replace with a smaller device. This ensures that calculations
+	 * based on max_asize and asize (e.g. esize) are always valid. It's safe
+ * to do this as we've already validated that asize is greater than
+ * vdev_min_asize.
*/
- if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
- (vd->vdev_expanding || spa->spa_autoexpand))
+ if (vd->vdev_state == VDEV_STATE_HEALTHY &&
+ ((asize > vd->vdev_asize &&
+ (vd->vdev_expanding || spa->spa_autoexpand)) ||
+ (asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
return (error);
}
+ /*
+ * Track the min and max ashift values for normal data devices.
+ */
+ if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
+ !vd->vdev_islog && vd->vdev_aux == NULL) {
+ if (vd->vdev_ashift > spa->spa_max_ashift)
+ spa->spa_max_ashift = vd->vdev_ashift;
+ if (vd->vdev_ashift < spa->spa_min_ashift)
+ spa->spa_min_ashift = vd->vdev_ashift;
+ }
+
/*
* If a leaf vdev has a DTL, and seems healthy, then kick off a
* resilver. But don't do this if we are doing a reopen for a scrub,
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
+ * If 'strict' is false, ignore the spa guid check. This is necessary because
+ * if the machine crashed during a re-guid, the new guid might have been written
+ * to all of the vdev labels, but not the cached config. The strict check
+ * will be performed when the pool is opened again using the mos config.
+ *
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
-vdev_validate(vdev_t *vd)
+vdev_validate(vdev_t *vd, boolean_t strict)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
int c;
for (c = 0; c < vd->vdev_children; c++)
- if (vdev_validate(vd->vdev_child[c]) != 0)
- return (EBADF);
+ if (vdev_validate(vd->vdev_child[c], strict) != 0)
+ return (SET_ERROR(EBADF));
/*
* If the device has already failed, or was marked offline, don't do
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
uint64_t aux_guid = 0;
nvlist_t *nvl;
+ uint64_t txg = spa_last_synced_txg(spa) != 0 ?
+ spa_last_synced_txg(spa) : -1ULL;
- if ((label = vdev_label_read_config(vd)) == NULL) {
- vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
+ if ((label = vdev_label_read_config(vd, txg)) == NULL) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (0);
}
return (0);
}
- if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
- &guid) != 0 || guid != spa_guid(spa)) {
+ if (strict && (nvlist_lookup_uint64(label,
+ ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
+ guid != spa_guid(spa))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE)
- return (EBADF);
+ return (SET_ERROR(EBADF));
/*
* If we were able to open and validate a vdev that was
void
vdev_close(vdev_t *vd)
{
- spa_t *spa = vd->vdev_spa;
vdev_t *pvd = vd->vdev_parent;
+ ASSERTV(spa_t *spa = vd->vdev_spa);
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
!l2arc_vdev_present(vd))
l2arc_add_vdev(spa, vd);
} else {
- (void) vdev_validate(vd);
+ (void) vdev_validate(vd, B_TRUE);
}
/*
}
/*
- * Recursively initialize all labels.
+ * Recursively load DTLs and initialize all labels.
*/
- if ((error = vdev_label_init(vd, txg, isreplacing ?
+ if ((error = vdev_dtl_load(vd)) != 0 ||
+ (error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
vdev_metaslab_set_size(vdev_t *vd)
{
/*
- * Aim for roughly 200 metaslabs per vdev.
+ * Aim for roughly metaslabs_per_vdev (default 200) metaslabs per vdev.
*/
- vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
+ vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
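+/*
+ * Dirty the top-level vdev for every leaf vdev below 'vd' with the given
+ * flags in this txg.
+ */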
+void
+vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
+{
+ int c;
+
+ for (c = 0; c < vd->vdev_children; c++)
+ vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
+
+ if (vd->vdev_ops->vdev_op_leaf)
+ vdev_dirty(vd->vdev_top, flags, vd, txg);
+}
+
/*
* DTLs.
*
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
- space_map_t *sm = &vd->vdev_dtl[t];
+ range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
- mutex_enter(sm->sm_lock);
- if (!space_map_contains(sm, txg, size))
- space_map_add(sm, txg, size);
- mutex_exit(sm->sm_lock);
+ mutex_enter(rt->rt_lock);
+ if (!range_tree_contains(rt, txg, size))
+ range_tree_add(rt, txg, size);
+ mutex_exit(rt->rt_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
- space_map_t *sm = &vd->vdev_dtl[t];
+ range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
- mutex_enter(sm->sm_lock);
- if (sm->sm_space != 0)
- dirty = space_map_contains(sm, txg, size);
- mutex_exit(sm->sm_lock);
+ mutex_enter(rt->rt_lock);
+ if (range_tree_space(rt) != 0)
+ dirty = range_tree_contains(rt, txg, size);
+ mutex_exit(rt->rt_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
- space_map_t *sm = &vd->vdev_dtl[t];
+ range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
- mutex_enter(sm->sm_lock);
- empty = (sm->sm_space == 0);
- mutex_exit(sm->sm_lock);
+ mutex_enter(rt->rt_lock);
+ empty = (range_tree_space(rt) == 0);
+ mutex_exit(rt->rt_lock);
return (empty);
}
+/*
+ * Returns B_TRUE if the vdev determines the offset needs to be resilvered.
+ */
+boolean_t
+vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
+{
+ ASSERT(vd != vd->vdev_spa->spa_root_vdev);
+
+ if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
+ vd->vdev_ops->vdev_op_leaf)
+ return (B_TRUE);
+
+ return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
+}
+
+/*
+ * Returns the lowest txg in the DTL range.
+ */
+static uint64_t
+vdev_dtl_min(vdev_t *vd)
+{
+ range_seg_t *rs;
+
+ ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
+ ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
+ ASSERT0(vd->vdev_children);
+
+ rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
+ return (rs->rs_start - 1);
+}
+
+/*
+ * Returns the highest txg in the DTL.
+ */
+static uint64_t
+vdev_dtl_max(vdev_t *vd)
+{
+ range_seg_t *rs;
+
+ ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
+ ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
+ ASSERT0(vd->vdev_children);
+
+ rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
+ return (rs->rs_end);
+}
+
+/*
+ * Determine if a resilvering vdev should remove any DTL entries from
+ * its range. If the vdev was resilvering for the entire duration of the
+ * scan then it should excise that range from its DTLs. Otherwise, this
+ * vdev is considered partially resilvered and should leave its DTL
+ * entries intact. The comment in vdev_dtl_reassess() describes how we
+ * excise the DTLs.
+ */
+static boolean_t
+vdev_dtl_should_excise(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+ dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
+
+ ASSERT0(scn->scn_phys.scn_errors);
+ ASSERT0(vd->vdev_children);
+
+ if (vd->vdev_state < VDEV_STATE_DEGRADED)
+ return (B_FALSE);
+
+ if (vd->vdev_resilver_txg == 0 ||
+ range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
+ return (B_TRUE);
+
+ /*
+ * When a resilver is initiated the scan will assign the scn_max_txg
+ * value to the highest txg value that exists in all DTLs. If this
+ * device's max DTL is not part of this scan (i.e. it is not in
+	 * the range (scn_min_txg, scn_max_txg]), then it is not eligible
+ * for excision.
+ */
+ if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
+ ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
+ ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
+ ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
/*
* Reassess DTLs after a config change or scrub completion.
*/
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
mutex_enter(&vd->vdev_dtl_lock);
+
+ /*
+ * If we've completed a scan cleanly then determine
+ * if this vdev should remove any DTLs. We only want to
+ * excise regions on vdevs that were available during
+ * the entire duration of this scan.
+ */
if (scrub_txg != 0 &&
(spa->spa_scrub_started ||
- (scn && scn->scn_phys.scn_errors == 0))) {
+ (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
+ vdev_dtl_should_excise(vd)) {
/*
* We completed a scrub up to scrub_txg. If we
* did it without rebooting, then the scrub dtl
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
- space_map_ref_create(&reftree);
- space_map_ref_add_map(&reftree,
- &vd->vdev_dtl[DTL_MISSING], 1);
- space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
- space_map_ref_add_map(&reftree,
- &vd->vdev_dtl[DTL_SCRUB], 2);
- space_map_ref_generate_map(&reftree,
- &vd->vdev_dtl[DTL_MISSING], 1);
- space_map_ref_destroy(&reftree);
+ space_reftree_create(&reftree);
+ space_reftree_add_map(&reftree,
+ vd->vdev_dtl[DTL_MISSING], 1);
+ space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
+ space_reftree_add_map(&reftree,
+ vd->vdev_dtl[DTL_SCRUB], 2);
+ space_reftree_generate_map(&reftree,
+ vd->vdev_dtl[DTL_MISSING], 1);
+ space_reftree_destroy(&reftree);
}
- space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
- space_map_walk(&vd->vdev_dtl[DTL_MISSING],
- space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
+ range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
+ range_tree_walk(vd->vdev_dtl[DTL_MISSING],
+ range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
- space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
- space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
+ range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
+ range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
- space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
+ range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
- space_map_walk(&vd->vdev_dtl[DTL_MISSING],
- space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
+ range_tree_walk(vd->vdev_dtl[DTL_MISSING],
+ range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
+
+ /*
+ * If the vdev was resilvering and no longer has any
+ * DTLs then reset its resilvering flag and dirty
+ * the top level so that we persist the change.
+ */
+ if (vd->vdev_resilver_txg != 0 &&
+ range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0 &&
+ range_tree_space(vd->vdev_dtl[DTL_OUTAGE]) == 0) {
+ vd->vdev_resilver_txg = 0;
+ vdev_config_dirty(vd->vdev_top);
+ }
+
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
mutex_enter(&vd->vdev_dtl_lock);
for (t = 0; t < DTL_TYPES; t++) {
+ int c;
+
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
minref = vd->vdev_nparity + 1; /* RAID-Z */
else
minref = vd->vdev_children; /* any kind of mirror */
- space_map_ref_create(&reftree);
+ space_reftree_create(&reftree);
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
- space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
+ space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
- space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
- space_map_ref_destroy(&reftree);
+ space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
+ space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
-static int
+int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
- space_map_obj_t *smo = &vd->vdev_dtl_smo;
objset_t *mos = spa->spa_meta_objset;
- dmu_buf_t *db;
- int error;
+ int error = 0;
+ int c;
- ASSERT(vd->vdev_children == 0);
+ if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
+ ASSERT(!vd->vdev_ishole);
- if (smo->smo_object == 0)
- return (0);
+ error = space_map_open(&vd->vdev_dtl_sm, mos,
+ vd->vdev_dtl_object, 0, -1ULL, 0, &vd->vdev_dtl_lock);
+ if (error)
+ return (error);
+ ASSERT(vd->vdev_dtl_sm != NULL);
- ASSERT(!vd->vdev_ishole);
+ mutex_enter(&vd->vdev_dtl_lock);
- if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
- return (error);
+ /*
+ * Now that we've opened the space_map we need to update
+ * the in-core DTL.
+ */
+ space_map_update(vd->vdev_dtl_sm);
- ASSERT3U(db->db_size, >=, sizeof (*smo));
- bcopy(db->db_data, smo, sizeof (*smo));
- dmu_buf_rele(db, FTAG);
+ error = space_map_load(vd->vdev_dtl_sm,
+ vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
+ mutex_exit(&vd->vdev_dtl_lock);
- mutex_enter(&vd->vdev_dtl_lock);
- error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
- NULL, SM_ALLOC, smo, mos);
- mutex_exit(&vd->vdev_dtl_lock);
+ return (error);
+ }
+
+ for (c = 0; c < vd->vdev_children; c++) {
+ error = vdev_dtl_load(vd->vdev_child[c]);
+ if (error != 0)
+ break;
+ }
return (error);
}
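+/*
+ * Free a per-vdev ZAP object and remove its entry from the pool-wide
+ * list of vdev ZAPs (spa_all_vdev_zaps).
+ */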
+void
+vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
+{
+ spa_t *spa = vd->vdev_spa;
+
+ VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
+ VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
+ zapobj, tx));
+}
+
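+/*
+ * Allocate a new per-vdev ZAP object and record it in the pool-wide
+ * list of vdev ZAPs.
+ */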
+uint64_t
+vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
+{
+ spa_t *spa = vd->vdev_spa;
+ uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
+ DMU_OT_NONE, 0, tx);
+
+ ASSERT(zap != 0);
+ VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
+ zap, tx));
+
+ return (zap);
+}
+
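+/*
+ * Recursively create any missing per-vdev ZAPs: a leaf ZAP for each leaf
+ * vdev and a top-level ZAP for each top-level vdev. No ZAPs are created
+ * for holes, missing vdevs, the root vdev, or top-level vdevs that are
+ * being removed.
+ */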
+void
+vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
+{
+ uint64_t i;
+
+ if (vd->vdev_ops != &vdev_hole_ops &&
+ vd->vdev_ops != &vdev_missing_ops &&
+ vd->vdev_ops != &vdev_root_ops &&
+ !vd->vdev_top->vdev_removing) {
+ if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
+ vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
+ }
+ if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
+ vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
+ }
+ }
+ for (i = 0; i < vd->vdev_children; i++) {
+ vdev_construct_zaps(vd->vdev_child[i], tx);
+ }
+}
+
void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
- space_map_obj_t *smo = &vd->vdev_dtl_smo;
- space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
+ range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
- space_map_t smsync;
- kmutex_t smlock;
- dmu_buf_t *db;
+ range_tree_t *rtsync;
+ kmutex_t rtlock;
dmu_tx_t *tx;
+ uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(!vd->vdev_ishole);
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
- if (vd->vdev_detached) {
- if (smo->smo_object != 0) {
- int err = dmu_object_free(mos, smo->smo_object, tx);
- ASSERT3U(err, ==, 0);
- smo->smo_object = 0;
+ if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
+ mutex_enter(&vd->vdev_dtl_lock);
+ space_map_free(vd->vdev_dtl_sm, tx);
+ space_map_close(vd->vdev_dtl_sm);
+ vd->vdev_dtl_sm = NULL;
+ mutex_exit(&vd->vdev_dtl_lock);
+
+ /*
+ * We only destroy the leaf ZAP for detached leaves or for
+ * removed log devices. Removed data devices handle leaf ZAP
+ * cleanup later, once cancellation is no longer possible.
+ */
+ if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
+ vd->vdev_top->vdev_islog)) {
+ vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
+ vd->vdev_leaf_zap = 0;
}
+
dmu_tx_commit(tx);
return;
}
- if (smo->smo_object == 0) {
- ASSERT(smo->smo_objsize == 0);
- ASSERT(smo->smo_alloc == 0);
- smo->smo_object = dmu_object_alloc(mos,
- DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
- DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
- ASSERT(smo->smo_object != 0);
- vdev_config_dirty(vd->vdev_top);
+ if (vd->vdev_dtl_sm == NULL) {
+ uint64_t new_object;
+
+ new_object = space_map_alloc(mos, tx);
+ VERIFY3U(new_object, !=, 0);
+
+ VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
+ 0, -1ULL, 0, &vd->vdev_dtl_lock));
+ ASSERT(vd->vdev_dtl_sm != NULL);
}
- mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&rtlock, NULL, MUTEX_DEFAULT, NULL);
- space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
- &smlock);
+ rtsync = range_tree_create(NULL, NULL, &rtlock);
- mutex_enter(&smlock);
+ mutex_enter(&rtlock);
mutex_enter(&vd->vdev_dtl_lock);
- space_map_walk(sm, space_map_add, &smsync);
+ range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
- space_map_truncate(smo, mos, tx);
- space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
+ space_map_truncate(vd->vdev_dtl_sm, tx);
+ space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx);
+ range_tree_vacate(rtsync, NULL, NULL);
- space_map_destroy(&smsync);
+ range_tree_destroy(rtsync);
- mutex_exit(&smlock);
- mutex_destroy(&smlock);
+ mutex_exit(&rtlock);
+ mutex_destroy(&rtlock);
- VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
- dmu_buf_will_dirty(db, tx);
- ASSERT3U(db->db_size, >=, sizeof (*smo));
- bcopy(smo, db->db_data, sizeof (*smo));
- dmu_buf_rele(db, FTAG);
+ /*
+ * If the object for the space map has changed then dirty
+ * the top level so that we update the config.
+ */
+ if (object != space_map_object(vd->vdev_dtl_sm)) {
+ zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
+ "new object %llu", txg, spa_name(spa), object,
+ space_map_object(vd->vdev_dtl_sm));
+ vdev_config_dirty(vd->vdev_top);
+ }
dmu_tx_commit(tx);
+
+ mutex_enter(&vd->vdev_dtl_lock);
+ space_map_update(vd->vdev_dtl_sm);
+ mutex_exit(&vd->vdev_dtl_lock);
}
/*
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
- if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
+ if (range_tree_space(vd->vdev_dtl[DTL_MISSING]) != 0 &&
vdev_writeable(vd)) {
- space_seg_t *ss;
- ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
- thismin = ss->ss_start - 1;
- ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
- thismax = ss->ss_end;
+ thismin = vdev_dtl_min(vd);
+ thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
vdev_metaslab_init(vd, 0) != 0))
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
-
/*
* If this is a leaf vdev, load its DTL.
*/
if (!vdev_readable(vd))
return (0);
- if ((label = vdev_label_read_config(vd)) == NULL) {
+ if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
- version > SPA_VERSION ||
+ !SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
dmu_tx_t *tx;
- int m;
+ int m, i;
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
-
- if (vd->vdev_dtl_smo.smo_object) {
- ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
- (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
- vd->vdev_dtl_smo.smo_object = 0;
- }
+ ASSERT(vd == vd->vdev_top);
+ ASSERT3U(txg, ==, spa_syncing_txg(spa));
if (vd->vdev_ms != NULL) {
+ metaslab_group_t *mg = vd->vdev_mg;
+
+ metaslab_group_histogram_verify(mg);
+ metaslab_class_histogram_verify(mg->mg_class);
+
for (m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
- if (msp == NULL || msp->ms_smo.smo_object == 0)
+ if (msp == NULL || msp->ms_sm == NULL)
continue;
- ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
- (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
- msp->ms_smo.smo_object = 0;
+ mutex_enter(&msp->ms_lock);
+ /*
+ * If the metaslab was not loaded when the vdev
+ * was removed then the histogram accounting may
+ * not be accurate. Update the histogram information
+ * here so that we ensure that the metaslab group
+ * and metaslab class are up-to-date.
+ */
+ metaslab_group_histogram_remove(mg, msp);
+
+ VERIFY0(space_map_allocated(msp->ms_sm));
+ space_map_free(msp->ms_sm, tx);
+ space_map_close(msp->ms_sm);
+ msp->ms_sm = NULL;
+ mutex_exit(&msp->ms_lock);
}
+
+ metaslab_group_histogram_verify(mg);
+ metaslab_class_histogram_verify(mg->mg_class);
+ for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
+ ASSERT0(mg->mg_histogram[i]);
+
}
if (vd->vdev_ms_array) {
(void) dmu_object_free(mos, vd->vdev_ms_array, tx);
vd->vdev_ms_array = 0;
- vd->vdev_ms_shift = 0;
+ }
+
+ if (vd->vdev_islog && vd->vdev_top_zap != 0) {
+ vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
+ vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
ASSERT(!vd->vdev_ishole);
- while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+ while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
metaslab_sync_done(msp, txg);
if (reassess)
tvd = vd->vdev_top;
+ /*
+	 * If the user did a 'zpool offline -f', make the fault persist across
+ * reboots.
+ */
+ if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
+ /*
+ * There are two kinds of forced faults: temporary and
+ * persistent. Temporary faults go away at pool import, while
+ * persistent faults stay set. Both types of faults can be
+ * cleared with a zpool clear.
+ *
+ * We tell if a vdev is persistently faulted by looking at the
+ * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
+ * import then it's a persistent fault. Otherwise, it's
+ * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
+ * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
+ * tells vdev_config_generate() (which gets run later) to set
+ * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
+ */
+ vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
+ vd->vdev_tmpoffline = B_FALSE;
+ aux = VDEV_AUX_EXTERNAL;
+ } else {
+ vd->vdev_tmpoffline = B_TRUE;
+ }
+
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
}
/*
- * Online the given vdev. If 'unspare' is set, it implies two things. First,
- * any attached spare device should be detached when the device finishes
- * resilvering. Second, the online should be treated like a 'test' online case,
- * so no FMA events are generated if the device fails to open.
+ * Online the given vdev.
+ *
+ * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
+ * spare device should be detached when the device finishes resilvering.
+ * Second, the online should be treated like a 'test' online case, so no FMA
+ * events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
+ boolean_t wasoffline;
+ vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
+ wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
+ oldstate = vd->vdev_state;
+
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
+
+ if (wasoffline ||
+ (oldstate < VDEV_STATE_DEGRADED &&
+ vd->vdev_state >= VDEV_STATE_DEGRADED))
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
+
return (spa_vdev_state_exit(spa, vd, 0));
}
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
- ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0);
+ ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
-
/*
- * When reopening in reponse to a clear event, it may be due to
+ * When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to still post the ereport again.
*/
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
+ vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
spa_async_request(spa, SPA_ASYNC_RESILVER);
- spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
- !vd->vdev_cant_write && !vd->vdev_ishole);
+ !vd->vdev_cant_write && !vd->vdev_ishole &&
+ vd->vdev_mg->mg_initialized);
}
boolean_t
return (B_TRUE);
}
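+/*
+ * Aggregate a child's per-type I/O counters (ops and bytes) into the
+ * parent's stats.
+ */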
+static void
+vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
+{
+ int t;
+ for (t = 0; t < ZIO_TYPES; t++) {
+ vs->vs_ops[t] += cvs->vs_ops[t];
+ vs->vs_bytes[t] += cvs->vs_bytes[t];
+ }
+
+ cvs->vs_scan_removing = cvd->vdev_removing;
+}
+
/*
- * Get statistics for the given vdev.
+ * Get extended stats
*/
-void
-vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
+static void
+vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
- vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
- int c, t;
+ int t, b;
+ for (t = 0; t < ZIO_TYPES; t++) {
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
+ vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
- mutex_enter(&vd->vdev_stat_lock);
- bcopy(&vd->vdev_stat, vs, sizeof (*vs));
- vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
- vs->vs_state = vd->vdev_state;
- vs->vs_rsize = vdev_get_min_asize(vd);
- if (vd->vdev_ops->vdev_op_leaf)
- vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
- mutex_exit(&vd->vdev_stat_lock);
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
+ vsx->vsx_total_histo[t][b] +=
+ cvsx->vsx_total_histo[t][b];
+ }
+ }
+
+ for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
+ vsx->vsx_queue_histo[t][b] +=
+ cvsx->vsx_queue_histo[t][b];
+ }
+ vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
+ vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
+
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
+ vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
+ vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
+ }
+
+}
+
+/*
+ * Get statistics for the given vdev.
+ */
+static void
+vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
+{
+ int c, t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
- if (vd == rvd) {
- for (c = 0; c < rvd->vdev_children; c++) {
- vdev_t *cvd = rvd->vdev_child[c];
+ if (!vd->vdev_ops->vdev_op_leaf) {
+ if (vs) {
+ memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
+ memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
+ }
+ if (vsx)
+ memset(vsx, 0, sizeof (*vsx));
+
+ for (c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
+ vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
+
+ vdev_get_stats_ex_impl(cvd, cvs, cvsx);
+ if (vs)
+ vdev_get_child_stat(cvd, vs, cvs);
+ if (vsx)
+ vdev_get_child_stat_ex(cvd, vsx, cvsx);
- mutex_enter(&vd->vdev_stat_lock);
- for (t = 0; t < ZIO_TYPES; t++) {
- vs->vs_ops[t] += cvs->vs_ops[t];
- vs->vs_bytes[t] += cvs->vs_bytes[t];
- }
- cvs->vs_scan_removing = cvd->vdev_removing;
- mutex_exit(&vd->vdev_stat_lock);
+ }
+ } else {
+ /*
+ * We're a leaf. Just copy our ZIO active queue stats in. The
+ * other leaf stats are updated in vdev_stat_update().
+ */
+ if (!vsx)
+ return;
+
+ memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
+
+ for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
+ vsx->vsx_active_queue[t] =
+ vd->vdev_queue.vq_class[t].vqc_active;
+ vsx->vsx_pend_queue[t] = avl_numnodes(
+ &vd->vdev_queue.vq_class[t].vqc_queued_tree);
+ }
+ }
+}
+
+void
+vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
+{
+ vdev_t *tvd = vd->vdev_top;
+ mutex_enter(&vd->vdev_stat_lock);
+ if (vs) {
+ bcopy(&vd->vdev_stat, vs, sizeof (*vs));
+ vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
+ vs->vs_state = vd->vdev_state;
+ vs->vs_rsize = vdev_get_min_asize(vd);
+ if (vd->vdev_ops->vdev_op_leaf)
+ vs->vs_rsize += VDEV_LABEL_START_SIZE +
+ VDEV_LABEL_END_SIZE;
+ /*
+		 * Report expandable space on top-level, non-auxiliary devices
+ * only. The expandable space is reported in terms of metaslab
+ * sized units since that determines how much space the pool
+ * can expand.
+ */
+ if (vd->vdev_aux == NULL && tvd != NULL) {
+ vs->vs_esize = P2ALIGN(
+ vd->vdev_max_asize - vd->vdev_asize,
+ 1ULL << tvd->vdev_ms_shift);
+ }
+ vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
+ if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
+ !vd->vdev_ishole) {
+ vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
}
}
+
+ ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_READER) != 0);
+ vdev_get_stats_ex_impl(vd, vs, vsx);
+ mutex_exit(&vd->vdev_stat_lock);
+}
+
+void
+vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
+{
+ return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = &vd->vdev_stat;
+ vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
vs->vs_self_healed += psize;
}
- vs->vs_ops[type]++;
- vs->vs_bytes[type] += psize;
+ /*
+ * The bytes/ops/histograms are recorded at the leaf level and
+ * aggregated into the higher level vdevs in vdev_get_stats().
+ */
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
+
+ vs->vs_ops[type]++;
+ vs->vs_bytes[type] += psize;
+
+ if (flags & ZIO_FLAG_DELEGATED) {
+ vsx->vsx_agg_histo[zio->io_priority]
+ [RQ_HISTO(zio->io_size)]++;
+ } else {
+ vsx->vsx_ind_histo[zio->io_priority]
+ [RQ_HISTO(zio->io_size)]++;
+ }
+
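+		/*
+		 * io_delay tracks the time the I/O spent on the device and
+		 * io_delta its total lifetime, so their difference
+		 * approximates the time spent waiting in the queues.
+		 */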
+ if (zio->io_delta && zio->io_delay) {
+ vsx->vsx_queue_histo[zio->io_priority]
+ [L_HISTO(zio->io_delta - zio->io_delay)]++;
+ vsx->vsx_disk_histo[type]
+ [L_HISTO(zio->io_delay)]++;
+ vsx->vsx_total_histo[type]
+ [L_HISTO(zio->io_delta)]++;
+ }
+ }
mutex_exit(&vd->vdev_stat_lock);
return;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
+ /*
+	 * Since the vdev_offline() code path is already in an offline
+	 * state, we can miss a statechange event to OFFLINE. Check
+ * the previous state to catch this condition.
+ */
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (state == VDEV_STATE_OFFLINE) &&
+ (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
+ /* post an offline state change */
+ zfs_post_state_change(spa, vd, vd->vdev_prevstate);
+ }
vd->vdev_stat.vs_aux = aux;
return;
}
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
- /*
- * If we have brought this vdev back into service, we need
- * to notify fmd so that it can gracefully repair any outstanding
- * cases due to a missing device. We do this in all cases, even those
- * that probably don't correlate to a repaired fault. This is sure to
- * catch all cases, and we let the zfs-retire agent sort it out. If
- * this is a transient state it's OK, as the retire agent will
- * double-check the state of the vdev before repairing it.
- */
- if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
- vd->vdev_prevstate != state)
- zfs_post_state_change(spa, vd);
-
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
+ case VDEV_AUX_BAD_ASHIFT:
+ class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
+ break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
vd->vdev_removed = B_FALSE;
}
+ /*
+ * Notify ZED of any significant state-change on a leaf vdev.
+	 */
+ if (vd->vdev_ops->vdev_op_leaf) {
+ /* preserve original state from a vdev_reopen() */
+ if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
+ (vd->vdev_prevstate != vd->vdev_state) &&
+ (save_state <= VDEV_STATE_CLOSED))
+ save_state = vd->vdev_prevstate;
+
+ /* filter out state change due to initial vdev_open */
+ if (save_state > VDEV_STATE_CLOSED)
+ zfs_post_state_change(spa, vd, save_state);
+ }
+
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
- int c;
-
if (!vd->vdev_ops->vdev_op_leaf) {
- char *vdev_type = vd->vdev_ops->vdev_op_type;
+ const char *vdev_type = vd->vdev_ops->vdev_op_type;
- if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
- vd->vdev_children > 1) {
- return (B_FALSE);
- } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
- strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
+ if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
- }
- } else if (vd->vdev_wholedisk == 1) {
- return (B_FALSE);
}
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
int c;
ASSERT(nvd->vdev_top->vdev_islog);
- ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
+ ASSERT(spa_config_held(nvd->vdev_spa,
+ SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
for (c = 0; c < nvd->vdev_children; c++)
}
vdev_propagate_state(cvd);
}
+
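+/*
+ * Walk the vdev tree and, for each leaf, check whether any I/O has been
+ * outstanding longer than the deadman threshold; if so, post a delay
+ * zevent.
+ */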
+void
+vdev_deadman(vdev_t *vd)
+{
+ int c;
+
+ for (c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+
+ vdev_deadman(cvd);
+ }
+
+ if (vd->vdev_ops->vdev_op_leaf) {
+ vdev_queue_t *vq = &vd->vdev_queue;
+
+ mutex_enter(&vq->vq_lock);
+ if (avl_numnodes(&vq->vq_active_tree) > 0) {
+ spa_t *spa = vd->vdev_spa;
+ zio_t *fio;
+ uint64_t delta;
+
+ /*
+			 * Look at the head of all the pending queues; if any
+			 * I/O has been outstanding for longer than
+			 * spa_deadman_synctime, we log a zevent.
+ */
+ fio = avl_first(&vq->vq_active_tree);
+ delta = gethrtime() - fio->io_timestamp;
+ if (delta > spa_deadman_synctime(spa)) {
+ zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
+ "delta %lluns, last io %lluns",
+ fio->io_timestamp, delta,
+ vq->vq_io_complete_ts);
+ zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
+ spa, vd, fio, 0, 0);
+ }
+ }
+ mutex_exit(&vq->vq_lock);
+ }
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(vdev_fault);
+EXPORT_SYMBOL(vdev_degrade);
+EXPORT_SYMBOL(vdev_online);
+EXPORT_SYMBOL(vdev_offline);
+EXPORT_SYMBOL(vdev_clear);
+/* BEGIN CSTYLED */
+module_param(metaslabs_per_vdev, int, 0644);
+MODULE_PARM_DESC(metaslabs_per_vdev,
+ "Divide added vdev into approximately (but no more than) this number "
+ "of metaslabs");
+/* END CSTYLED */
+#endif