/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright 2017 Nexenta Systems, Inc.
+ * Copyright (c) 2014 Integros [integros.com]
+ * Copyright 2016 Toomas Soome <tsoome@me.com>
+ * Copyright 2017 Joyent, Inc.
+ * Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
+#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
+#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
+#include <sys/abd.h>
#include <sys/zvol.h>
+#include <sys/zfs_ratelimit.h>
+
+/* target number of metaslabs per top-level vdev */
+int vdev_max_ms_count = 200;
+
+/* minimum number of metaslabs per top-level vdev */
+int vdev_min_ms_count = 16;
+
+/* practical upper limit of total metaslabs per top-level vdev */
+int vdev_ms_count_limit = 1ULL << 17;
+
+/* lower limit for metaslab size (512M) */
+int vdev_default_ms_shift = 29;
+
+/* upper limit for metaslab size (256G) */
+int vdev_max_ms_shift = 38;
+
+int vdev_validate_skip = B_FALSE;
+
+/*
+ * Since the DTL space map of a vdev is not expected to have a lot of
+ * entries, we default its block size to 4K.
+ */
+int vdev_dtl_sm_blksz = (1 << 12);
+
+/*
+ * Rate limit delay events to this many IO delays per second.
+ */
+unsigned int zfs_delays_per_second = 20;
/*
- * When a vdev is added, it will be divided into approximately (but no
- * more than) this number of metaslabs.
+ * Rate limit checksum events after this many checksum errors per second.
*/
-int metaslabs_per_vdev = 200;
+unsigned int zfs_checksums_per_second = 20;
+
+/*
+ * Ignore errors during scrub/resilver. This allows a resilver triggered
+ * on import to make progress even when the pool has errors.
+ */
+int zfs_scan_ignore_errors = 0;
+
+/*
+ * Vdev-wide space maps (e.g. vdev_obsolete_sm) that have lots of entries
+ * written to them at the end of each transaction can benefit from a
+ * higher I/O bandwidth, so we default their block size to 128K.
+ */
+int vdev_standard_sm_blksz = (1 << 17);
+
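+/*
+ * Log a debug message for this vdev, prefixed with its type and path
+ * (or with its id and guid when no path is set).
+ */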
+/*PRINTFLIKE2*/
+void
+vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
+{
+ va_list adx;
+ char buf[256];
+
+ va_start(adx, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+ va_end(adx);
+
+ if (vd->vdev_path != NULL) {
+ zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
+ vd->vdev_path, buf);
+ } else {
+ zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
+ vd->vdev_ops->vdev_op_type,
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)vd->vdev_guid, buf);
+ }
+}
+
+void
+vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
+{
+ char state[20];
+
+ if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
+ zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
+ vd->vdev_ops->vdev_op_type);
+ return;
+ }
+
+ switch (vd->vdev_state) {
+ case VDEV_STATE_UNKNOWN:
+ (void) snprintf(state, sizeof (state), "unknown");
+ break;
+ case VDEV_STATE_CLOSED:
+ (void) snprintf(state, sizeof (state), "closed");
+ break;
+ case VDEV_STATE_OFFLINE:
+ (void) snprintf(state, sizeof (state), "offline");
+ break;
+ case VDEV_STATE_REMOVED:
+ (void) snprintf(state, sizeof (state), "removed");
+ break;
+ case VDEV_STATE_CANT_OPEN:
+ (void) snprintf(state, sizeof (state), "can't open");
+ break;
+ case VDEV_STATE_FAULTED:
+ (void) snprintf(state, sizeof (state), "faulted");
+ break;
+ case VDEV_STATE_DEGRADED:
+ (void) snprintf(state, sizeof (state), "degraded");
+ break;
+ case VDEV_STATE_HEALTHY:
+ (void) snprintf(state, sizeof (state), "healthy");
+ break;
+ default:
+ (void) snprintf(state, sizeof (state), "<state %u>",
+ (uint_t)vd->vdev_state);
+ }
+
+ zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
+ "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
+ vd->vdev_islog ? " (log)" : "",
+ (u_longlong_t)vd->vdev_guid,
+ vd->vdev_path ? vd->vdev_path : "N/A", state);
+
+ for (uint64_t i = 0; i < vd->vdev_children; i++)
+ vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
+}
/*
* Virtual device management.
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
+ &vdev_indirect_ops,
NULL
};
return (ops);
}
+/*
+ * Derive the enumerated allocation bias from string input.
+ * String origin is either the per-vdev zap or zpool(1M).
+ */
+static vdev_alloc_bias_t
+vdev_derive_alloc_bias(const char *bias)
+{
+ vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
+
+ if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
+ alloc_bias = VDEV_BIAS_LOG;
+ else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
+ alloc_bias = VDEV_BIAS_SPECIAL;
+ else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
+ alloc_bias = VDEV_BIAS_DEDUP;
+
+ return (alloc_bias);
+}
+
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
- int c;
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
* so each child must provide at least 1/Nth of its asize.
*/
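+ /* round up so that N children of this minimum size still cover min_asize */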
if (pvd->vdev_ops == &vdev_raidz_ops)
- return (pvd->vdev_min_asize / pvd->vdev_children);
+ return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
+ pvd->vdev_children);
return (pvd->vdev_min_asize);
}
void
vdev_set_min_asize(vdev_t *vd)
{
- int c;
vd->vdev_min_asize = vdev_get_min_asize(vd);
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
- int c;
if (vd->vdev_guid == guid)
return (vd);
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
- int c;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
int
vdev_count_leaves(spa_t *spa)
{
- return (vdev_count_leaves_impl(spa->spa_root_vdev));
+ int rc;
+
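+ /* hold SCL_VDEV so the vdev tree can't change beneath us */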
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ rc = vdev_count_leaves_impl(spa->spa_root_vdev);
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ return (rc);
}
void
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
- int c;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- for (c = newc = 0; c < oldc; c++)
+ if (oldc == 0)
+ return;
+
+ for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
- newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
+ if (newc > 0) {
+ newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
- for (c = newc = 0; c < oldc; c++) {
- if ((cvd = pvd->vdev_child[c]) != NULL) {
- newchild[newc] = cvd;
- cvd->vdev_id = newc++;
+ for (int c = newc = 0; c < oldc; c++) {
+ if ((cvd = pvd->vdev_child[c]) != NULL) {
+ newchild[newc] = cvd;
+ cvd->vdev_id = newc++;
+ }
}
+ } else {
+ newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
- int t;
+ vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+ vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
+ vic->vic_prev_indirect_vdev = UINT64_MAX;
+
+ rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
+ mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
+ vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);
+
+ /*
+ * Initialize rate limit structs for events. We rate limit ZIO delay
+ * and checksum events so that we don't overwhelm ZED with thousands
+ * of events when a disk is acting up.
+ */
+ zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_delays_per_second, 1);
+ zfs_ratelimit_init(&vd->vdev_checksum_rl, &zfs_checksums_per_second, 1);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
- for (t = 0; t < DTL_TYPES; t++) {
- vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
- &vd->vdev_dtl_lock);
+ mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
+
+ for (int t = 0; t < DTL_TYPES; t++) {
+ vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
}
- txg_list_create(&vd->vdev_ms_list,
+ txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
- txg_list_create(&vd->vdev_dtl_list,
+ txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
char *type;
uint64_t guid = 0, islog, nparity;
vdev_t *vd;
+ vdev_indirect_config_t *vic;
+ char *tmp = NULL;
+ int rc;
+ vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
+ boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
}
ASSERT(nparity != -1ULL);
+ /*
+ * If creating a top-level vdev, check for allocation classes input.
+ */
+ if (top_level && alloctype == VDEV_ALLOC_ADD) {
+ char *bias;
+
+ if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
+ &bias) == 0) {
+ alloc_bias = vdev_derive_alloc_bias(bias);
+
+ /* spa_vdev_add() expects feature to be enabled */
+ if (spa->spa_load_state != SPA_LOAD_CREATE &&
+ !spa_feature_is_enabled(spa,
+ SPA_FEATURE_ALLOCATION_CLASSES)) {
+ return (SET_ERROR(ENOTSUP));
+ }
+ }
+ }
+
vd = vdev_alloc_common(spa, id, guid, ops);
+ vic = &vd->vdev_indirect_config;
vd->vdev_islog = islog;
vd->vdev_nparity = nparity;
+ if (top_level && alloc_bias != VDEV_BIAS_NONE)
+ vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
vd->vdev_path = spa_strdup(vd->vdev_path);
+
+ /*
+ * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
+ * fault on a vdev and want it to persist across imports (like with
+ * zpool offline -f).
+ */
+ rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
+ if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
+ vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
+ vd->vdev_faulted = 1;
+ vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
+ }
+
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
vd->vdev_devid = spa_strdup(vd->vdev_devid);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
&vd->vdev_physpath) == 0)
vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
+
+ if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
+ &vd->vdev_enc_sysfs_path) == 0)
+ vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);
+
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
vd->vdev_fru = spa_strdup(vd->vdev_fru);
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
+ ASSERT0(vic->vic_mapping_object);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
+ &vic->vic_mapping_object);
+ ASSERT0(vic->vic_births_object);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
+ &vic->vic_births_object);
+ ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
+ &vic->vic_prev_indirect_vdev);
+
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
- if (parent && !parent->vdev_parent &&
+ if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
ASSERT0(vd->vdev_top_zap);
}
- if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
+ if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
- vd->vdev_mg = metaslab_group_create(islog ?
- spa_log_class(spa) : spa_normal_class(spa), vd);
+ /* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
&vd->vdev_resilver_txg);
/*
- * When importing a pool, we want to ignore the persistent fault
- * state, as the diagnosis made on another system may not be
- * valid in the current context. Local vdevs will
- * remain in the faulted state.
+ * In general, when importing a pool we want to ignore the
+ * persistent fault state, as the diagnosis made on another
+ * system may not be valid in the current context. The only
+ * exception is if we forced a vdev to a persistently faulted
+ * state with 'zpool offline -f'. The persistent fault will
+ * remain across imports until cleared.
+ *
+ * Local vdevs will remain in the faulted state.
*/
- if (spa_load_state(spa) == SPA_LOAD_OPEN) {
+ if (spa_load_state(spa) == SPA_LOAD_OPEN ||
+ spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
+ else
+ vd->vdev_faulted = 0ULL;
}
}
}
void
vdev_free(vdev_t *vd)
{
- int c, t;
spa_t *spa = vd->vdev_spa;
+ /*
+ * Scan queues are normally destroyed at the end of a scan. If the
+ * queue exists here, that implies the vdev is being removed while
+ * the scan is still running.
+ */
+ if (vd->vdev_scan_io_queue != NULL) {
+ mutex_enter(&vd->vdev_scan_io_queue_lock);
+ dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
+ vd->vdev_scan_io_queue = NULL;
+ mutex_exit(&vd->vdev_scan_io_queue_lock);
+ }
+
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
/*
* Free all children.
*/
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
+
+ if (vd->vdev_enc_sysfs_path)
+ spa_strfree(vd->vdev_enc_sysfs_path);
+
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
- for (t = 0; t < DTL_TYPES; t++) {
+ for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
+ EQUIV(vd->vdev_indirect_births != NULL,
+ vd->vdev_indirect_mapping != NULL);
+ if (vd->vdev_indirect_births != NULL) {
+ vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
+ vdev_indirect_births_close(vd->vdev_indirect_births);
+ }
+
+ if (vd->vdev_obsolete_sm != NULL) {
+ ASSERT(vd->vdev_removing ||
+ vd->vdev_ops == &vdev_indirect_ops);
+ space_map_close(vd->vdev_obsolete_sm);
+ vd->vdev_obsolete_sm = NULL;
+ }
+ range_tree_destroy(vd->vdev_obsolete_segments);
+ rw_destroy(&vd->vdev_indirect_rwlock);
+ mutex_destroy(&vd->vdev_obsolete_lock);
+
+ mutex_destroy(&vd->vdev_queue_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
+ mutex_destroy(&vd->vdev_scan_io_queue_lock);
+
+ zfs_ratelimit_fini(&vd->vdev_delay_rl);
+ zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
ASSERT(tvd == tvd->vdev_top);
+ tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
+ tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
+ svd->vdev_checkpoint_sm = NULL;
+
+ tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
+ svd->vdev_alloc_bias = VDEV_BIAS_NONE;
+
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
+ /*
+ * State which may be set on a top-level vdev that's in the
+ * process of being removed.
+ */
+ ASSERT0(tvd->vdev_indirect_config.vic_births_object);
+ ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
+ ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
+ ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
+ ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
+ ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0(tvd->vdev_removing);
+ tvd->vdev_removing = svd->vdev_removing;
+ tvd->vdev_indirect_config = svd->vdev_indirect_config;
+ tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
+ tvd->vdev_indirect_births = svd->vdev_indirect_births;
+ range_tree_swap(&svd->vdev_obsolete_segments,
+ &tvd->vdev_obsolete_segments);
+ tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
+ svd->vdev_indirect_config.vic_mapping_object = 0;
+ svd->vdev_indirect_config.vic_births_object = 0;
+ svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
+ svd->vdev_indirect_mapping = NULL;
+ svd->vdev_indirect_births = NULL;
+ svd->vdev_obsolete_sm = NULL;
+ svd->vdev_removing = 0;
+
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
+
+ dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
- int c;
-
if (vd == NULL)
return;
vd->vdev_top = tvd;
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
+ mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_free(mvd);
}
+static void
+vdev_metaslab_group_create(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+
+ /*
+ * metaslab_group_create() was deferred until the allocation bias was available.
+ */
+ if (vd->vdev_mg == NULL) {
+ metaslab_class_t *mc;
+
+ if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
+ vd->vdev_alloc_bias = VDEV_BIAS_LOG;
+
+ ASSERT3U(vd->vdev_islog, ==,
+ (vd->vdev_alloc_bias == VDEV_BIAS_LOG));
+
+ switch (vd->vdev_alloc_bias) {
+ case VDEV_BIAS_LOG:
+ mc = spa_log_class(spa);
+ break;
+ case VDEV_BIAS_SPECIAL:
+ mc = spa_special_class(spa);
+ break;
+ case VDEV_BIAS_DEDUP:
+ mc = spa_dedup_class(spa);
+ break;
+ default:
+ mc = spa_normal_class(spa);
+ }
+
+ vd->vdev_mg = metaslab_group_create(mc, vd,
+ spa->spa_alloc_count);
+
+ /*
+ * The spa ashift values currently only reflect the
+ * general vdev classes. Class destination is late
+ * binding, so ashift checking had to wait until now.
+ */
+ if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
+ mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
+ if (vd->vdev_ashift > spa->spa_max_ashift)
+ spa->spa_max_ashift = vd->vdev_ashift;
+ if (vd->vdev_ashift < spa->spa_min_ashift)
+ spa->spa_min_ashift = vd->vdev_ashift;
+ }
+ }
+}
+
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
+ boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
ASSERT(!vd->vdev_ishole);
- /*
- * Compute the raidz-deflation ratio. Note, we hard-code
- * in 128k (1 << 17) because it is the "typical" blocksize.
- * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
- * otherwise it would inconsistently account for existing bp's.
- */
- vd->vdev_deflate_ratio = (1 << 17) /
- (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
-
ASSERT(oldc <= newc);
- mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+ mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
- if (oldc != 0) {
+ if (expanding) {
bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
- kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
+ vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
-
for (m = oldc; m < newc; m++) {
uint64_t object = 0;
- if (txg == 0) {
+ /*
+ * vdev_ms_array may be 0 if we are creating the "fake"
+ * metaslabs for an indirect vdev for zdb's leak detection.
+ * See zdb_leak_init().
+ */
+ if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(mos, vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
- if (error)
+ if (error != 0) {
+ vdev_dbgmsg(vd, "unable to read the metaslab "
+ "array [error=%d]", error);
return (error);
+ }
}
+#ifndef _KERNEL
+ /*
+ * To accommodate zdb_leak_init() fake indirect
+ * metaslabs, we allocate a metaslab group for
+ * indirect vdevs which normally don't have one.
+ */
+ if (vd->vdev_mg == NULL) {
+ ASSERT0(vdev_is_concrete(vd));
+ vdev_metaslab_group_create(vd);
+ }
+#endif
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
- if (error)
+ if (error != 0) {
+ vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
+ error);
return (error);
+ }
}
if (txg == 0)
* the metaslabs since we want to ensure that no new
* allocations are performed on this device.
*/
- if (oldc == 0 && !vd->vdev_removing)
+ if (!expanding && !vd->vdev_removing) {
metaslab_group_activate(vd->vdev_mg);
+ }
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
void
vdev_metaslab_fini(vdev_t *vd)
{
- uint64_t m;
- uint64_t count = vd->vdev_ms_count;
+ if (vd->vdev_checkpoint_sm != NULL) {
+ ASSERT(spa_feature_is_active(vd->vdev_spa,
+ SPA_FEATURE_POOL_CHECKPOINT));
+ space_map_close(vd->vdev_checkpoint_sm);
+ /*
+ * Even though we close the space map, we need to set its
+ * pointer to NULL. The reason is that vdev_metaslab_fini()
+ * may be called multiple times for certain operations
+ * (i.e. when destroying a pool) so we need to ensure that
+ * this clause never executes twice. This logic is similar
+ * to the one used for the vdev_ms clause below.
+ */
+ vd->vdev_checkpoint_sm = NULL;
+ }
if (vd->vdev_ms != NULL) {
+ uint64_t count = vd->vdev_ms_count;
+
metaslab_group_passivate(vd->vdev_mg);
- for (m = 0; m < count; m++) {
+ for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
- kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
+ vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
- }
+ vd->vdev_ms_count = 0;
+ }
+ ASSERT0(vd->vdev_ms_count);
ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
- zio->io_offset, zio->io_size, zio->io_data,
+ zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
- zio_buf_free(zio->io_data, zio->io_size);
+ abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
- zio_buf_free(zio->io_data, zio->io_size);
+ abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
+ zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
+ vdev_dbgmsg(vd, "failed probe");
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
- spa, vd, NULL, 0, 0);
+ spa, vd, NULL, NULL, 0, 0);
zio->io_error = SET_ERROR(ENXIO);
}
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
- while ((pio = zio_walk_parents(zio)) != NULL)
+ zl = NULL;
+ while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
- int l;
ASSERT(vd->vdev_ops->vdev_op_leaf);
return (NULL);
}
- for (l = 1; l < VDEV_LABELS; l++) {
+ for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
- offsetof(vdev_label_t, vl_pad2)),
- VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
+ offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
+ abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
- vd->vdev_parent->vdev_nonrot &= vd->vdev_nonrot;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
- int c;
-
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
{
taskq_t *tq;
int children = vd->vdev_children;
- int c;
-
- vd->vdev_nonrot = B_TRUE;
/*
* in order to handle pools on top of zvols, do the opens
* spa_namespace_lock
*/
if (vdev_uses_zvols(vd)) {
- for (c = 0; c < children; c++) {
+retry_sync:
+ for (int c = 0; c < children; c++)
vd->vdev_child[c]->vdev_open_error =
vdev_open(vd->vdev_child[c]);
- vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
- }
- return;
- }
- tq = taskq_create("vdev_open", children, minclsyspri,
- children, children, TASKQ_PREPOPULATE);
+ } else {
+ tq = taskq_create("vdev_open", children, minclsyspri,
+ children, children, TASKQ_PREPOPULATE);
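+ /* fall back to synchronous opens if the taskq can't be created */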
+ if (tq == NULL)
+ goto retry_sync;
- for (c = 0; c < children; c++)
- VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
- TQ_SLEEP) != 0);
+ for (int c = 0; c < children; c++)
+ VERIFY(taskq_dispatch(tq, vdev_open_child,
+ vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);
- taskq_destroy(tq);
+ taskq_destroy(tq);
+ }
+
+ vd->vdev_nonrot = B_TRUE;
- for (c = 0; c < children; c++)
+ for (int c = 0; c < children; c++)
vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
}
+/*
+ * Compute the raidz-deflation ratio. Note that we hard-code 128k
+ * (1 << 17) because it is the "typical" blocksize.
+ * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change;
+ * otherwise it would inconsistently account for existing bp's.
+ */
+static void
+vdev_set_deflate_ratio(vdev_t *vd)
+{
+ if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
+ vd->vdev_deflate_ratio = (1 << 17) /
+ (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
+ }
+}
+
/*
* Prepare a virtual device for access.
*/
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t ashift = 0;
- int c;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
- vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
- vd->vdev_stat.vs_aux);
+ if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
+ vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
+ vd->vdev_stat.vs_aux);
+ } else {
+ vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
+ vd->vdev_stat.vs_aux);
+ }
return (error);
}
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
max_asize = max_osize;
}
+ /*
+ * If the vdev was expanded, record this so that we can re-create the
+ * uberblock rings in labels {2,3} during the next sync.
+ */
+ if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
+ vd->vdev_copy_uberblocks = B_TRUE;
+
vd->vdev_psize = psize;
/*
- * Make sure the allocatable size hasn't shrunk.
+ * Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
- if (vd->vdev_ashift == 0)
- vd->vdev_ashift = ashift;
+ if (vd->vdev_ashift == 0) {
+ vd->vdev_ashift = ashift; /* use detected value */
+ }
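+ /* make sure the chosen ashift falls within the supported range */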
+ if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
+ vd->vdev_ashift > ASHIFT_MAX)) {
+ vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_BAD_ASHIFT);
+ return (SET_ERROR(EDOM));
+ }
} else {
/*
* Detect if the alignment requirement has increased.
if (ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
- spa, vd, NULL, 0, 0);
+ spa, vd, NULL, NULL, 0, 0);
}
vd->vdev_max_asize = max_asize;
}
/*
- * If all children are healthy and the asize has increased,
- * then we've experienced dynamic LUN growth. If automatic
- * expansion is enabled then use the additional space.
+ * If all children are healthy, we update asize if either:
+ *
+ * - The asize has increased, due to a device expansion caused by dynamic
+ *   LUN growth or vdev replacement, and automatic expansion is enabled;
+ *   this makes the additional space available.
+ *
+ * - The asize has decreased, due to a device shrink usually caused by a
+ *   vdev replace with a smaller device. This ensures that calculations
+ *   based on max_asize and asize (e.g. esize) are always valid. It's
+ *   safe to do this as we've already validated that asize is greater
+ *   than vdev_min_asize.
*/
- if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
- (vd->vdev_expanding || spa->spa_autoexpand))
+ if (vd->vdev_state == VDEV_STATE_HEALTHY &&
+ ((asize > vd->vdev_asize &&
+ (vd->vdev_expanding || spa->spa_autoexpand)) ||
+ (asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Track the min and max ashift values for normal data devices.
+ *
+ * DJB - TBD these should perhaps be tracked per allocation class
+ * (e.g. spa_min_ashift is used to round up post compression buffers)
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
- !vd->vdev_islog && vd->vdev_aux == NULL) {
+ vd->vdev_alloc_bias == VDEV_BIAS_NONE &&
+ vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
/*
* Called once the vdevs are all opened, this routine validates the label
- * contents. This needs to be done before vdev_load() so that we don't
+ * contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
- * If 'strict' is false ignore the spa guid check. This is necessary because
- * if the machine crashed during a re-guid the new guid might have been written
- * to all of the vdev labels, but not the cached config. The strict check
- * will be performed when the pool is opened again using the mos config.
- *
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
-vdev_validate(vdev_t *vd, boolean_t strict)
+vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
- uint64_t guid = 0, top_guid;
+ uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
- int c;
+ nvlist_t *nvl;
+ uint64_t txg;
+
+ if (vdev_validate_skip)
+ return (0);
- for (c = 0; c < vd->vdev_children; c++)
- if (vdev_validate(vd->vdev_child[c], strict) != 0)
+ for (uint64_t c = 0; c < vd->vdev_children; c++)
+ if (vdev_validate(vd->vdev_child[c]) != 0)
return (SET_ERROR(EBADF));
/*
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
- if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
- uint64_t aux_guid = 0;
- nvlist_t *nvl;
- uint64_t txg = spa_last_synced_txg(spa) != 0 ?
- spa_last_synced_txg(spa) : -1ULL;
+ if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
+ return (0);
- if ((label = vdev_label_read_config(vd, txg)) == NULL) {
- vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_BAD_LABEL);
- return (0);
- }
+ /*
+ * If we are performing an extreme rewind, we allow for a label that
+ * was modified at a point after the current txg.
+ * If the config lock is not held, do not check the txg: spa_sync could
+ * be updating the vdev's label before updating spa_last_synced_txg.
+ */
+ if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
+ spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
+ txg = UINT64_MAX;
+ else
+ txg = spa_last_synced_txg(spa);
- /*
- * Determine if this vdev has been split off into another
- * pool. If so, then refuse to open it.
- */
- if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
- &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
- vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_SPLIT_POOL);
- nvlist_free(label);
- return (0);
- }
+ if ((label = vdev_label_read_config(vd, txg)) == NULL) {
+ vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_BAD_LABEL);
+ vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
+ "txg %llu", (u_longlong_t)txg);
+ return (0);
+ }
- if (strict && (nvlist_lookup_uint64(label,
- ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
- guid != spa_guid(spa))) {
- vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_CORRUPT_DATA);
- nvlist_free(label);
- return (0);
- }
+ /*
+ * Determine if this vdev has been split off into another
+ * pool. If so, then refuse to open it.
+ */
+ if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
+ &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_SPLIT_POOL);
+ nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
+ return (0);
+ }
- if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
- != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
- &aux_guid) != 0)
- aux_guid = 0;
+ if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
+ ZPOOL_CONFIG_POOL_GUID);
+ return (0);
+ }
- /*
- * If this vdev just became a top-level vdev because its
- * sibling was detached, it will have adopted the parent's
- * vdev guid -- but the label may or may not be on disk yet.
- * Fortunately, either version of the label will have the
- * same top guid, so if we're a top-level vdev, we can
- * safely compare to that instead.
- *
- * If we split this vdev off instead, then we also check the
- * original pool's guid. We don't want to consider the vdev
- * corrupt if it is partway through a split operation.
- */
- if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
- &guid) != 0 ||
- nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
- &top_guid) != 0 ||
- ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) &&
- (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
- vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_CORRUPT_DATA);
- nvlist_free(label);
- return (0);
+ /*
+ * If config is not trusted then ignore the spa guid check. This is
+ * necessary because if the machine crashed during a re-guid the new
+ * guid might have been written to all of the vdev labels, but not the
+ * cached config. The check will be performed again once we have the
+ * trusted config from the MOS.
+ */
+ if (spa->spa_trust_config && guid != spa_guid(spa)) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
+ "match config (%llu != %llu)", (u_longlong_t)guid,
+ (u_longlong_t)spa_guid(spa));
+ return (0);
+ }
+
+ if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
+ != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
+ &aux_guid) != 0)
+ aux_guid = 0;
+
+ if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
+ ZPOOL_CONFIG_GUID);
+ return (0);
+ }
+
+ if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
+ != 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
+ ZPOOL_CONFIG_TOP_GUID);
+ return (0);
+ }
+
+ /*
+ * If this vdev just became a top-level vdev because its sibling was
+ * detached, it will have adopted the parent's vdev guid -- but the
+ * label may or may not be on disk yet. Fortunately, either version
+ * of the label will have the same top guid, so if we're a top-level
+ * vdev, we can safely compare to that instead.
+ * However, if the config comes from a cachefile that failed to update
+ * after the detach, a top-level vdev will appear as a non top-level
+ * vdev in the config. Also relax the constraints if we perform an
+ * extreme rewind.
+ *
+ * If we split this vdev off instead, then we also check the
+ * original pool's guid. We don't want to consider the vdev
+ * corrupt if it is partway through a split operation.
+ */
+ if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
+ boolean_t mismatch = B_FALSE;
+ if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
+ if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
+ mismatch = B_TRUE;
+ } else {
+ if (vd->vdev_guid != top_guid &&
+ vd->vdev_top->vdev_guid != guid)
+ mismatch = B_TRUE;
}
- if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
- &state) != 0) {
+ if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: config guid "
+ "doesn't match label guid");
+ vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
+ (u_longlong_t)vd->vdev_guid,
+ (u_longlong_t)vd->vdev_top->vdev_guid);
+ vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
+ "aux_guid %llu", (u_longlong_t)guid,
+ (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
+ }
+ if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
+ &state) != 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
+ ZPOOL_CONFIG_POOL_STATE);
+ return (0);
+ }
- /*
- * If this is a verbatim import, no need to check the
- * state of the pool.
- */
- if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
- spa_load_state(spa) == SPA_LOAD_OPEN &&
- state != POOL_STATE_ACTIVE)
- return (SET_ERROR(EBADF));
+ nvlist_free(label);
- /*
- * If we were able to open and validate a vdev that was
- * previously marked permanently unavailable, clear that state
- * now.
- */
- if (vd->vdev_not_present)
- vd->vdev_not_present = 0;
+ /*
+ * If this is a verbatim import, no need to check the
+ * state of the pool.
+ */
+ if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
+ spa_load_state(spa) == SPA_LOAD_OPEN &&
+ state != POOL_STATE_ACTIVE) {
+ vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
+ "for spa %s", (u_longlong_t)state, spa->spa_name);
+ return (SET_ERROR(EBADF));
+ }
+
+ /*
+ * If we were able to open and validate a vdev that was
+ * previously marked permanently unavailable, clear that state
+ * now.
+ */
+ if (vd->vdev_not_present)
+ vd->vdev_not_present = 0;
+
+ return (0);
+}
+
+static void
+vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
+{
+ if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
+ if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
+ zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
+ "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
+ dvd->vdev_path, svd->vdev_path);
+ spa_strfree(dvd->vdev_path);
+ dvd->vdev_path = spa_strdup(svd->vdev_path);
+ }
+ } else if (svd->vdev_path != NULL) {
+ dvd->vdev_path = spa_strdup(svd->vdev_path);
+ zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
+ (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
+ }
+}
+
+/*
+ * Recursively copy vdev paths from one vdev to another. Source and destination
+ * vdev trees must have the same geometry, otherwise return an error.
+ * Intended to copy paths from userland config into MOS config.
+ */
+int
+vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
+{
+ if ((svd->vdev_ops == &vdev_missing_ops) ||
+ (svd->vdev_ishole && dvd->vdev_ishole) ||
+ (dvd->vdev_ops == &vdev_indirect_ops))
+ return (0);
+
+ if (svd->vdev_ops != dvd->vdev_ops) {
+ vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
+ svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
+ return (SET_ERROR(EINVAL));
+ }
+
+ if (svd->vdev_guid != dvd->vdev_guid) {
+ vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
+ "%llu)", (u_longlong_t)svd->vdev_guid,
+ (u_longlong_t)dvd->vdev_guid);
+ return (SET_ERROR(EINVAL));
}
+ if (svd->vdev_children != dvd->vdev_children) {
+ vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
+ "%llu != %llu", (u_longlong_t)svd->vdev_children,
+ (u_longlong_t)dvd->vdev_children);
+ return (SET_ERROR(EINVAL));
+ }
+
+ for (uint64_t i = 0; i < svd->vdev_children; i++) {
+ int error = vdev_copy_path_strict(svd->vdev_child[i],
+ dvd->vdev_child[i]);
+ if (error != 0)
+ return (error);
+ }
+
+ if (svd->vdev_ops->vdev_op_leaf)
+ vdev_copy_path_impl(svd, dvd);
+
return (0);
}
+static void
+vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
+{
+ ASSERT(stvd->vdev_top == stvd);
+ ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
+
+ for (uint64_t i = 0; i < dvd->vdev_children; i++) {
+ vdev_copy_path_search(stvd, dvd->vdev_child[i]);
+ }
+
+ if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
+ return;
+
+ /*
+ * The idea here is that while a vdev can shift positions within
+ * a top vdev (when replacing, attaching a mirror, etc.), it cannot
+ * step outside of it.
+ */
+ vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
+
+ if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
+ return;
+
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
+
+ vdev_copy_path_impl(vd, dvd);
+}
+
+/*
+ * Recursively copy vdev paths from one root vdev to another. Source and
+ * destination vdev trees may differ in geometry. For each destination leaf
+ * vdev, search for a vdev with the same guid and top vdev id in the source.
+ * Intended to copy paths from userland config into MOS config.
+ */
+void
+vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
+{
+ uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
+ ASSERT(srvd->vdev_ops == &vdev_root_ops);
+ ASSERT(drvd->vdev_ops == &vdev_root_ops);
+
+ for (uint64_t i = 0; i < children; i++) {
+ vdev_copy_path_search(srvd->vdev_child[i],
+ drvd->vdev_child[i]);
+ }
+}
+
/*
* Close a virtual device.
*/
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
- int c;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf)
void
vdev_rele(vdev_t *vd)
{
- int c;
-
ASSERT(spa_is_root(vd->vdev_spa));
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf)
!l2arc_vdev_present(vd))
l2arc_add_vdev(spa, vd);
} else {
- (void) vdev_validate(vd, B_TRUE);
+ (void) vdev_validate(vd);
}
/*
void
vdev_metaslab_set_size(vdev_t *vd)
{
+ uint64_t asize = vd->vdev_asize;
+ uint64_t ms_count = asize >> vdev_default_ms_shift;
+ uint64_t ms_shift;
+
/*
- * Aim for roughly metaslabs_per_vdev (default 200) metaslabs per vdev.
+ * There are two dimensions to the metaslab sizing calculation:
+ * the size of the metaslab and the count of metaslabs per vdev.
+ * In general, we aim for vdev_max_ms_count (200) metaslabs. The
+ * ranges of the dimensions are as follows:
+ *
+ * 2^29 <= ms_size <= 2^38
+ * 16 <= ms_count <= 131,072
+ *
+ * On the lower end of vdev sizes, we aim for metaslab sizes of
+ * at least 512MB (2^29) to minimize fragmentation effects when
+ * testing with smaller devices. However, the count constraint
+ * of at least 16 metaslabs will override this minimum size goal.
+ *
+ * On the upper end of vdev sizes, we aim for a maximum metaslab
+ * size of 256GB. However, we will cap the total count to 2^17
+ * metaslabs to keep our memory footprint in check.
+ *
+ * The net effect of applying the above constraints is summarized below.
+ *
+ * vdev size      metaslab count
+ * ---------------|-----------------
+ * < 8GB          ~16
+ * 8GB - 100GB    one per 512MB
+ * 100GB - 50TB   ~200
+ * 50TB - 32PB    one per 256GB
+ * > 32PB         ~131,072
+ * -------------------------------
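+ *
+ * For example, with the defaults above, a 1TB (2^40 byte) vdev gets an
+ * initial ms_count of 2^40 >> 29 = 2048; that exceeds vdev_max_ms_count
+ * (200), so ms_shift = highbit64(2^40 / 200) = 33, yielding
+ * 2^40 >> 33 = 128 metaslabs of 8GB each.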
*/
- vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
- vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
+
+ if (ms_count < vdev_min_ms_count)
+ ms_shift = highbit64(asize / vdev_min_ms_count);
+ else if (ms_count > vdev_max_ms_count)
+ ms_shift = highbit64(asize / vdev_max_ms_count);
+ else
+ ms_shift = vdev_default_ms_shift;
+
+ if (ms_shift < SPA_MAXBLOCKSHIFT) {
+ ms_shift = SPA_MAXBLOCKSHIFT;
+ } else if (ms_shift > vdev_max_ms_shift) {
+ ms_shift = vdev_max_ms_shift;
+ /* cap the total count to constrain memory footprint */
+ if ((asize >> ms_shift) > vdev_ms_count_limit)
+ ms_shift = highbit64(asize / vdev_ms_count_limit);
+ }
+
+ vd->vdev_ms_shift = ms_shift;
+ ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
- ASSERT(!vd->vdev_ishole);
+ /* indirect vdevs don't have metaslabs or dtls */
+ ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
- int c;
-
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
- mutex_enter(rt->rt_lock);
+ mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
- mutex_exit(rt->rt_lock);
+ mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
- mutex_enter(rt->rt_lock);
- if (range_tree_space(rt) != 0)
+ /*
+ * While we are loading the pool, the DTLs have not been loaded yet.
+ * Ignore the DTLs and try all devices. This avoids a recursive
+ * mutex enter on the vdev_dtl_lock, and also makes us try hard
+ * when loading the pool (relying on the checksum to ensure that
+ * we get the right data -- note that while loading, we are
+ * only reading the MOS, which is always checksummed).
+ */
+ if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
+ return (B_FALSE);
+
+ mutex_enter(&vd->vdev_dtl_lock);
+ if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
- mutex_exit(rt->rt_lock);
+ mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
- mutex_enter(rt->rt_lock);
- empty = (range_tree_space(rt) == 0);
- mutex_exit(rt->rt_lock);
+ mutex_enter(&vd->vdev_dtl_lock);
+ empty = range_tree_is_empty(rt);
+ mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
+/*
+ * Returns B_TRUE if vdev determines offset needs to be resilvered.
+ */
+boolean_t
+vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
+{
+ ASSERT(vd != vd->vdev_spa->spa_root_vdev);
+
+ if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
+ vd->vdev_ops->vdev_op_leaf)
+ return (B_TRUE);
+
+ return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
+}
+
/*
* Returns the lowest txg in the DTL range.
*/
ASSERT0(scn->scn_phys.scn_errors);
ASSERT0(vd->vdev_children);
+ if (vd->vdev_state < VDEV_STATE_DEGRADED)
+ return (B_FALSE);
+
if (vd->vdev_resilver_txg == 0 ||
- range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
+ range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
/*
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
- int c, t, minref;
+ int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done);
- if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
+ if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
mutex_enter(&vd->vdev_dtl_lock);
+ /*
+ * If requested, pretend the scan completed cleanly.
+ */
+ if (zfs_scan_ignore_errors && scn)
+ scn->scn_phys.scn_errors = 0;
+
/*
* If we've completed a scan cleanly then determine
* if this vdev should remove any DTLs. We only want to
/*
* If the vdev was resilvering and no longer has any
- * DTLs then reset its resilvering flag.
+ * DTLs then reset its resilvering flag and dirty
+ * the top level so that we persist the change.
*/
if (vd->vdev_resilver_txg != 0 &&
- range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0 &&
- range_tree_space(vd->vdev_dtl[DTL_OUTAGE]) == 0)
+ range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
+ range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
vd->vdev_resilver_txg = 0;
+ vdev_config_dirty(vd->vdev_top);
+ }
mutex_exit(&vd->vdev_dtl_lock);
}
mutex_enter(&vd->vdev_dtl_lock);
- for (t = 0; t < DTL_TYPES; t++) {
- int c;
-
+ for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int error = 0;
- int c;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
- ASSERT(!vd->vdev_ishole);
+ ASSERT(vdev_is_concrete(vd));
error = space_map_open(&vd->vdev_dtl_sm, mos,
- vd->vdev_dtl_object, 0, -1ULL, 0, &vd->vdev_dtl_lock);
+ vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
return (error);
}
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
return (error);
}
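+/*
+ * Persist the vdev's allocation bias in its top-level ZAP, and activate
+ * the allocation classes feature when a special or dedup bias is set.
+ */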
+static void
+vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
+{
+ spa_t *spa = vd->vdev_spa;
+ objset_t *mos = spa->spa_meta_objset;
+ vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
+ const char *string;
+
+ ASSERT(alloc_bias != VDEV_BIAS_NONE);
+
+ string =
+ (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
+ (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
+ (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
+
+ ASSERT(string != NULL);
+ VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
+ 1, strlen(string) + 1, string, tx));
+
+ if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
+ spa_activate_allocation_classes(spa, tx);
+ }
+}
+
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
- uint64_t i;
-
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
+ if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
+ vdev_zap_allocation_data(vd, tx);
}
}
- for (i = 0; i < vd->vdev_children; i++) {
+
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
- kmutex_t rtlock;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
- ASSERT(!vd->vdev_ishole);
+ ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
- new_object = space_map_alloc(mos, tx);
+ new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
- 0, -1ULL, 0, &vd->vdev_dtl_lock));
+ 0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
- mutex_init(&rtlock, NULL, MUTEX_DEFAULT, NULL);
-
- rtsync = range_tree_create(NULL, NULL, &rtlock);
-
- mutex_enter(&rtlock);
+ rtsync = range_tree_create(NULL, NULL);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
- space_map_truncate(vd->vdev_dtl_sm, tx);
- space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx);
+ space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx);
+ space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
- mutex_exit(&rtlock);
- mutex_destroy(&rtlock);
-
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
- zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
- "new object %llu", txg, spa_name(spa), object,
- space_map_object(vd->vdev_dtl_sm));
+ vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
+ "new object %llu", (u_longlong_t)txg, spa_name(spa),
+ (u_longlong_t)object,
+ (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
- int c;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
- if (range_tree_space(vd->vdev_dtl[DTL_MISSING]) != 0 &&
+ if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
return (needed);
}
-void
+/*
+ * Gets the checkpoint space map object from the vdev's ZAP.
+ * Returns the spacemap object, or 0 if it wasn't in the ZAP,
+ * the ZAP doesn't exist yet, or the ZAP is damaged.
+ */
+int
+vdev_checkpoint_sm_object(vdev_t *vd)
+{
+ ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+ if (vd->vdev_top_zap == 0) {
+ return (0);
+ }
+
+ uint64_t sm_obj = 0;
+ int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
+ VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
+
+ if (err != 0 && err != ENOENT) {
+ vdev_dbgmsg(vd, "vdev_load: vdev_checkpoint_sm_objset "
+ "failed to retrieve checkpoint space map object from "
+ "vdev ZAP [error=%d]", err);
+ ASSERT3S(err, ==, ECKSUM);
+ }
+
+ return (sm_obj);
+}
+
+int
vdev_load(vdev_t *vd)
{
- int c;
+ int error = 0;
/*
* Recursively load all children.
*/
- for (c = 0; c < vd->vdev_children; c++)
- vdev_load(vd->vdev_child[c]);
+ for (int c = 0; c < vd->vdev_children; c++) {
+ error = vdev_load(vd->vdev_child[c]);
+ if (error != 0) {
+ return (error);
+ }
+ }
+
+ vdev_set_deflate_ratio(vd);
+
+ /*
+ * On the spa_load path, grab the allocation bias from our zap.
+ */
+ if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
+ spa_t *spa = vd->vdev_spa;
+ char bias_str[64];
+
+ if (zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
+ bias_str) == 0) {
+ ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
+ vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
+ }
+ }
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
- if (vd == vd->vdev_top && !vd->vdev_ishole &&
- (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
- vdev_metaslab_init(vd, 0) != 0))
- vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_CORRUPT_DATA);
+ if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
+ vdev_metaslab_group_create(vd);
+
+ if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
+ "asize=%llu", (u_longlong_t)vd->vdev_ashift,
+ (u_longlong_t)vd->vdev_asize);
+ return (SET_ERROR(ENXIO));
+ } else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
+ vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
+ "[error=%d]", error);
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ return (error);
+ }
+
+ uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
+ if (checkpoint_sm_obj != 0) {
+ objset_t *mos = spa_meta_objset(vd->vdev_spa);
+ ASSERT(vd->vdev_asize != 0);
+ ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
+
+ if ((error = space_map_open(&vd->vdev_checkpoint_sm,
+ mos, checkpoint_sm_obj, 0, vd->vdev_asize,
+ vd->vdev_ashift))) {
+ vdev_dbgmsg(vd, "vdev_load: space_map_open "
+ "failed for checkpoint spacemap (obj %llu) "
+ "[error=%d]",
+ (u_longlong_t)checkpoint_sm_obj, error);
+ return (error);
+ }
+ ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
+ space_map_update(vd->vdev_checkpoint_sm);
+
+ /*
+ * Since the checkpoint_sm contains free entries
+ * exclusively, we can use sm_alloc to indicate the
+ * cumulative checkpointed space that has been freed.
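+ * (sm_alloc can only be zero or negative here, so its
+ * negation below yields the freed byte count.)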
+ */
+ vd->vdev_stat.vs_checkpoint_space =
+ -vd->vdev_checkpoint_sm->sm_alloc;
+ vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
+ vd->vdev_stat.vs_checkpoint_space;
+ }
+ }
/*
* If this is a leaf vdev, load its DTL.
*/
- if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
+ if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
+ vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
+ "[error=%d]", error);
+ return (error);
+ }
+
+ uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
+ if (obsolete_sm_object != 0) {
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+ ASSERT(vd->vdev_asize != 0);
+ ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
+
+ if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
+ obsolete_sm_object, 0, vd->vdev_asize, 0))) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
+ "obsolete spacemap (obj %llu) [error=%d]",
+ (u_longlong_t)obsolete_sm_object, error);
+ return (error);
+ }
+ space_map_update(vd->vdev_obsolete_sm);
+ }
+
+ return (0);
}
/*
return (0);
}
+/*
+ * Free the objects used to store this vdev's spacemaps, and the array
+ * that points to them.
+ */
void
-vdev_remove(vdev_t *vd, uint64_t txg)
+vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
+{
+ if (vd->vdev_ms_array == 0)
+ return;
+
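+ /*
+ * vdev_ms_array is an on-disk array holding one space map
+ * object number (possibly 0) per metaslab; read it in so each
+ * referenced space map can be freed before the array itself.
+ */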
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+ uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
+ size_t array_bytes = array_count * sizeof (uint64_t);
+ uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
+ VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
+ array_bytes, smobj_array, 0));
+
+ for (uint64_t i = 0; i < array_count; i++) {
+ uint64_t smobj = smobj_array[i];
+ if (smobj == 0)
+ continue;
+
+ space_map_free_obj(mos, smobj, tx);
+ }
+
+ kmem_free(smobj_array, array_bytes);
+ VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
+ vd->vdev_ms_array = 0;
+}
+
+static void
+vdev_remove_empty(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
- objset_t *mos = spa->spa_meta_objset;
dmu_tx_t *tx;
- int m, i;
- tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
- for (m = 0; m < vd->vdev_ms_count; m++) {
+ for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp == NULL || msp->ms_sm == NULL)
metaslab_group_histogram_remove(mg, msp);
VERIFY0(space_map_allocated(msp->ms_sm));
- space_map_free(msp->ms_sm, tx);
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
mutex_exit(&msp->ms_lock);
}
+ if (vd->vdev_checkpoint_sm != NULL) {
+ ASSERT(spa_has_checkpoint(spa));
+ space_map_close(vd->vdev_checkpoint_sm);
+ vd->vdev_checkpoint_sm = NULL;
+ }
+
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
- for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
- ASSERT0(mg->mg_histogram[i]);
+ for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
+ ASSERT0(mg->mg_histogram[i]);
}
- if (vd->vdev_ms_array) {
- (void) dmu_object_free(mos, vd->vdev_ms_array, tx);
- vd->vdev_ms_array = 0;
- }
+ tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
+ vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_islog && vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
- ASSERT(!vd->vdev_ishole);
+ ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
metaslab_sync_done(msp, txg);
metaslab_t *msp;
dmu_tx_t *tx;
- ASSERT(!vd->vdev_ishole);
+ if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
+ dmu_tx_t *tx;
+
+ ASSERT(vd->vdev_removing ||
+ vd->vdev_ops == &vdev_indirect_ops);
+
+ tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
+ vdev_indirect_sync_obsolete(vd, tx);
+ dmu_tx_commit(tx);
+
+ /*
+ * If the vdev is indirect, it can't have dirty
+ * metaslabs or DTLs.
+ */
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
+ ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
+ return;
+ }
+ }
+
+ ASSERT(vdev_is_concrete(vd));
- if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
+ if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
+ !vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
+ ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
dmu_tx_commit(tx);
}
- /*
- * Remove the metadata associated with this vdev once it's empty.
- */
- if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
- vdev_remove(vd, txg);
-
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
+ /*
+ * Remove the metadata associated with this vdev once it's empty.
+ * Note that this is typically used for log/cache device removal;
+ * we don't empty toplevel vdevs when removing them. But if
+ * a toplevel happens to be emptied, this is not harmful.
+ */
+ if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) {
+ vdev_remove_empty(vd, txg);
+ }
+
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}
tvd = vd->vdev_top;
+ /*
+ * If the user did a 'zpool offline -f' then make the fault persist across
+ * reboots.
+ */
+ if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
+ /*
+ * There are two kinds of forced faults: temporary and
+ * persistent. Temporary faults go away at pool import, while
+ * persistent faults stay set. Both types of faults can be
+ * cleared with a zpool clear.
+ *
+ * We tell if a vdev is persistently faulted by looking at the
+ * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
+ * import then it's a persistent fault. Otherwise, it's
+ * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
+ * by setting vd->vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
+ * tells vdev_config_generate() (which gets run later) to set
+ * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
+ */
+ vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
+ vd->vdev_tmpoffline = B_FALSE;
+ aux = VDEV_AUX_EXTERNAL;
+ } else {
+ vd->vdev_tmpoffline = B_TRUE;
+ }
+
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
+ boolean_t wasoffline;
+ vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
+ wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
+ oldstate = vd->vdev_state;
+
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
- pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
+ pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
+ spa->spa_autoexpand);
}
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
+
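+ /*
+ * Notify ZED if the vdev was previously offline, or if its
+ * state just rose from below DEGRADED to DEGRADED or better.
+ */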
+ if (wasoffline ||
+ (oldstate < VDEV_STATE_DEGRADED &&
+ vd->vdev_state >= VDEV_STATE_DEGRADED))
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
+
return (spa_vdev_state_exit(spa, vd, 0));
}
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
- error = spa_offline_log(spa);
+ error = spa_reset_logs(spa);
+
+ /*
+ * If the log device was successfully reset but has
+ * checkpointed data, do not offline it.
+ */
+ if (error == 0 &&
+ tvd->vdev_checkpoint_sm != NULL) {
+ ASSERT3U(tvd->vdev_checkpoint_sm->sm_alloc,
+ !=, 0);
+ error = ZFS_ERR_CHECKPOINT_EXISTS;
+ }
spa_vdev_state_enter(spa, SCL_ALLOC);
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
- int c;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
+ /*
+ * It makes no sense to "clear" an indirect vdev.
+ */
+ if (!vdev_is_concrete(vd))
+ return;
+
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
-
/*
- * When reopening in reponse to a clear event, it may be due to
+ * When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to still post the ereport again.
*/
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
+ vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
spa_async_request(spa, SPA_ASYNC_RESILVER);
- spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_CLEAR);
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
- return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
+ return (vd->vdev_state < VDEV_STATE_DEGRADED ||
+ vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
- return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
+ return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
+ vdev_is_concrete(vd));
}
boolean_t
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
- !vd->vdev_cant_write && !vd->vdev_ishole);
+ !vd->vdev_cant_write && vdev_is_concrete(vd) &&
+ vd->vdev_mg->mg_initialized);
}
boolean_t
return (B_TRUE);
}
+static void
+vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
+{
+ int t;
+ for (t = 0; t < ZIO_TYPES; t++) {
+ vs->vs_ops[t] += cvs->vs_ops[t];
+ vs->vs_bytes[t] += cvs->vs_bytes[t];
+ }
+
+ cvs->vs_scan_removing = cvd->vdev_removing;
+}
+
/*
- * Get statistics for the given vdev.
+ * Get extended stats
*/
-void
-vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
+static void
+vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
- spa_t *spa = vd->vdev_spa;
- vdev_t *rvd = spa->spa_root_vdev;
- int c, t;
+ int t, b;
+ for (t = 0; t < ZIO_TYPES; t++) {
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
+ vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
+
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
+ vsx->vsx_total_histo[t][b] +=
+ cvsx->vsx_total_histo[t][b];
+ }
+ }
- ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
+ for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
+ vsx->vsx_queue_histo[t][b] +=
+ cvsx->vsx_queue_histo[t][b];
+ }
+ vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
+ vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
- mutex_enter(&vd->vdev_stat_lock);
- bcopy(&vd->vdev_stat, vs, sizeof (*vs));
- vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
- vs->vs_state = vd->vdev_state;
- vs->vs_rsize = vdev_get_min_asize(vd);
- if (vd->vdev_ops->vdev_op_leaf)
- vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
- vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
- if (vd->vdev_aux == NULL && vd == vd->vdev_top && !vd->vdev_ishole) {
- vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
+ vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
+
+ for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
+ vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
+}
+
+boolean_t
+vdev_is_spacemap_addressable(vdev_t *vd)
+{
+ /*
+ * Assuming 47 bits of the space map entry are dedicated to the entry's
+ * offset (see description in space_map.h), we calculate the maximum
+ * address that can be described by a space map entry for the given
+ * device.
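+ * For example, ashift=9 (512-byte sectors) gives shift = 56,
+ * i.e. a 2^56-byte (64PB) addressable limit.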
+ */
+ uint64_t shift = vd->vdev_ashift + 47;
+
+ if (shift >= 63) /* detect potential overflow */
+ return (B_TRUE);
+
+ return (vd->vdev_asize < (1ULL << shift));
+}
+
+/*
+ * Get statistics for the given vdev.
+ */
+static void
+vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
+{
+ int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
- if (vd == rvd) {
- for (c = 0; c < rvd->vdev_children; c++) {
- vdev_t *cvd = rvd->vdev_child[c];
+ if (!vd->vdev_ops->vdev_op_leaf) {
+ if (vs) {
+ memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
+ memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
+ }
+ if (vsx)
+ memset(vsx, 0, sizeof (*vsx));
+
+ for (int c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
+ vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
- for (t = 0; t < ZIO_TYPES; t++) {
- vs->vs_ops[t] += cvs->vs_ops[t];
- vs->vs_bytes[t] += cvs->vs_bytes[t];
- }
- cvs->vs_scan_removing = cvd->vdev_removing;
+ vdev_get_stats_ex_impl(cvd, cvs, cvsx);
+ if (vs)
+ vdev_get_child_stat(cvd, vs, cvs);
+ if (vsx)
+ vdev_get_child_stat_ex(cvd, vsx, cvsx);
+
+ }
+ } else {
+ /*
+ * We're a leaf. Just copy our ZIO active queue stats in. The
+ * other leaf stats are updated in vdev_stat_update().
+ */
+ if (!vsx)
+ return;
+
+ memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
+
+ for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
+ vsx->vsx_active_queue[t] =
+ vd->vdev_queue.vq_class[t].vqc_active;
+ vsx->vsx_pend_queue[t] = avl_numnodes(
+ &vd->vdev_queue.vq_class[t].vqc_queued_tree);
+ }
+ }
+}
+
+void
+vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
+{
+ vdev_t *tvd = vd->vdev_top;
+ mutex_enter(&vd->vdev_stat_lock);
+ if (vs) {
+ bcopy(&vd->vdev_stat, vs, sizeof (*vs));
+ vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
+ vs->vs_state = vd->vdev_state;
+ vs->vs_rsize = vdev_get_min_asize(vd);
+ if (vd->vdev_ops->vdev_op_leaf)
+ vs->vs_rsize += VDEV_LABEL_START_SIZE +
+ VDEV_LABEL_END_SIZE;
+ /*
+ * Report expandable space on top-level, non-auxiliary devices
+ * only. The expandable space is reported in terms of metaslab
+ * sized units since that determines how much space the pool
+ * can expand.
+ */
+ if (vd->vdev_aux == NULL && tvd != NULL) {
+ vs->vs_esize = P2ALIGN(
+ vd->vdev_max_asize - vd->vdev_asize,
+ 1ULL << tvd->vdev_ms_shift);
+ }
+ if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
+ vdev_is_concrete(vd)) {
+ vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
+ vd->vdev_mg->mg_fragmentation : 0;
}
}
+
+ ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_READER) != 0);
+ vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
+void
+vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
+{
+ return (vdev_get_stats_ex(vd, vs, NULL));
+}
+
void
vdev_clear_stats(vdev_t *vd)
{
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
- int c;
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = &vd->vdev_stat;
+ vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
vs->vs_self_healed += psize;
}
- vs->vs_ops[type]++;
- vs->vs_bytes[type] += psize;
+ /*
+ * The bytes/ops/histograms are recorded at the leaf level and
+ * aggregated into the higher level vdevs in vdev_get_stats().
+ */
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
+
+ vs->vs_ops[type]++;
+ vs->vs_bytes[type] += psize;
+
+ if (flags & ZIO_FLAG_DELEGATED) {
+ vsx->vsx_agg_histo[zio->io_priority]
+ [RQ_HISTO(zio->io_size)]++;
+ } else {
+ vsx->vsx_ind_histo[zio->io_priority]
+ [RQ_HISTO(zio->io_size)]++;
+ }
+
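+ /*
+ * io_delta is the zio's total elapsed time while io_delay
+ * is its time on disk, so their difference approximates
+ * the time spent queued.
+ */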
+ if (zio->io_delta && zio->io_delay) {
+ vsx->vsx_queue_histo[zio->io_priority]
+ [L_HISTO(zio->io_delta - zio->io_delay)]++;
+ vsx->vsx_disk_histo[type]
+ [L_HISTO(zio->io_delay)]++;
+ vsx->vsx_total_histo[type]
+ [L_HISTO(zio->io_delta)]++;
+ }
+ }
mutex_exit(&vd->vdev_stat_lock);
return;
vs->vs_write_errors++;
mutex_exit(&vd->vdev_stat_lock);
- if (type == ZIO_TYPE_WRITE && txg != 0 &&
+ if (spa->spa_load_state == SPA_LOAD_NONE &&
+ type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
}
}
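+/*
+ * Convert a raw space delta into its "deflated" equivalent, i.e. counted
+ * in SPA_MINBLOCKSIZE units and scaled by the vdev's deflate ratio, which
+ * discounts worst-case allocation overhead such as raidz parity.
+ */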
+int64_t
+vdev_deflated_space(vdev_t *vd, int64_t space)
+{
+ ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
+ ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
+
+ return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
+}
+
/*
- * Update the in-core space usage stats for this vdev, its metaslab class,
- * and the root vdev.
+ * Update the in-core space usage stats for this vdev and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
- int64_t dspace_delta = space_delta;
+ int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
- metaslab_group_t *mg = vd->vdev_mg;
- metaslab_class_t *mc = mg ? mg->mg_class : NULL;
ASSERT(vd == vd->vdev_top);
* because the root vdev's psize-to-asize is simply the max of its
* childrens', thus not accurate enough for us.
*/
- ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
- ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
- dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
- vd->vdev_deflate_ratio;
+ dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
- if (mc == spa_normal_class(spa)) {
+ /* every class but log contributes to root space stats */
+ if (vd->vdev_mg != NULL && !vd->vdev_islog) {
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
-
- if (mc != NULL) {
- ASSERT(rvd == vd->vdev_parent);
- ASSERT(vd->vdev_ms_count != 0);
-
- metaslab_class_space_update(mc,
- alloc_delta, defer_delta, space_delta, dspace_delta);
- }
+ /* Note: metaslab_class_space_update() has moved into metaslab_space_update() */
}
/*
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
- !vd->vdev_ishole)
+ vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
+ }
}
}
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
- if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
+ if (!list_link_active(&vd->vdev_state_dirty_node) &&
+ vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
- int c;
if (vd->vdev_children > 0) {
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
- * Don't factor holes into the decision.
+ * Don't factor holes or indirect vdevs into the
+ * decision.
*/
- if (child->vdev_ishole)
+ if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
+ /*
+ * Since the vdev_offline() code path is already in an offline
+ * state, we can miss a statechange event to OFFLINE. Check
+ * the previous state to catch this condition.
+ */
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (state == VDEV_STATE_OFFLINE) &&
+ (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
+ /* post an offline state change */
+ zfs_post_state_change(spa, vd, vd->vdev_prevstate);
+ }
vd->vdev_stat.vs_aux = aux;
return;
}
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
- /*
- * If we have brought this vdev back into service, we need
- * to notify fmd so that it can gracefully repair any outstanding
- * cases due to a missing device. We do this in all cases, even those
- * that probably don't correlate to a repaired fault. This is sure to
- * catch all cases, and we let the zfs-retire agent sort it out. If
- * this is a transient state it's OK, as the retire agent will
- * double-check the state of the vdev before repairing it.
- */
- if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
- vd->vdev_prevstate != state)
- zfs_post_state_change(spa, vd);
-
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
+ case VDEV_AUX_BAD_ASHIFT:
+ class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
+ break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
- zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
+ zfs_ereport_post(class, spa, vd, NULL, NULL,
+ save_state, 0);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
}
+ /*
+ * Notify ZED of any significant state-change on a leaf vdev.
+ */
+ if (vd->vdev_ops->vdev_op_leaf) {
+ /* preserve original state from a vdev_reopen() */
+ if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
+ (vd->vdev_prevstate != vd->vdev_state) &&
+ (save_state <= VDEV_STATE_CLOSED))
+ save_state = vd->vdev_prevstate;
+
+ /* filter out state change due to initial vdev_open */
+ if (save_state > VDEV_STATE_CLOSED)
+ zfs_post_state_change(spa, vd, save_state);
+ }
+
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
+boolean_t
+vdev_children_are_offline(vdev_t *vd)
+{
+ ASSERT(!vd->vdev_ops->vdev_op_leaf);
+
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
+ if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
/*
* Check the vdev configuration to ensure that it's capable of supporting
- * a root pool.
+ * a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
-#if defined(__sun__) || defined(__sun)
- /*
- * Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the
- * leaves can be wholedisks.
- */
- int c;
-
if (!vd->vdev_ops->vdev_op_leaf) {
- char *vdev_type = vd->vdev_ops->vdev_op_type;
+ const char *vdev_type = vd->vdev_ops->vdev_op_type;
- if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
- vd->vdev_children > 1) {
- return (B_FALSE);
- } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
- strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
+ if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 ||
+ strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) {
return (B_FALSE);
}
- } else if (vd->vdev_wholedisk == 1) {
- return (B_FALSE);
}
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
-#endif /* __sun__ || __sun */
return (B_TRUE);
}
-/*
- * Load the state from the original vdev tree (ovd) which
- * we've retrieved from the MOS config object. If the original
- * vdev was offline or faulted then we transfer that state to the
- * device in the current vdev tree (nvd).
- */
-void
-vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
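+/*
+ * A "concrete" vdev provides actual physical storage; indirect, hole,
+ * missing, and root vdevs do not.
+ */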
+boolean_t
+vdev_is_concrete(vdev_t *vd)
{
- int c;
-
- ASSERT(nvd->vdev_top->vdev_islog);
- ASSERT(spa_config_held(nvd->vdev_spa,
- SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
- ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
-
- for (c = 0; c < nvd->vdev_children; c++)
- vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
-
- if (nvd->vdev_ops->vdev_op_leaf) {
- /*
- * Restore the persistent vdev state
- */
- nvd->vdev_offline = ovd->vdev_offline;
- nvd->vdev_faulted = ovd->vdev_faulted;
- nvd->vdev_degraded = ovd->vdev_degraded;
- nvd->vdev_removed = ovd->vdev_removed;
+ vdev_ops_t *ops = vd->vdev_ops;
+ if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
+ ops == &vdev_missing_ops || ops == &vdev_root_ops) {
+ return (B_FALSE);
+ } else {
+ return (B_TRUE);
}
}
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
- int c;
-
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
+ ASSERT(vdev_is_concrete(vd));
- if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
+ vdev_set_deflate_ratio(vd);
+
+ if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
+ vdev_is_concrete(vd)) {
+ vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
void
-vdev_deadman(vdev_t *vd)
+vdev_deadman(vdev_t *vd, char *tag)
{
- int c;
-
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
- vdev_deadman(cvd);
+ vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
zio_t *fio;
uint64_t delta;
+ zfs_dbgmsg("slow vdev: %s has %d active IOs",
+ vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
+
/*
* Look at the head of all the pending queues,
* if any I/O has been outstanding for longer than
- * the spa_deadman_synctime we log a zevent.
+ * the spa_deadman_synctime, invoke the deadman logic.
*/
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
- if (delta > spa_deadman_synctime(spa)) {
- zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
- "delta %lluns, last io %lluns",
- fio->io_timestamp, delta,
- vq->vq_io_complete_ts);
- zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
- spa, vd, fio, 0, 0);
- }
+ if (delta > spa_deadman_synctime(spa))
+ zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
-#if defined(_KERNEL) && defined(HAVE_SPL)
+#if defined(_KERNEL)
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
-
-module_param(metaslabs_per_vdev, int, 0644);
-MODULE_PARM_DESC(metaslabs_per_vdev,
- "Divide added vdev into approximately (but no more than) this number "
- "of metaslabs");
+/* BEGIN CSTYLED */
+module_param(vdev_max_ms_count, int, 0644);
+MODULE_PARM_DESC(vdev_max_ms_count,
+ "Target number of metaslabs per top-level vdev");
+
+module_param(vdev_min_ms_count, int, 0644);
+MODULE_PARM_DESC(vdev_min_ms_count,
+ "Minimum number of metaslabs per top-level vdev");
+
+module_param(vdev_ms_count_limit, int, 0644);
+MODULE_PARM_DESC(vdev_ms_count_limit,
+ "Practical upper limit of total metaslabs per top-level vdev");
+
+module_param(zfs_delays_per_second, uint, 0644);
+MODULE_PARM_DESC(zfs_delays_per_second, "Rate limit delay events to this many "
+ "IO delays per second");
+
+module_param(zfs_checksums_per_second, uint, 0644);
+MODULE_PARM_DESC(zfs_checksums_per_second, "Rate limit checksum events "
+ "to this many checksum errors per second (do not set below zed "
+ "threshold).");
+
+module_param(zfs_scan_ignore_errors, int, 0644);
+MODULE_PARM_DESC(zfs_scan_ignore_errors,
+ "Ignore errors during resilver/scrub");
+
+module_param(vdev_validate_skip, int, 0644);
+MODULE_PARM_DESC(vdev_validate_skip,
+ "Bypass vdev_validate()");
+/* END CSTYLED */
#endif