* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
- * Copyright (c) 2017 Datto Inc.
+ * Copyright (c) 2018 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+ * Copyright (c) 2017, Intel Corporation.
+ * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
+ * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
+ * Copyright (c) 2021, 2023, Klara Inc.
*/
-#include <ctype.h>
#include <errno.h>
-#include <devid.h>
-#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
-#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
+#include <sys/zfs_sysfs.h>
+#include <sys/vdev_disk.h>
+#include <sys/types.h>
#include <dlfcn.h>
+#include <libzutil.h>
+#include <fcntl.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
-static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
- int create:1; /* Validate property on creation */
- int import:1; /* Validate property on import */
+ unsigned int create:1; /* Validate property on creation */
+ unsigned int import:1; /* Validate property on import */
+ unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;
/*
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
- return (-1);
+ zcmd_alloc_dst_nvlist(hdl, &zc, 0);
- while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
- if (errno == ENOMEM) {
- if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
- } else {
+ while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
+ if (errno == ENOMEM)
+ zcmd_expand_dst_nvlist(hdl, &zc);
+ else {
zcmd_free_nvlists(&zc);
return (-1);
}
return (0);
}
-static int
+int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
return (0);
}
-static char *
+static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t *src)
{
nvlist_t *nv, *nvl;
- uint64_t ival;
- char *value;
+ const char *value;
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
- verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
- source = ival;
- verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
+ source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
+ value = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
- if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
+ if ((value = zpool_prop_default_string(prop)) == NULL)
value = "-";
}
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
- verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
- source = value;
- verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
+ source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
+ value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
/*
* Map VDEV STATE to printed strings.
*/
-char *
+const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
switch (state) {
return (gettext("UNKNOWN"));
}
+/*
+ * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
+ * "SUSPENDED", etc).
+ */
+const char *
+zpool_get_state_str(zpool_handle_t *zhp)
+{
+ zpool_errata_t errata;
+ zpool_status_t status;
+ const char *str;
+
+ status = zpool_get_status(zhp, NULL, &errata);
+
+ if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
+ str = gettext("FAULTED");
+ } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
+ status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
+ status == ZPOOL_STATUS_IO_FAILURE_MMP) {
+ str = gettext("SUSPENDED");
+ } else {
+ nvlist_t *nvroot = fnvlist_lookup_nvlist(
+ zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
+ uint_t vsc;
+ vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
+ nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
+ str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
+ }
+ return (str);
+}
+
/*
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
uint64_t intval;
const char *strval;
zprop_source_t src = ZPROP_SRC_NONE;
- nvlist_t *nvroot;
- vdev_stat_t *vs;
- uint_t vsc;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
switch (prop) {
break;
case ZPOOL_PROP_HEALTH:
- (void) strlcpy(buf, "FAULTED", len);
+ (void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_GUID:
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
+ case ZPOOL_PROP_COMPATIBILITY:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
(void) strlcpy(buf,
len);
break;
}
- /* FALLTHROUGH */
+ zfs_fallthrough;
default:
(void) strlcpy(buf, "-", len);
break;
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_LEAKED:
case ZPOOL_PROP_ASHIFT:
+ case ZPOOL_PROP_MAXBLOCKSIZE:
+ case ZPOOL_PROP_MAXDNODESIZE:
+ case ZPOOL_PROP_BCLONESAVED:
+ case ZPOOL_PROP_BCLONEUSED:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
break;
case ZPOOL_PROP_EXPANDSZ:
+ case ZPOOL_PROP_CHECKPOINT:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
}
break;
+ case ZPOOL_PROP_BCLONERATIO:
case ZPOOL_PROP_DEDUPRATIO:
if (literal)
(void) snprintf(buf, len, "%llu.%02llu",
break;
case ZPOOL_PROP_HEALTH:
- verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
- ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
- verify(nvlist_lookup_uint64_array(nvroot,
- ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
- == 0);
-
- (void) strlcpy(buf, zpool_state_to_name(intval,
- vs->vs_aux), len);
+ (void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
break;
}
- /* FALLTHROUGH */
+ zfs_fallthrough;
default:
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
}
return (0);
}
+/*
+ * Get a zpool property value for 'propname' and return the value in
+ * a pre-allocated buffer.
+ */
+int
+zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
+ size_t len, zprop_source_t *srctype)
+{
+ nvlist_t *nv, *nvl;
+ uint64_t ival;
+ const char *value;
+ zprop_source_t source = ZPROP_SRC_LOCAL;
+
+ nvl = zhp->zpool_props;
+ if (nvlist_lookup_nvlist(nvl, propname, &nv) == 0) {
+ if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
+ source = ival;
+ verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
+ } else {
+ source = ZPROP_SRC_DEFAULT;
+ value = "-";
+ }
+
+ if (srctype)
+ *srctype = source;
+
+ (void) strlcpy(buf, value, len);
+
+ return (0);
+}
+
/*
* Check if the bootfs name has the same pool name as it is set to.
* Assuming bootfs is a valid dataset name.
*/
static boolean_t
-bootfs_name_valid(const char *pool, char *bootfs)
+bootfs_name_valid(const char *pool, const char *bootfs)
{
int len = strlen(pool);
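+ /* An empty bootfs is accepted so that the property can be cleared. */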
+ if (bootfs[0] == '\0')
+ return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
return (B_FALSE);
}
-boolean_t
-zpool_is_bootable(zpool_handle_t *zhp)
-{
- char bootfs[ZFS_MAX_DATASET_NAME_LEN];
-
- return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
- sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
- sizeof (bootfs)) != 0);
-}
-
-
/*
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
nvpair_t *elem;
nvlist_t *retprops;
zpool_prop_t prop;
- char *strval;
+ const char *strval;
uint64_t intval;
- char *slash, *check;
+ const char *slash, *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
+ char report[1024];
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
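+ /*
+ * When validating vdev properties: known vdev properties are
+ * checked for read-only status and parsed below, user-defined
+ * vdev properties are passed through unmodified, and any other
+ * name is rejected as invalid.
+ */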
+ if (flags.vdevprop && zpool_prop_vdev(propname)) {
+ vdev_prop_t vprop = vdev_name_to_prop(propname);
+
+ if (vdev_prop_readonly(vprop)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
+ "is readonly"), propname);
+ (void) zfs_error(hdl, EZFS_PROPREADONLY,
+ errbuf);
+ goto error;
+ }
+
+ if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
+ retprops, &strval, &intval, errbuf) != 0)
+ goto error;
+
+ continue;
+ } else if (flags.vdevprop && vdev_prop_user(propname)) {
+ if (nvlist_add_nvpair(retprops, elem) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+ continue;
+ } else if (flags.vdevprop) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid property: '%s'"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
prop = zpool_name_to_prop(propname);
if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
int err;
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid feature '%s'"), fname);
+ "feature '%s' unsupported by kernel"),
+ fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
goto error;
}
+ if (!flags.create &&
+ strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' can only be set to "
+ "'disabled' at creation time"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
goto error;
}
+ continue;
+ } else if (prop == ZPOOL_PROP_INVAL &&
+ zfs_prop_user(propname)) {
+ /*
+ * This is a user property: make sure the value is a
+ * string and that the name is shorter than ZAP_MAXNAMELEN.
+ */
+ if (nvpair_type(elem) != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property name '%s' is too long"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ (void) nvpair_value_string(elem, &strval);
+
+ if (strlen(strval) >= ZFS_MAXPROPLEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property value '%s' is too long"),
+ strval);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ if (nvlist_add_string(retprops, propname,
+ strval) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+
continue;
}
goto error;
}
+ if (!flags.create && zpool_prop_setonce(prop)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' can only be set at "
+ "creation time"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
goto error;
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "property '%s' number %d is invalid."),
- propname, intval);
+ "property '%s' number %llu is invalid."),
+ propname, (unsigned long long)intval);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
if (intval != 0 &&
(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid '%s=%d' property: only values "
- "between %" PRId32 " and %" PRId32 " "
- "are allowed.\n"),
- propname, intval, ASHIFT_MIN, ASHIFT_MAX);
+ "property '%s' number %llu is invalid, "
+ "only values between %" PRId32 " and %"
+ PRId32 " are allowed."),
+ propname, (unsigned long long)intval,
+ ASHIFT_MIN, ASHIFT_MAX);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
* bootfs property value has to be a dataset name and
* the dataset has to be in the same pool as it sets to.
*/
- if (strval[0] != '\0' && !bootfs_name_valid(poolname,
- strval)) {
+ if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto error;
}
- *slash = '\0';
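+ /*
+ * slash points into strval, which is const; cast away the const
+ * to temporarily terminate the path so the parent directory can
+ * be checked with stat64(). The '/' is restored below.
+ */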
+ *(char *)slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
+ *(char *)slash = '/';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
goto error;
}
- *slash = '/';
+ *(char *)slash = '/';
+ break;
+
+ case ZPOOL_PROP_COMPATIBILITY:
+ switch (zpool_load_compat(strval, NULL, report, 1024)) {
+ case ZPOOL_COMPATIBILITY_OK:
+ case ZPOOL_COMPATIBILITY_WARNTOKEN:
+ break;
+ case ZPOOL_COMPATIBILITY_BADFILE:
+ case ZPOOL_COMPATIBILITY_BADTOKEN:
+ case ZPOOL_COMPATIBILITY_NOFILES:
+ zfs_error_aux(hdl, "%s", report);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
break;
case ZPOOL_PROP_COMMENT:
goto error;
}
break;
- case ZPOOL_PROP_TNAME:
- if (!flags.create) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "property '%s' can only be set at "
- "creation time"), propname);
- (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
- goto error;
- }
- break;
case ZPOOL_PROP_MULTIHOST:
if (get_system_hostid() == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
goto error;
}
break;
+ case ZPOOL_PROP_DEDUPDITTO:
+ printf("Note: property '%s' no longer has "
+ "any effect\n", propname);
+ break;
+
default:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "property '%s'(%d) not defined"), propname, prop);
break;
}
}
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
- char errbuf[1024];
+ char errbuf[ERRBUFLEN];
nvlist_t *nvl = NULL;
nvlist_t *realprops;
uint64_t version;
*/
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
- nvlist_free(nvl);
- return (-1);
- }
+ zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
}
int
-zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
+zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
+ zfs_type_t type, boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
zprop_list_t *entry;
boolean_t firstexpand = (NULL == *plp);
int i;
- if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
+ if (zprop_expand_list(hdl, plp, type) != 0)
return (-1);
+ if (type == ZFS_TYPE_VDEV)
+ return (0);
+
last = plp;
while (*last != NULL)
last = &(*last)->pl_next;
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
+ /* Handle userprops in the all properties case */
+ if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
+ return (-1);
+
+ nvp = NULL;
+ while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
+ NULL) {
+ const char *propname = nvpair_name(nvp);
+
+ if (!zfs_prop_user(propname))
+ continue;
+
+ entry = zfs_alloc(hdl, sizeof (zprop_list_t));
+ entry->pl_prop = ZPROP_USERPROP;
+ entry->pl_user_prop = zfs_strdup(hdl, propname);
+ entry->pl_width = strlen(entry->pl_user_prop);
+ entry->pl_all = B_TRUE;
+
+ *last = entry;
+ last = &entry->pl_next;
+ }
+
for (i = 0; i < SPA_FEATURES; i++) {
- zprop_list_t *entry = zfs_alloc(hdl,
- sizeof (zprop_list_t));
- entry->pl_prop = ZPROP_INVAL;
+ entry = zfs_alloc(hdl, sizeof (zprop_list_t));
+ entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
char *propname;
boolean_t found;
- zprop_list_t *entry;
if (zfeature_is_supported(nvpair_name(nvp)))
continue;
}
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
- entry->pl_prop = ZPROP_INVAL;
+ entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
}
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+ if (entry->pl_fixed && !literal)
+ continue;
+
+ if (entry->pl_prop != ZPROP_USERPROP &&
+ zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
+ NULL, literal) == 0) {
+ if (strlen(buf) > entry->pl_width)
+ entry->pl_width = strlen(buf);
+ } else if (entry->pl_prop == ZPROP_INVAL &&
+ zfs_prop_user(entry->pl_user_prop) &&
+ zpool_get_userprop(zhp, entry->pl_user_prop, buf,
+ sizeof (buf), NULL) == 0) {
+ if (strlen(buf) > entry->pl_width)
+ entry->pl_width = strlen(buf);
+ }
+ }
+
+ return (0);
+}
+int
+vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
+ zprop_list_t **plp)
+{
+ zprop_list_t *entry;
+ char buf[ZFS_MAXPROPLEN];
+ const char *strval = NULL;
+ int err = 0;
+ nvpair_t *elem = NULL;
+ nvlist_t *vprops = NULL;
+ nvlist_t *propval = NULL;
+ const char *propname;
+ vdev_prop_t prop;
+ zprop_list_t **last;
+
+ for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed)
continue;
- if (entry->pl_prop != ZPROP_INVAL &&
- zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
- NULL, B_FALSE) == 0) {
+ if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
+ entry->pl_user_prop, buf, sizeof (buf), NULL,
+ B_FALSE) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
+ if (entry->pl_prop == VDEV_PROP_NAME &&
+ strlen(vdevname) > entry->pl_width)
+ entry->pl_width = strlen(vdevname);
+ }
+
+ /* Handle the all properties case */
+ last = plp;
+ if (*last != NULL && (*last)->pl_all == B_TRUE) {
+ while (*last != NULL)
+ last = &(*last)->pl_next;
+
+ err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
+ if (err != 0)
+ return (err);
+
+ while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
+ propname = nvpair_name(elem);
+
+ /* Skip properties that are not user defined */
+ if ((prop = vdev_name_to_prop(propname)) !=
+ VDEV_PROP_USERPROP)
+ continue;
+
+ if (nvpair_value_nvlist(elem, &propval) != 0)
+ continue;
+
+ strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
+
+ entry = zfs_alloc(zhp->zpool_hdl,
+ sizeof (zprop_list_t));
+ entry->pl_prop = prop;
+ entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
+ propname);
+ entry->pl_width = strlen(strval);
+ entry->pl_all = B_TRUE;
+ *last = entry;
+ last = &entry->pl_next;
+ }
}
return (0);
return (0);
}
-/*
- * Don't start the slice at the default block of 34; many storage
- * devices will use a stripe width of 128k, other vendors prefer a 1m
- * alignment. It is best to play it safe and ensure a 1m alignment
- * given 512B blocks. When the block size is larger by a power of 2
- * we will still be 1m aligned. Some devices are sensitive to the
- * partition ending alignment as well.
- */
-#define NEW_START_BLOCK 2048
-#define PARTITION_END_ALIGNMENT 2048
-
/*
* Validate the given pool name, optionally putting an extended error message in
* 'buf'.
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
+ strncmp(pool, "draid", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
if (hdl != NULL)
return (NULL);
}
- if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
- return (NULL);
+ zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
zpool_handle_t *zhp;
boolean_t missing;
- if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
- return (-1);
+ zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
return (zhp->zpool_state);
}
+/*
+ * Check if vdev list contains a special vdev
+ */
+static boolean_t
+zpool_has_special_vdev(nvlist_t *nvroot)
+{
+ nvlist_t **child;
+ uint_t children;
+
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
+ &children) == 0) {
+ for (uint_t c = 0; c < children; c++) {
+ const char *bias;
+
+ if (nvlist_lookup_string(child[c],
+ ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
+ strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
+ return (B_TRUE);
+ }
+ }
+ }
+ return (B_FALSE);
+}
+
+/*
+ * Check if vdev list contains a dRAID vdev
+ */
+static boolean_t
+zpool_has_draid_vdev(nvlist_t *nvroot)
+{
+ nvlist_t **child;
+ uint_t children;
+
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ for (uint_t c = 0; c < children; c++) {
+ const char *type;
+
+ if (nvlist_lookup_string(child[c],
+ ZPOOL_CONFIG_TYPE, &type) == 0 &&
+ strcmp(type, VDEV_TYPE_DRAID) == 0) {
+ return (B_TRUE);
+ }
+ }
+ }
+ return (B_FALSE);
+}
+
+/*
+ * Output a dRAID top-level vdev name into the provided buffer.
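+ * For example, parity=2, data=8, children=24 and spares=2 produces
+ * the name "draid2:8d:24c:2s".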
+ */
+static char *
+zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
+ uint64_t spares, uint64_t children)
+{
+ snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
+ VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
+ (u_longlong_t)children, (u_longlong_t)spares);
+
+ return (name);
+}
+
+/*
+ * Return B_TRUE if the provided name is a dRAID spare name.
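+ * Spare names have the form draid<parity>-<top-level vdev id>-<spare id>,
+ * e.g. "draid1-2-0".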
+ */
+boolean_t
+zpool_is_draid_spare(const char *name)
+{
+ uint64_t spare_id, parity, vdev_id;
+
+ if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
+ (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
+ (u_longlong_t *)&spare_id) == 3) {
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
nvlist_t *hidden_args = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
- char msg[1024];
+ char errbuf[ERRBUFLEN];
int ret = -1;
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
- return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
- if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
- return (-1);
+ zcmd_write_conf_nvlist(hdl, &zc, nvroot);
if (props) {
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
- SPA_VERSION_1, flags, msg)) == NULL) {
+ SPA_VERSION_1, flags, errbuf)) == NULL) {
goto create_failed;
}
}
if (fsprops) {
uint64_t zoned;
- char *zonestr;
+ const char *zonestr;
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
- fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
+ fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
+ goto create_failed;
+ }
+
+ if (nvlist_exists(zc_fsprops,
+ zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
+ !zpool_has_special_vdev(nvroot)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "%s property requires a special vdev"),
+ zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto create_failed;
}
+
if (!zc_props &&
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
goto create_failed;
}
- if (zfs_crypto_create(hdl, NULL, zc_fsprops, props,
+ if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
&wkeydata, &wkeylen) != 0) {
- zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
+ zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto create_failed;
}
if (nvlist_add_nvlist(zc_props,
}
}
- if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
- goto create_failed;
+ if (zc_props)
+ zcmd_write_src_nvlist(hdl, &zc, zc_props);
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
- return (zfs_error(hdl, EZFS_BADDEV, msg));
+ return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ERANGE:
/*
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"record size invalid"));
- return (zfs_error(hdl, EZFS_BADPROP, msg));
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
case EOVERFLOW:
/*
"one or more devices is less than the "
"minimum size (%s)"), buf);
}
- return (zfs_error(hdl, EZFS_BADDEV, msg));
+ return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ENOSPC:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
- return (zfs_error(hdl, EZFS_BADDEV, msg));
+ return (zfs_error(hdl, EZFS_BADDEV, errbuf));
- case ENOTBLK:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "cache device must be a disk or disk slice"));
- return (zfs_error(hdl, EZFS_BADDEV, msg));
+ case EINVAL:
+ if (zpool_has_draid_vdev(nvroot) &&
+ zfeature_lookup_name("draid", NULL) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dRAID vdevs are unsupported by the "
+ "kernel"));
+ return (zfs_error(hdl, EZFS_BADDEV, errbuf));
+ } else {
+ return (zpool_standard_error(hdl, errno,
+ errbuf));
+ }
default:
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
}
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- char msg[1024];
+ char errbuf[ERRBUFLEN];
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
} else {
- (void) zpool_standard_error(hdl, errno, msg);
+ (void) zpool_standard_error(hdl, errno, errbuf);
}
if (zfp)
}
/*
- * Add the given vdevs to the pool. The caller must have already performed the
- * necessary verification to ensure that the vdev specification is well-formed.
+ * Create a checkpoint in the given pool.
*/
int
-zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
+zpool_checkpoint(zpool_handle_t *zhp)
{
- zfs_cmd_t zc = {"\0"};
- int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- char msg[1024];
- nvlist_t **spares, **l2cache;
- uint_t nspares, nl2cache;
+ char errbuf[ERRBUFLEN];
+ int error;
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
- "cannot add to '%s'"), zhp->zpool_name);
+ error = lzc_pool_checkpoint(zhp->zpool_name);
+ if (error != 0) {
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot checkpoint '%s'"), zhp->zpool_name);
+ (void) zpool_standard_error(hdl, error, errbuf);
+ return (-1);
+ }
- if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
+ return (0);
+}
+
+/*
+ * Discard the checkpoint from the given pool.
+ */
+int
+zpool_discard_checkpoint(zpool_handle_t *zhp)
+{
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ char errbuf[ERRBUFLEN];
+ int error;
+
+ error = lzc_pool_checkpoint_discard(zhp->zpool_name);
+ if (error != 0) {
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot discard checkpoint in '%s'"), zhp->zpool_name);
+ (void) zpool_standard_error(hdl, error, errbuf);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Add the given vdevs to the pool. The caller must have already performed the
+ * necessary verification to ensure that the vdev specification is well-formed.
+ */
+int
+zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
+{
+ zfs_cmd_t zc = {"\0"};
+ int ret;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ char errbuf[ERRBUFLEN];
+ nvlist_t **spares, **l2cache;
+ uint_t nspares, nl2cache;
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot add to '%s'"), zhp->zpool_name);
+
+ if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
- return (zfs_error(hdl, EZFS_BADVERSION, msg));
+ return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
- return (zfs_error(hdl, EZFS_BADVERSION, msg));
+ return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
- if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
- return (-1);
+ zcmd_write_conf_nvlist(hdl, &zc, nvroot);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
+ break;
+
+ case EINVAL:
+
+ if (zpool_has_draid_vdev(nvroot) &&
+ zfeature_lookup_name("draid", NULL) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dRAID vdevs are unsupported by the "
+ "kernel"));
+ } else {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid config; a pool with removing/"
+ "removed vdevs does not support adding "
+ "raidz or dRAID vdevs"));
+ }
+
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
- * This occurrs when one of the devices is below
+ * This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
"device is less than the minimum "
"size (%s)"), buf);
}
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
- (void) zfs_error(hdl, EZFS_BADVERSION, msg);
- break;
-
- case ENOTBLK:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "cache device must be a disk or disk slice"));
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
- (void) zpool_standard_error(hdl, errno, msg);
+ (void) zpool_standard_error(hdl, errno, errbuf);
}
ret = -1;
const char *log_str)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
-
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
- "cannot export '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
- return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
- msg));
+ return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
+ dgettext(TEXT_DOMAIN, "cannot export '%s'"),
+ zhp->zpool_name));
default:
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
- msg));
+ dgettext(TEXT_DOMAIN, "cannot export '%s'"),
+ zhp->zpool_name));
}
}
zpool_print_unsup_feat(nvlist_t *config)
{
nvlist_t *nvinfo, *unsup_feat;
- nvpair_t *nvp;
-
- verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
- 0);
- verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
- &unsup_feat) == 0);
- for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
- nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
- char *desc;
-
- verify(nvpair_type(nvp) == DATA_TYPE_STRING);
- verify(nvpair_value_string(nvp, &desc) == 0);
+ nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
+ unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
+ for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
+ nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
+ const char *desc = fnvpair_value_string(nvp);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
- char *thename;
- char *origname;
+ const char *thename;
+ const char *origname;
int ret;
int error = 0;
- char errbuf[1024];
+ char errbuf[ERRBUFLEN];
- verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
- &origname) == 0);
+ origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
- thename = (char *)newname;
+ thename = newname;
} else {
thename = origname;
}
uint64_t version;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
- verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
- &version) == 0);
+ version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL)
return (-1);
- if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
- nvlist_free(props);
- return (-1);
- }
+ zcmd_write_src_nvlist(hdl, &zc, props);
nvlist_free(props);
}
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
- verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
- &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
- if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
- if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
+ zcmd_write_conf_nvlist(hdl, &zc, config);
+ zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
- errno == ENOMEM) {
- if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
- }
+ errno == ENOMEM)
+ zcmd_expand_dst_nvlist(hdl, &zc);
if (ret != 0)
error = errno;
zcmd_free_nvlists(&zc);
- zpool_get_rewind_policy(config, &policy);
+ zpool_get_load_policy(config, &policy);
if (error) {
char desc[1024];
* Dry-run failed, but we print out what success
* looks like if we found a best txg
*/
- if (policy.zrp_request & ZPOOL_TRY_REWIND) {
+ if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
case EREMOTEIO:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
- char *hostname = "<unknown>";
+ const char *hostname = "<unknown>";
uint64_t hostid = 0;
mmp_state_t mmp_state;
"the zgenhostid(8) command.\n"));
}
- (void) zfs_error_aux(hdl, aux);
+ (void) zfs_error_aux(hdl, "%s", aux);
}
(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
break;
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
- "The devices below are missing, use "
- "'-m' to import the pool anyway:\n"));
+ "The devices below are missing or "
+ "corrupted, use '-m' to import the pool "
+ "anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
}
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
- if (policy.zrp_request &
+ if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
- ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
+ ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
- return (0);
}
return (ret);
}
+/*
+ * Translate vdev names to guids. If a vdev_path is determined to be
+ * unsuitable then a vd_errlist is allocated and the vdev path and errno
+ * are added to it.
+ */
+static int
+zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
+ nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
+{
+ nvlist_t *errlist = NULL;
+ int error = 0;
+
+ for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
+ elem = nvlist_next_nvpair(vds, elem)) {
+ boolean_t spare, cache;
+
+ const char *vd_path = nvpair_name(elem);
+ nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
+ NULL);
+
+ if ((tgt == NULL) || cache || spare) {
+ if (errlist == NULL) {
+ errlist = fnvlist_alloc();
+ error = EINVAL;
+ }
+
+ uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
+ (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
+ fnvlist_add_int64(errlist, vd_path, err);
+ continue;
+ }
+
+ uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
+ fnvlist_add_uint64(vdev_guids, vd_path, guid);
+
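+ /*
+ * Also record the reverse mapping (guid, rendered as a decimal
+ * string, back to the vdev path) for use in error reporting.
+ */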
+ char msg[MAXNAMELEN];
+ (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
+ fnvlist_add_string(guids_to_paths, msg, vd_path);
+ }
+
+ if (error != 0) {
+ verify(errlist != NULL);
+ if (vd_errlist != NULL)
+ *vd_errlist = errlist;
+ else
+ fnvlist_free(errlist);
+ }
+
+ return (error);
+}
+
+static int
+xlate_init_err(int err)
+{
+ switch (err) {
+ case ENODEV:
+ return (EZFS_NODEVICE);
+ case EINVAL:
+ case EROFS:
+ return (EZFS_BADDEV);
+ case EBUSY:
+ return (EZFS_INITIALIZING);
+ case ESRCH:
+ return (EZFS_NO_INITIALIZE);
+ }
+ return (err);
+}
+
+/*
+ * Begin, suspend, cancel, or uninit (clear) the initialization (the writing
+ * out of all free blocks) for the given vdevs in the given pool.
+ */
+static int
+zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
+ nvlist_t *vds, boolean_t wait)
+{
+ int err;
+
+ nvlist_t *vdev_guids = fnvlist_alloc();
+ nvlist_t *guids_to_paths = fnvlist_alloc();
+ nvlist_t *vd_errlist = NULL;
+ nvlist_t *errlist;
+ nvpair_t *elem;
+
+ err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
+ guids_to_paths, &vd_errlist);
+
+ if (err != 0) {
+ verify(vd_errlist != NULL);
+ goto list_errors;
+ }
+
+ err = lzc_initialize(zhp->zpool_name, cmd_type,
+ vdev_guids, &errlist);
+
+ if (err != 0) {
+ if (errlist != NULL && nvlist_lookup_nvlist(errlist,
+ ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
+ goto list_errors;
+ }
+
+ if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
+ zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
+ "uninitialize is not supported by kernel"));
+ }
+
+ (void) zpool_standard_error(zhp->zpool_hdl, err,
+ dgettext(TEXT_DOMAIN, "operation failed"));
+ goto out;
+ }
+
+ if (wait) {
+ for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
+ elem = nvlist_next_nvpair(vdev_guids, elem)) {
+
+ uint64_t guid = fnvpair_value_uint64(elem);
+
+ err = lzc_wait_tag(zhp->zpool_name,
+ ZPOOL_WAIT_INITIALIZE, guid, NULL);
+ if (err != 0) {
+ (void) zpool_standard_error_fmt(zhp->zpool_hdl,
+ err, dgettext(TEXT_DOMAIN, "error "
+ "waiting for '%s' to initialize"),
+ nvpair_name(elem));
+
+ goto out;
+ }
+ }
+ }
+ goto out;
+
+list_errors:
+ for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
+ elem = nvlist_next_nvpair(vd_errlist, elem)) {
+ int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
+ const char *path;
+
+ if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
+ &path) != 0)
+ path = nvpair_name(elem);
+
+ (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
+ "cannot initialize '%s'", path);
+ }
+
+out:
+ fnvlist_free(vdev_guids);
+ fnvlist_free(guids_to_paths);
+
+ if (vd_errlist != NULL)
+ fnvlist_free(vd_errlist);
+
+ return (err == 0 ? 0 : -1);
+}
+
+int
+zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
+ nvlist_t *vds)
+{
+ return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
+}
+
+int
+zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
+ nvlist_t *vds)
+{
+ return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
+}
+
+static int
+xlate_trim_err(int err)
+{
+ switch (err) {
+ case ENODEV:
+ return (EZFS_NODEVICE);
+ case EINVAL:
+ case EROFS:
+ return (EZFS_BADDEV);
+ case EBUSY:
+ return (EZFS_TRIMMING);
+ case ESRCH:
+ return (EZFS_NO_TRIM);
+ case EOPNOTSUPP:
+ return (EZFS_TRIM_NOTSUP);
+ }
+ return (err);
+}
+
+static int
+zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
+{
+ int err;
+ nvpair_t *elem;
+
+ for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
+ elem = nvlist_next_nvpair(vdev_guids, elem)) {
+
+ uint64_t guid = fnvpair_value_uint64(elem);
+
+ err = lzc_wait_tag(zhp->zpool_name,
+ ZPOOL_WAIT_TRIM, guid, NULL);
+ if (err != 0) {
+ (void) zpool_standard_error_fmt(zhp->zpool_hdl,
+ err, dgettext(TEXT_DOMAIN, "error "
+ "waiting to trim '%s'"), nvpair_name(elem));
+
+ return (err);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Check errlist and report any errors, omitting ones which should be
+ * suppressed. Returns B_TRUE if any errors were reported.
+ */
+static boolean_t
+check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
+ nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
+{
+ nvpair_t *elem;
+ boolean_t reported_errs = B_FALSE;
+ int num_vds = 0;
+ int num_suppressed_errs = 0;
+
+ for (elem = nvlist_next_nvpair(vds, NULL);
+ elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
+ num_vds++;
+ }
+
+ for (elem = nvlist_next_nvpair(errlist, NULL);
+ elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
+ int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
+ const char *path;
+
+ /*
+ * If only the pool was specified, and it was not a secure
+ * trim then suppress warnings for individual vdevs which
+ * do not support trimming.
+ */
+ if (vd_error == EZFS_TRIM_NOTSUP &&
+ trim_flags->fullpool &&
+ !trim_flags->secure) {
+ num_suppressed_errs++;
+ continue;
+ }
+
+ reported_errs = B_TRUE;
+ if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
+ &path) != 0)
+ path = nvpair_name(elem);
+
+ (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
+ "cannot trim '%s'", path);
+ }
+
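+ /*
+ * If every vdev's error was suppressed, report a single pool-wide
+ * "trim not supported" error instead.
+ */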
+ if (num_suppressed_errs == num_vds) {
+ (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
+ "no devices in pool support trim operations"));
+ (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
+ dgettext(TEXT_DOMAIN, "cannot trim")));
+ reported_errs = B_TRUE;
+ }
+
+ return (reported_errs);
+}
+
+/*
+ * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
+ * the given vdevs in the given pool.
+ */
+int
+zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
+ trimflags_t *trim_flags)
+{
+ int err;
+ int retval = 0;
+
+ nvlist_t *vdev_guids = fnvlist_alloc();
+ nvlist_t *guids_to_paths = fnvlist_alloc();
+ nvlist_t *errlist = NULL;
+
+ err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
+ guids_to_paths, &errlist);
+ if (err != 0) {
+ check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
+ retval = -1;
+ goto out;
+ }
+
+ err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
+ trim_flags->secure, vdev_guids, &errlist);
+ if (err != 0) {
+ nvlist_t *vd_errlist;
+ if (errlist != NULL && nvlist_lookup_nvlist(errlist,
+ ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
+ if (check_trim_errs(zhp, trim_flags, guids_to_paths,
+ vds, vd_errlist)) {
+ retval = -1;
+ goto out;
+ }
+ } else {
+ char errbuf[ERRBUFLEN];
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "operation failed"));
+ zpool_standard_error(zhp->zpool_hdl, err, errbuf);
+ retval = -1;
+ goto out;
+ }
+ }
+
+
+ if (trim_flags->wait)
+ retval = zpool_trim_wait(zhp, vdev_guids);
+
+out:
+ if (errlist != NULL)
+ fnvlist_free(errlist);
+ fnvlist_free(vdev_guids);
+ fnvlist_free(guids_to_paths);
+ return (retval);
+}
+
/*
* Scan the pool.
*/
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
- zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
int err;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- zc.zc_cookie = func;
- zc.zc_flags = cmd;
-
- if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
- return (0);
+ nvlist_t *args = fnvlist_alloc();
+ fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
+ fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
- err = errno;
+ err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
+ fnvlist_free(args);
- /* ECANCELED on a scrub means we resumed a paused scrub */
- if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
- cmd == POOL_SCRUB_NORMAL)
+ if (err == 0) {
return (0);
+ } else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
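+ /*
+ * The scrub ioctl is unavailable (e.g. an older kernel module),
+ * so fall back to the legacy ZFS_IOC_POOL_SCAN interface.
+ */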
+ zfs_cmd_t zc = {"\0"};
+ (void) strlcpy(zc.zc_name, zhp->zpool_name,
+ sizeof (zc.zc_name));
+ zc.zc_cookie = func;
+ zc.zc_flags = cmd;
+
+ if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
+ return (0);
+ }
- if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
+ /*
+ * An ECANCELED on a scrub means one of the following:
+ * 1. we resumed a paused scrub.
+ * 2. we resumed a paused error scrub.
+ * 3. an error scrub was skipped because the pool has no error log.
+ */
+ if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
+ func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
+ return (0);
+ /*
+ * The following case is handled here:
+ * 1. A pause request when no scrub/error scrub is in progress succeeds.
+ */
+ if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
+ POOL_SCRUB_PAUSE) {
return (0);
+ }
- if (func == POOL_SCAN_SCRUB) {
+ ASSERT3U(func, >=, POOL_SCAN_NONE);
+ ASSERT3U(func, <, POOL_SCAN_FUNCS);
+
+ if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
if (cmd == POOL_SCRUB_PAUSE) {
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
- "cannot pause scrubbing %s"), zc.zc_name);
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
+ zhp->zpool_name);
} else {
assert(cmd == POOL_SCRUB_NORMAL);
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
- "cannot scrub %s"), zc.zc_name);
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot scrub %s"),
+ zhp->zpool_name);
}
+ } else if (func == POOL_SCAN_RESILVER) {
+ assert(cmd == POOL_SCRUB_NORMAL);
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot restart resilver on %s"), zhp->zpool_name);
} else if (func == POOL_SCAN_NONE) {
- (void) snprintf(msg, sizeof (msg),
- dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
- zc.zc_name);
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot cancel scrubbing %s"), zhp->zpool_name);
} else {
assert(!"unexpected result");
}
+ /*
+ * With EBUSY, six cases are possible:
+ *
+ * Current state Requested
+ * 1. Normal Scrub Running Normal Scrub or Error Scrub
+ * 2. Normal Scrub Paused Error Scrub
+ * 3. Normal Scrub Paused Pause Normal Scrub
+ * 4. Error Scrub Running Normal Scrub or Error Scrub
+ * 5. Error Scrub Paused Pause Error Scrub
+ * 6. Resilvering Anything else
+ */
if (err == EBUSY) {
nvlist_t *nvroot;
pool_scan_stat_t *ps = NULL;
uint_t psc;
- verify(nvlist_lookup_nvlist(zhp->zpool_config,
- ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+ nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
+ ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
- if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
- if (cmd == POOL_SCRUB_PAUSE)
- return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
- else
- return (zfs_error(hdl, EZFS_SCRUBBING, msg));
+ if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
+ ps->pss_state == DSS_SCANNING) {
+ if (ps->pss_pass_scrub_pause == 0) {
+ /* handles case 1 */
+ assert(cmd == POOL_SCRUB_NORMAL);
+ return (zfs_error(hdl, EZFS_SCRUBBING,
+ errbuf));
+ } else {
+ if (func == POOL_SCAN_ERRORSCRUB) {
+ /* handles case 2 */
+ ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
+ return (zfs_error(hdl,
+ EZFS_SCRUB_PAUSED_TO_CANCEL,
+ errbuf));
+ } else {
+ /* handles case 3 */
+ ASSERT3U(func, ==, POOL_SCAN_SCRUB);
+ ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
+ return (zfs_error(hdl,
+ EZFS_SCRUB_PAUSED, errbuf));
+ }
+ }
+ } else if (ps &&
+ ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
+ ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
+ if (ps->pss_pass_error_scrub_pause == 0) {
+ /* handles case 4 */
+ ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
+ return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
+ errbuf));
+ } else {
+ /* handles case 5 */
+ ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
+ ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
+ return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
+ errbuf));
+ }
} else {
- return (zfs_error(hdl, EZFS_RESILVERING, msg));
+ /* handles case 6 */
+ return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
}
} else if (err == ENOENT) {
- return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
+ return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
+ } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
+ return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
} else {
- return (zpool_standard_error(hdl, err, msg));
+ return (zpool_standard_error(hdl, err, errbuf));
}
}
nvlist_t **child;
nvlist_t *ret;
uint64_t is_log;
- char *srchkey;
+ const char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
- uint64_t srchval, theguid;
-
- verify(nvpair_value_uint64(pair, &srchval) == 0);
- verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
- &theguid) == 0);
+ uint64_t srchval = fnvpair_value_uint64(pair);
+ uint64_t theguid = fnvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_GUID);
if (theguid == srchval)
return (nv);
}
break;
case DATA_TYPE_STRING: {
- char *srchval, *val;
+ const char *srchval, *val;
- verify(nvpair_value_string(pair, &srchval) == 0);
+ srchval = fnvpair_value_string(pair);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
break;
if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
return (nv);
- } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
+ } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
}
verify(zpool_vdev_is_interior(type));
- verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
- &id) == 0);
+ id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
errno = 0;
vdev_id = strtoull(idx, &end, 10);
+ /*
+ * If we are looking for a raidz or draid and a parity is
+ * specified, make sure it matches.
+ */
+ int rzlen = strlen(VDEV_TYPE_RAIDZ);
+ assert(rzlen == strlen(VDEV_TYPE_DRAID));
+ int typlen = strlen(type);
+ if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
+ strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
+ typlen != rzlen) {
+ uint64_t vdev_parity;
+ int parity = *(type + rzlen) - '0';
+
+ if (parity <= 0 || parity > 3 ||
+ (typlen - rzlen) != 1) {
+ /*
+ * Nonsense parity specified, can
+ * never match
+ */
+ free(type);
+ return (NULL);
+ }
+ vdev_parity = fnvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_NPARITY);
+ if ((int)vdev_parity != parity) {
+ free(type);
+ break;
+ }
+ }
+
free(type);
if (errno != 0)
return (NULL);
}
/*
- * Given a physical path (minus the "/devices" prefix), find the
- * associated vdev.
+ * Given a physical path or guid, find the associated vdev.
*/
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
nvlist_t *search, *nvroot, *ret;
+ uint64_t guid;
+ char *end;
+
+ search = fnvlist_alloc();
- verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
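+ /*
+ * A string that parses entirely as a number is treated as a vdev
+ * guid; anything else is matched against the physical path.
+ */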
+ guid = strtoull(ppath, &end, 0);
+ if (guid != 0 && *end == '\0') {
+ fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
+ } else {
+ fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
+ }
- verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
+ nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
+ ZPOOL_CONFIG_VDEV_TREE);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
- nvlist_free(search);
+ fnvlist_free(search);
return (ret);
}
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
+ strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE);
+
+ if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
+ !zpool_is_draid_spare(name))
+ return (B_TRUE);
+
return (B_FALSE);
}
+/*
+ * Look up the nvlist for a given vdev.
+ */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
char *end;
nvlist_t *nvroot, *search, *ret;
uint64_t guid;
+ boolean_t __avail_spare, __l2cache, __log;
- verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ search = fnvlist_alloc();
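+ /*
+ * The path may be a numeric guid, an interior vdev name (e.g.
+ * "mirror-0" or "raidz2-1"), or a device path.
+ */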
guid = strtoull(path, &end, 0);
if (guid != 0 && *end == '\0') {
- verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
+ fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else if (zpool_vdev_is_interior(path)) {
- verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
+ fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
} else {
- verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
+ fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
}
- verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
+ nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
+ ZPOOL_CONFIG_VDEV_TREE);
+
+ /*
+ * The caller can pass NULL for avail_spare, l2cache, and log, but
+ * we still need to provide variables to vdev_to_nvlist_iter(), so
+ * just point them to junk variables here.
+ */
+ if (!avail_spare)
+ avail_spare = &__avail_spare;
+ if (!l2cache)
+ l2cache = &__l2cache;
+ if (!log)
+ log = &__log;
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
- nvlist_free(search);
+ fnvlist_free(search);
return (ret);
}
-static int
-vdev_is_online(nvlist_t *nv)
-{
- uint64_t ival;
-
- if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
- nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
- nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
- return (0);
-
- return (1);
-}
-
-/*
- * Helper function for zpool_get_physpaths().
- */
-static int
-vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
- size_t *bytes_written)
-{
- size_t bytes_left, pos, rsz;
- char *tmppath;
- const char *format;
-
- if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
- &tmppath) != 0)
- return (EZFS_NODEVICE);
-
- pos = *bytes_written;
- bytes_left = physpath_size - pos;
- format = (pos == 0) ? "%s" : " %s";
-
- rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
- *bytes_written += rsz;
-
- if (rsz >= bytes_left) {
- /* if physpath was not copied properly, clear it */
- if (bytes_left != 0) {
- physpath[pos] = 0;
- }
- return (EZFS_NOSPC);
- }
- return (0);
-}
-
-static int
-vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
- size_t *rsz, boolean_t is_spare)
-{
- char *type;
- int ret;
-
- if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
- return (EZFS_INVALCONFIG);
-
- if (strcmp(type, VDEV_TYPE_DISK) == 0) {
- /*
- * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
- * For a spare vdev, we only want to boot from the active
- * spare device.
- */
- if (is_spare) {
- uint64_t spare = 0;
- (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
- &spare);
- if (!spare)
- return (EZFS_INVALCONFIG);
- }
-
- if (vdev_is_online(nv)) {
- if ((ret = vdev_get_one_physpath(nv, physpath,
- phypath_size, rsz)) != 0)
- return (ret);
- }
- } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
- strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
- strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
- (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
- nvlist_t **child;
- uint_t count;
- int i, ret;
-
- if (nvlist_lookup_nvlist_array(nv,
- ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
- return (EZFS_INVALCONFIG);
-
- for (i = 0; i < count; i++) {
- ret = vdev_get_physpaths(child[i], physpath,
- phypath_size, rsz, is_spare);
- if (ret == EZFS_NOSPC)
- return (ret);
- }
- }
-
- return (EZFS_POOL_INVALARG);
-}
-
-/*
- * Get phys_path for a root pool config.
- * Return 0 on success; non-zero on failure.
- */
-static int
-zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
-{
- size_t rsz;
- nvlist_t *vdev_root;
- nvlist_t **child;
- uint_t count;
- char *type;
-
- rsz = 0;
-
- if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
- &vdev_root) != 0)
- return (EZFS_INVALCONFIG);
-
- if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
- nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
- &child, &count) != 0)
- return (EZFS_INVALCONFIG);
-
- /*
- * root pool can only have a single top-level vdev.
- */
- if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
- return (EZFS_POOL_INVALARG);
-
- (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
- B_FALSE);
-
- /* No online devices */
- if (rsz == 0)
- return (EZFS_NODEVICE);
-
- return (0);
-}
-
-/*
- * Get phys_path for a root pool
- * Return 0 on success; non-zero on failure.
- */
-int
-zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
-{
- return (zpool_get_config_physpath(zhp->zpool_config, physpath,
- phypath_size));
-}
-
-/*
- * If the device has being dynamically expanded then we need to relabel
- * the disk to use the new unallocated space.
- */
-static int
-zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
-{
- int fd, error;
-
- if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
- "relabel '%s': unable to open device: %d"), path, errno);
- return (zfs_error(hdl, EZFS_OPENFAILED, msg));
- }
-
- /*
- * It's possible that we might encounter an error if the device
- * does not have any unallocated space left. If so, we simply
- * ignore that error and continue on.
- *
- * Also, we don't call efi_rescan() - that would just return EBUSY.
- * The module will do it for us in vdev_disk_open().
- */
- error = efi_use_whole_disk(fd);
-
- /* Flush the buffers to disk and invalidate the page cache. */
- (void) fsync(fd);
- (void) ioctl(fd, BLKFLSBUF);
-
- (void) close(fd);
- if (error && error != VT_ENOSPC) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
- "relabel '%s': unable to read disk capacity"), path);
- return (zfs_error(hdl, EZFS_NOCAP, msg));
- }
-
- return (0);
-}
-
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
- uint64_t guid;
boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
nvlist_t *tgt;
&log)) == NULL)
return (0);
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
if (is_spare != NULL)
*is_spare = spare;
if (is_l2cache != NULL)
if (is_log != NULL)
*is_log = log;
- return (guid);
+ return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
}
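+
+/*
+ * A minimal usage sketch of the path-to-GUID lookup: the pool name "tank"
+ * and device path "/dev/sda" are placeholders, and error handling is elided.
+ *
+ *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
+ *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda");
+ *	if (guid == 0)
+ *		(void) fprintf(stderr, "vdev not found\n");
+ *	zpool_close(zhp);
+ */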
/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
vdev_state_t *newstate)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
- char *pathname;
+ char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- int error;
if (flags & ZFS_ONLINE_EXPAND) {
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
} else {
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
- if (avail_spare)
- return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
+ return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
+#ifndef __FreeBSD__
+ const char *pathname;
if ((flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
if (l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
- return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
+ return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
}
if (wholedisk) {
const char *fullpath = path;
char buf[MAXPATHLEN];
+ int error;
if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
- msg));
+ errbuf));
fullpath = buf;
}
- error = zpool_relabel_disk(hdl, fullpath, msg);
+ error = zpool_relabel_disk(hdl, fullpath, errbuf);
if (error != 0)
return (error);
}
}
+#endif
zc.zc_cookie = VDEV_STATE_ONLINE;
zc.zc_obj = flags;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
- return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
+ return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
}
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
*newstate = zc.zc_cookie;
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (avail_spare)
- return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
/*
* There are no other replicas of this device.
*/
- return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
+ return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
case EEXIST:
/*
* The log device has unplayed logs
*/
- return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
+ return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
default:
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
}
+/*
+ * Remove the specified vdev asynchronously from the configuration, so
+ * that it may come ONLINE if reinserted. This is called from zed on a
+ * udev remove event.
+ * Note: We also have a similar function zpool_vdev_remove() that
+ * removes the vdev from the pool.
+ */
+int
+zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = {"\0"};
+ char errbuf[ERRBUFLEN];
+ nvlist_t *tgt;
+ boolean_t avail_spare, l2cache;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ NULL)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
+
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
+
+ zc.zc_cookie = VDEV_STATE_REMOVED;
+
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, errbuf));
+}
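+
+/*
+ * A minimal usage sketch of the call above, assuming a udev-driven consumer
+ * such as zed; the pool name "tank" and device path "/dev/sdb" are
+ * placeholders.
+ *
+ *	libzfs_handle_t *hdl = libzfs_init();
+ *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
+ *	if (zpool_vdev_remove_wanted(zhp, "/dev/sdb") != 0)
+ *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
+ *	zpool_close(zhp);
+ *	libzfs_fini(hdl);
+ */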
+
/*
* Mark the given vdev faulted.
*/
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = VDEV_STATE_FAULTED;
zc.zc_obj = aux;
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
/*
* There are no other replicas of this device.
*/
- return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
+ return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
default:
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
}
/*
- * Mark the given vdev degraded.
+ * Generic set vdev state function
*/
-int
-zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+static int
+zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
+ vdev_state_t state)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
- (void) snprintf(msg, sizeof (msg),
- dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
+ zpool_state_to_name(state, aux), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
- zc.zc_cookie = VDEV_STATE_DEGRADED;
+ zc.zc_cookie = state;
zc.zc_obj = aux;
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
+}
+
+/*
+ * Mark the given vdev degraded.
+ */
+int
+zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+{
+ return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
+}
+
+/*
+ * Mark the given vdev as in a removed state (as if the device does not exist).
+ *
+ * This is different than zpool_vdev_remove() which does a removal of a device
+ * from the pool (but the device does exist).
+ */
+int
+zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+{
+ return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
}
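+
+/*
+ * A minimal sketch of how the wrappers above are driven: both route through
+ * zpool_vdev_set_state() and take a vdev GUID rather than a path. The path
+ * and GUID lookup below are assumptions for the example.
+ *
+ *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sdc");
+ *	if (guid != 0)
+ *		(void) zpool_vdev_degrade(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
+ */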
/*
{
nvlist_t **child;
uint_t c, children;
- char *type;
if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
- verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
- &type) == 0);
-
- if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
+ const char *type = fnvlist_lookup_string(search,
+ ZPOOL_CONFIG_TYPE);
+ if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
+ strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
children == 2 && child[which] == tgt)
return (B_TRUE);
* If 'replacing' is specified, the new disk will replace the old one.
*/
int
-zpool_vdev_attach(zpool_handle_t *zhp,
- const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
+zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
+ const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
int ret;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
uint64_t val;
char *newname;
+ const char *type;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- boolean_t rootpool = zpool_is_bootable(zhp);
if (replacing)
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot replace %s with %s"), old_disk, new_disk);
else
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
- &islog)) == 0)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ &islog)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
- return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
- return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
+ return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
zc.zc_cookie = replacing;
+ zc.zc_simple = rebuild;
+
+ if (rebuild &&
+ zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "the loaded zfs module doesn't support device rebuilds"));
+ return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
+ }
+
+ type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);
+ if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&
+ zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "the loaded zfs module doesn't support raidz expansion"));
+ return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
+ }
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children != 1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
- return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
+ return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
}
- verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
- ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
+ config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+ ZPOOL_CONFIG_VDEV_TREE);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
free(newname);
- return (zfs_error(hdl, EZFS_BADTARGET, msg));
+ return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
}
free(newname);
- if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
- return (-1);
+ zcmd_write_conf_nvlist(hdl, &zc, nvroot);
ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
- if (ret == 0) {
- if (rootpool) {
- /*
- * XXX need a better way to prevent user from
- * booting up a half-baked vdev.
- */
- (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
- "sure to wait until resilver is done "
- "before rebooting.\n"));
- }
+ if (ret == 0)
return (0);
- }
switch (errno) {
case ENOTSUP:
uint64_t version = zpool_get_prop_int(zhp,
ZPOOL_PROP_VERSION, NULL);
- if (islog)
+ if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a log with a spare"));
- else if (version >= SPA_VERSION_MULTI_REPLACE)
+ } else if (rebuild) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "only mirror and dRAID vdevs support "
+ "sequential reconstruction"));
+ } else if (zpool_is_draid_spare(new_disk)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dRAID spares can only replace child "
+ "devices in their parent's dRAID vdev"));
+ } else if (version >= SPA_VERSION_MULTI_REPLACE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"already in replacing/spare config; wait "
"for completion or use 'zpool detach'"));
- else
+ } else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a replacing device"));
- } else {
+ }
+ } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "can only attach to mirrors and top-level "
- "disks"));
+ "raidz_expansion feature must be enabled "
+ "in order to attach a device to raidz"));
+ } else {
+ char status[64] = {0};
+ zpool_prop_get_feature(zhp,
+ "feature@device_rebuild", status, 63);
+ if (rebuild &&
+ strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "device_rebuild feature must be enabled "
+ "in order to use sequential "
+ "reconstruction"));
+ } else {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "can only attach to mirrors and top-level "
+ "disks"));
+ }
}
- (void) zfs_error(hdl, EZFS_BADTARGET, msg);
+ (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EINVAL:
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
- (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
+ (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
new_disk);
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is too small"));
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device has a different optimal sector size; use the "
"option '-o ashift=N' to override the optimal size"));
- (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
- (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
+ (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
+ break;
+
+ case ENXIO:
+ /*
+ * The existing raidz vdev has offline children
+ */
+ if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "raidz vdev has devices that are are offline or "
+ "being replaced"));
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
+ break;
+ } else {
+ (void) zpool_standard_error(hdl, errno, errbuf);
+ }
break;
+ case EADDRINUSE:
+ /*
+ * The boot reserved area is already being used (FreeBSD)
+ */
+ if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "the reserved boot area needed for the expansion "
+ "is already being used by a boot loader"));
+ (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
+ } else {
+ (void) zpool_standard_error(hdl, errno, errbuf);
+ }
+ break;
default:
- (void) zpool_standard_error(hdl, errno, msg);
+ (void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
- NULL)) == 0)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ NULL)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
- return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
- return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
+ return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
"applicable to mirror and replacing vdevs"));
- (void) zfs_error(hdl, EZFS_BADTARGET, msg);
+ (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
- (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
+ (void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
break;
default:
- (void) zpool_standard_error(hdl, errno, msg);
+ (void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
nvlist_t *props, splitflags_t flags)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
+ const char *bias;
nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
nvlist_t **varray = NULL, *zc_props = NULL;
uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
libzfs_handle_t *hdl = zhp->zpool_hdl;
- uint64_t vers;
+ uint64_t vers, readonly = B_FALSE;
boolean_t freelist = B_FALSE, memory_err = B_TRUE;
int retval = 0;
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
if (!zpool_name_valid(hdl, B_FALSE, newname))
- return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("Internal error: unable to "
return (-1);
}
- verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
- == 0);
- verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
+ tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
+ vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (props) {
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
- props, vers, flags, msg)) == NULL)
+ props, vers, flags, errbuf)) == NULL)
+ return (-1);
+ (void) nvlist_lookup_uint64(zc_props,
+ zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
+ if (readonly) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property %s can only be set at import time"),
+ zpool_prop_to_name(ZPOOL_PROP_READONLY));
return (-1);
+ }
}
if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
- char *type;
+ boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
+ const char *type;
nvlist_t **mchild, *vdev;
uint_t mchildren;
int entry;
continue;
}
lastlog = 0;
- verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
- == 0);
- if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
+ type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
+
+ if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
+ vdev = child[c];
+ if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
+ goto out;
+ continue;
+ } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool must be composed only of mirrors\n"));
- retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
+ retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
+ if (nvlist_lookup_string(child[c],
+ ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
+ if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
+ is_special = B_TRUE;
+ else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
+ is_dedup = B_TRUE;
+ }
verify(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
+
+ if (flags.dryrun != 0) {
+ if (is_dedup == B_TRUE) {
+ if (nvlist_add_string(varray[vcount - 1],
+ ZPOOL_CONFIG_ALLOCATION_BIAS,
+ VDEV_ALLOC_BIAS_DEDUP) != 0)
+ goto out;
+ } else if (is_special == B_TRUE) {
+ if (nvlist_add_string(varray[vcount - 1],
+ ZPOOL_CONFIG_ALLOCATION_BIAS,
+ VDEV_ALLOC_BIAS_SPECIAL) != 0)
+ goto out;
+ }
+ }
}
/* did we find every disk the user specified? */
if (found != newchildren) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
"include at most one disk from each mirror"));
- retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
+ retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
}
/* Add all the children we found */
- if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
- lastlog == 0 ? vcount : lastlog) != 0)
+ if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
+ (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
goto out;
/*
zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
- if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
- goto out;
- if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
- goto out;
+ zcmd_write_conf_nvlist(hdl, &zc, newconfig);
+ if (zc_props != NULL)
+ zcmd_write_src_nvlist(hdl, &zc, zc_props);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
- retval = zpool_standard_error(hdl, errno, msg);
+ retval = zpool_standard_error(hdl, errno, errbuf);
goto out;
}
}
/*
- * Remove the given device. Currently, this is supported only for hot spares,
- * cache, and log devices.
+ * Remove the given device.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t version;
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
- (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
- &islog)) == 0)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
- /*
- * XXX - this should just go away.
- */
- if (!avail_spare && !l2cache && !islog) {
+ if (zpool_is_draid_spare(path)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "only inactive hot spares, cache, "
- "or log devices can be removed"));
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ "dRAID spares cannot be removed"));
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
}
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ &islog)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
+
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (islog && version < SPA_VERSION_HOLES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "pool must be upgrade to support log removal"));
- return (zfs_error(hdl, EZFS_BADVERSION, msg));
+ "pool must be upgraded to support log removal"));
+ return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
- return (zpool_standard_error(hdl, errno, msg));
+ switch (errno) {
+
+ case EALREADY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "removal for this vdev is already in progress."));
+ (void) zfs_error(hdl, EZFS_BUSY, errbuf);
+ break;
+
+ case EINVAL:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid config; all top-level vdevs must "
+ "have the same sector size and not be raidz."));
+ (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
+ break;
+
+ case EBUSY:
+ if (islog) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Mount encrypted datasets to replay logs."));
+ } else {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Pool busy; removal may already be in progress"));
+ }
+ (void) zfs_error(hdl, EZFS_BUSY, errbuf);
+ break;
+
+ case EACCES:
+ if (islog) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Mount encrypted datasets to replay logs."));
+ (void) zfs_error(hdl, EZFS_BUSY, errbuf);
+ } else {
+ (void) zpool_standard_error(hdl, errno, errbuf);
+ }
+ break;
+
+ default:
+ (void) zpool_standard_error(hdl, errno, errbuf);
+ }
+ return (-1);
+}
+
+int
+zpool_vdev_remove_cancel(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = {{0}};
+ char errbuf[ERRBUFLEN];
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot cancel removal"));
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_cookie = 1;
+
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, errbuf));
+}
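+
+/*
+ * A minimal sketch of starting a device evacuation and then cancelling it;
+ * the vdev name "mirror-1" is a placeholder and error handling is elided.
+ *
+ *	if (zpool_vdev_remove(zhp, "mirror-1") == 0) {
+ *		// ... later, abort the in-progress evacuation ...
+ *		(void) zpool_vdev_remove_cancel(zhp);
+ *	}
+ */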
+
+int
+zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
+ uint64_t *sizep)
+{
+ char errbuf[ERRBUFLEN];
+ nvlist_t *tgt;
+ boolean_t avail_spare, l2cache, islog;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
+ path);
+
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ &islog)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
+
+ if (avail_spare || l2cache || islog) {
+ *sizep = 0;
+ return (0);
+ }
+
+ if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "indirect size not available"));
+ return (zfs_error(hdl, EINVAL, errbuf));
+ }
+ return (0);
}
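+
+/*
+ * A minimal sketch of querying how much space a previously removed (now
+ * indirect) top-level vdev still maps; the vdev name is a placeholder.
+ *
+ *	uint64_t size;
+ *	if (zpool_vdev_indirect_size(zhp, "mirror-1", &size) == 0)
+ *		(void) printf("indirect size: %llu\n", (u_longlong_t)size);
+ */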
/*
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
nvlist_t *tgt;
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
int error;
if (path)
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
path);
else
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
- &l2cache, NULL)) == 0)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ &l2cache, NULL)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
/*
* Don't allow error clearing for hot spares. Do allow
* error clearing for l2cache devices.
*/
if (avail_spare)
- return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
- &zc.zc_guid) == 0);
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
}
- zpool_get_rewind_policy(rewindnvl, &policy);
- zc.zc_cookie = policy.zrp_request;
+ zpool_get_load_policy(rewindnvl, &policy);
+ zc.zc_cookie = policy.zlp_rewind;
- if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
- return (-1);
-
- if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
- return (-1);
+ zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
+ zcmd_write_src_nvlist(hdl, &zc, rewindnvl);
while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
- errno == ENOMEM) {
- if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
- }
+ errno == ENOMEM)
+ zcmd_expand_dst_nvlist(hdl, &zc);
- if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
+ if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
- if (policy.zrp_request &
+ if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
- ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
+ ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
}
zcmd_free_nvlists(&zc);
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
/*
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
+ char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
(u_longlong_t)guid);
zc.zc_guid = guid;
zc.zc_cookie = ZPOOL_NO_REWIND;
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
+ if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
/*
int
zpool_reguid(zpool_handle_t *zhp)
{
- char msg[1024];
+ char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_cmd_t zc = {"\0"};
- (void) snprintf(msg, sizeof (msg),
+ (void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
return (0);
- return (zpool_standard_error(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno, errbuf));
}
/*
return (0);
}
-#if defined(__sun__) || defined(__sun)
-/*
- * Convert from a devid string to a path.
- */
-static char *
-devid_to_path(char *devid_str)
-{
- ddi_devid_t devid;
- char *minor;
- char *path;
- devid_nmlist_t *list = NULL;
- int ret;
-
- if (devid_str_decode(devid_str, &devid, &minor) != 0)
- return (NULL);
-
- ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
-
- devid_str_free(minor);
- devid_free(devid);
-
- if (ret != 0)
- return (NULL);
-
- /*
- * In a case the strdup() fails, we will just return NULL below.
- */
- path = strdup(list[0].devname);
-
- devid_free_nmlist(list);
-
- return (path);
-}
-
-/*
- * Convert from a path to a devid string.
- */
-static char *
-path_to_devid(const char *path)
-{
- int fd;
- ddi_devid_t devid;
- char *minor, *ret;
-
- if ((fd = open(path, O_RDONLY)) < 0)
- return (NULL);
-
- minor = NULL;
- ret = NULL;
- if (devid_get(fd, &devid) == 0) {
- if (devid_get_minor_name(fd, &minor) == 0)
- ret = devid_str_encode(devid, minor);
- if (minor != NULL)
- devid_str_free(minor);
- devid_free(devid);
- }
- (void) close(fd);
-
- return (ret);
-}
-
-/*
- * Issue the necessary ioctl() to update the stored path value for the vdev. We
- * ignore any failure here, since a common case is for an unprivileged user to
- * type 'zpool status', and we'll display the correct information anyway.
- */
-static void
-set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
-{
- zfs_cmd_t zc = {"\0"};
-
- (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
- verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
- &zc.zc_guid) == 0);
-
- (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
-}
-#endif /* sun */
-
-/*
- * Remove partition suffix from a vdev path. Partition suffixes may take three
- * forms: "-partX", "pX", or "X", where X is a string of digits. The second
- * case only occurs when the suffix is preceded by a digit, i.e. "md0p0" The
- * third case only occurs when preceded by a string matching the regular
- * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
- *
- * caller must free the returned string
- */
-char *
-zfs_strip_partition(char *path)
-{
- char *tmp = strdup(path);
- char *part = NULL, *d = NULL;
- if (!tmp)
- return (NULL);
-
- if ((part = strstr(tmp, "-part")) && part != tmp) {
- d = part + 5;
- } else if ((part = strrchr(tmp, 'p')) &&
- part > tmp + 1 && isdigit(*(part-1))) {
- d = part + 1;
- } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
- tmp[1] == 'd') {
- for (d = &tmp[2]; isalpha(*d); part = ++d) { }
- } else if (strncmp("xvd", tmp, 3) == 0) {
- for (d = &tmp[3]; isalpha(*d); part = ++d) { }
- }
- if (part && d && *d != '\0') {
- for (; isdigit(*d); d++) { }
- if (*d == '\0')
- *part = '\0';
- }
-
- return (tmp);
-}
-
-/*
- * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
- *
- * path: /dev/sda1
- * returns: /dev/sda
- *
- * Returned string must be freed.
- */
-char *
-zfs_strip_partition_path(char *path)
-{
- char *newpath = strdup(path);
- char *sd_offset;
- char *new_sd;
-
- if (!newpath)
- return (NULL);
-
- /* Point to "sda1" part of "/dev/sda1" */
- sd_offset = strrchr(newpath, '/') + 1;
-
- /* Get our new name "sda" */
- new_sd = zfs_strip_partition(sd_offset);
- if (!new_sd) {
- free(newpath);
- return (NULL);
- }
-
- /* Paste the "sda" where "sda1" was */
- strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
-
- /* Free temporary "sda" */
- free(new_sd);
-
- return (newpath);
-}
-
#define PATH_BUF_LEN 64
/*
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
int name_flags)
{
- char *path, *type, *env;
+ const char *type, *tpath;
+ const char *path;
uint64_t value;
char buf[PATH_BUF_LEN];
- char tmpbuf[PATH_BUF_LEN];
+ char tmpbuf[PATH_BUF_LEN * 2];
/*
* vdev_name will be "root"/"root-0" for the root vdev, but it is the
* zpool name that will be displayed to the user.
*/
- verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+ type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
return (zfs_strdup(hdl, zpool_get_name(zhp)));
- env = getenv("ZPOOL_VDEV_NAME_PATH");
- if (env && (strtoul(env, NULL, 0) > 0 ||
- !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
+ if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
name_flags |= VDEV_NAME_PATH;
-
- env = getenv("ZPOOL_VDEV_NAME_GUID");
- if (env && (strtoul(env, NULL, 0) > 0 ||
- !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
+ if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
name_flags |= VDEV_NAME_GUID;
-
- env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
- if (env && (strtoul(env, NULL, 0) > 0 ||
- !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
+ if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf;
- } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
-#if defined(__sun__) || defined(__sun)
- /*
- * Live VDEV path updates to a kernel VDEV during a
- * zpool_vdev_name lookup are not supported on Linux.
- */
- char *devid;
- vdev_stat_t *vs;
- uint_t vsc;
-
- /*
- * If the device is dead (faulted, offline, etc) then don't
- * bother opening it. Otherwise we may be forcing the user to
- * open a misbehaving device, which can have undesirable
- * effects.
- */
- if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
- (uint64_t **)&vs, &vsc) != 0 ||
- vs->vs_state >= VDEV_STATE_DEGRADED) &&
- zhp != NULL &&
- nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
- /*
- * Determine if the current path is correct.
- */
- char *newdevid = path_to_devid(path);
-
- if (newdevid == NULL ||
- strcmp(devid, newdevid) != 0) {
- char *newpath;
-
- if ((newpath = devid_to_path(devid)) != NULL) {
- /*
- * Update the path appropriately.
- */
- set_path(zhp, nv, newpath);
- if (nvlist_add_string(nv,
- ZPOOL_CONFIG_PATH, newpath) == 0)
- verify(nvlist_lookup_string(nv,
- ZPOOL_CONFIG_PATH,
- &path) == 0);
- free(newpath);
- }
- }
-
- if (newdevid)
- devid_str_free(newdevid);
- }
-#endif /* sun */
+ } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
+ path = tpath;
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
*/
if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
- path = strrchr(path, '/');
- path++;
+ path = zfs_strip_path(path);
}
/*
- * Remove the partition from the path it this is a whole disk.
+ * Remove the partition from the path if this is a whole disk.
*/
- if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
+ if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
+ nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
* If it's a raidz device, we need to stick in the parity level.
*/
if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
- verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
- &value) == 0);
+ value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
(void) snprintf(buf, sizeof (buf), "%s%llu", path,
(u_longlong_t)value);
path = buf;
}
+ /*
+ * If it's a dRAID device, we add parity, groups, and spares.
+ */
+ if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
+ uint64_t ndata, nparity, nspares;
+ nvlist_t **child;
+ uint_t children;
+
+ verify(nvlist_lookup_nvlist_array(nv,
+ ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
+ nparity = fnvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_NPARITY);
+ ndata = fnvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_DRAID_NDATA);
+ nspares = fnvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_DRAID_NSPARES);
+
+ path = zpool_draid_name(buf, sizeof (buf), ndata,
+ nparity, nspares, children);
+ }
+
/*
* We identify each top-level vdev by using a <type-id>
* naming convention.
*/
if (name_flags & VDEV_NAME_TYPE_ID) {
- uint64_t id;
- verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
- &id) == 0);
+ uint64_t id = fnvlist_lookup_uint64(nv,
+ ZPOOL_CONFIG_ID);
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
path, (u_longlong_t)id);
path = tmpbuf;
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
- uint64_t count;
- zbookmark_phys_t *zb = NULL;
- int i;
+ zbookmark_phys_t *buf;
+ uint64_t buflen = 10000; /* approx. 1MB of RAM */
+
+ if (fnvlist_lookup_uint64(zhp->zpool_config,
+ ZPOOL_CONFIG_ERRCOUNT) == 0)
+ return (0);
/*
- * Retrieve the raw error list from the kernel. If the number of errors
- * has increased, allocate more space and continue until we get the
- * entire list.
+ * Retrieve the raw error list from the kernel. If it doesn't fit,
+ * allocate a larger buffer and retry.
*/
- verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
- &count) == 0);
- if (count == 0)
- return (0);
- zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
- count * sizeof (zbookmark_phys_t));
- zc.zc_nvlist_dst_size = count;
(void) strcpy(zc.zc_name, zhp->zpool_name);
for (;;) {
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
+ buf = zfs_alloc(zhp->zpool_hdl,
+ buflen * sizeof (zbookmark_phys_t));
+ zc.zc_nvlist_dst = (uintptr_t)buf;
+ zc.zc_nvlist_dst_size = buflen;
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
&zc) != 0) {
- free((void *)(uintptr_t)zc.zc_nvlist_dst);
+ free(buf);
if (errno == ENOMEM) {
- void *dst;
-
- count = zc.zc_nvlist_dst_size;
- dst = zfs_alloc(zhp->zpool_hdl, count *
- sizeof (zbookmark_phys_t));
- zc.zc_nvlist_dst = (uintptr_t)dst;
+ buflen *= 2;
} else {
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "errors: List of "
/*
* Sort the resulting bookmarks. This is a little confusing due to the
* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
- * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
+ * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
* _not_ copied as part of the process. So we point the start of our
* array appropriately and decrement the total number of elements.
*/
- zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
- zc.zc_nvlist_dst_size;
- count -= zc.zc_nvlist_dst_size;
+ zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
+ uint64_t zblen = buflen - zc.zc_nvlist_dst_size;
- qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
+ qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
/*
* Fill in the nverrlistp with nvlist's of dataset and object numbers.
*/
- for (i = 0; i < count; i++) {
+ for (uint64_t i = 0; i < zblen; i++) {
nvlist_t *nv;
/* ignoring zb_blkid and zb_level for now */
nvlist_free(nv);
}
- free((void *)(uintptr_t)zc.zc_nvlist_dst);
+ free(buf);
return (0);
nomem:
- free((void *)(uintptr_t)zc.zc_nvlist_dst);
+ free(buf);
return (no_memory(zhp->zpool_hdl));
}
{
int i;
- (void) strlcpy(string, basename(argv[0]), len);
+ (void) strlcpy(string, zfs_basename(argv[0]), len);
for (i = 1; i < argc; i++) {
(void) strlcat(string, " ", len);
(void) strlcat(string, argv[i], len);
{
zfs_cmd_t zc = {"\0"};
nvlist_t *args;
- int err;
args = fnvlist_alloc();
fnvlist_add_string(args, "message", message);
- err = zcmd_write_src_nvlist(hdl, &zc, args);
- if (err == 0)
- err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
+ zcmd_write_src_nvlist(hdl, &zc, args);
+ int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
nvlist_free(args);
zcmd_free_nvlists(&zc);
return (err);
zc.zc_history_len = *len;
zc.zc_history_offset = *off;
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
+ if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
switch (errno) {
case EPERM:
return (zfs_error_fmt(hdl, EZFS_PERM,
return (0);
}
-/*
- * Process the buffer of nvlists, unpacking and storing each nvlist record
- * into 'records'. 'leftover' is set to the number of bytes that weren't
- * processed as there wasn't a complete record.
- */
-int
-zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
- nvlist_t ***records, uint_t *numrecords)
-{
- uint64_t reclen;
- nvlist_t *nv;
- int i;
- void *tmp;
-
- while (bytes_read > sizeof (reclen)) {
-
- /* get length of packed record (stored as little endian) */
- for (i = 0, reclen = 0; i < sizeof (reclen); i++)
- reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
-
- if (bytes_read < sizeof (reclen) + reclen)
- break;
-
- /* unpack record */
- if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
- return (ENOMEM);
- bytes_read -= sizeof (reclen) + reclen;
- buf += sizeof (reclen) + reclen;
-
- /* add record to nvlist array */
- (*numrecords)++;
- if (ISP2(*numrecords + 1)) {
- tmp = realloc(*records,
- *numrecords * 2 * sizeof (nvlist_t *));
- if (tmp == NULL) {
- nvlist_free(nv);
- (*numrecords)--;
- return (ENOMEM);
- }
- *records = tmp;
- }
- (*records)[*numrecords - 1] = nv;
- }
-
- *leftover = bytes_read;
- return (0);
-}
-
/*
* Retrieve the command history of a pool.
*/
int
-zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
+zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
+ boolean_t *eof)
{
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
char *buf;
int buflen = 128 * 1024;
- uint64_t off = 0;
nvlist_t **records = NULL;
uint_t numrecords = 0;
- int err, i;
+ int err = 0, i;
+ uint64_t start = *off;
- buf = malloc(buflen);
- if (buf == NULL)
- return (ENOMEM);
- do {
+ buf = zfs_alloc(hdl, buflen);
+
+ /* process about 1MiB at a time */
+ while (*off - start < 1024 * 1024) {
uint64_t bytes_read = buflen;
uint64_t leftover;
- if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
+ if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
break;
/* if nothing else was read in, we're at EOF, just return */
- if (!bytes_read)
+ if (!bytes_read) {
+ *eof = B_TRUE;
break;
+ }
if ((err = zpool_history_unpack(buf, bytes_read,
- &leftover, &records, &numrecords)) != 0)
+ &leftover, &records, &numrecords)) != 0) {
+ zpool_standard_error_fmt(hdl, err,
+ dgettext(TEXT_DOMAIN,
+ "cannot get history for '%s'"), zhp->zpool_name);
break;
- off -= leftover;
+ }
+ *off -= leftover;
if (leftover == bytes_read) {
/*
* no progress made, because buffer is not big enough
*/
buflen *= 2;
free(buf);
- buf = malloc(buflen);
- if (buf == NULL)
- return (ENOMEM);
+ buf = zfs_alloc(hdl, buflen);
}
-
- /* CONSTCOND */
- } while (1);
+ }
free(buf);
if (!err) {
- verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
- verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
- records, numrecords) == 0);
+ *nvhisp = fnvlist_alloc();
+ fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
+ (const nvlist_t **)records, numrecords);
}
for (i = 0; i < numrecords; i++)
nvlist_free(records[i]);
if (flags & ZEVENT_NONBLOCK)
zc.zc_guid = ZEVENT_NONBLOCK;
- if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
- return (-1);
+ zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);
retry:
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
goto out;
case ENOMEM:
- if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
- error = zfs_error_fmt(hdl, EZFS_NOMEM,
- dgettext(TEXT_DOMAIN, "cannot get event"));
- goto out;
- } else {
- goto retry;
- }
+ zcmd_expand_dst_nvlist(hdl, &zc);
+ goto retry;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
zfs_cmd_t zc = {"\0"};
- char msg[1024];
-
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
- "cannot clear events"));
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
- return (zpool_standard_error_fmt(hdl, errno, msg));
+ return (zpool_standard_error(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot clear events")));
if (count != NULL)
*count = (int)zc.zc_cookie; /* # of events cleared */
return (error);
}
-void
-zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
- char *pathname, size_t len)
+static void
+zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
+ char *pathname, size_t len, boolean_t always_unmounted)
{
zfs_cmd_t zc = {"\0"};
boolean_t mounted = B_FALSE;
/* get the dataset's name */
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_obj = dsobj;
- if (ioctl(zhp->zpool_hdl->libzfs_fd,
+ if (zfs_ioctl(zhp->zpool_hdl,
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
/* just write out a path of two object numbers */
(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
/* find out if the dataset is mounted */
- mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
+ mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
+ &mntpnt);
/* get the corrupted object's path */
(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
zc.zc_obj = obj;
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
&zc) == 0) {
if (mounted) {
(void) snprintf(pathname, len, "%s%s", mntpnt,
free(mntpnt);
}
-/*
- * Read the EFI label from the config, if a label does not exist then
- * pass back the error to the caller. If the caller has passed a non-NULL
- * diskaddr argument then we set it to the starting address of the EFI
- * partition.
- */
-static int
-read_efi_label(nvlist_t *config, diskaddr_t *sb)
+void
+zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
+ char *pathname, size_t len)
{
- char *path;
- int fd;
- char diskname[MAXPATHLEN];
- int err = -1;
-
- if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
- return (err);
-
- (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
- strrchr(path, '/'));
- if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
- struct dk_gpt *vtoc;
-
- if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
- if (sb != NULL)
- *sb = vtoc->efi_parts[0].p_start;
- efi_free(vtoc);
- }
- (void) close(fd);
- }
- return (err);
+ zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
+void
+zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
+ char *pathname, size_t len)
+{
+ zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
+}
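+
+/*
+ * A minimal sketch of resolving a corrupted object reported by the error
+ * log to a readable path. The dsobj/obj pair would normally come from a
+ * zbookmark_phys_t; the buffer size here is an assumption.
+ *
+ *	char pathname[MAXPATHLEN * 2];
+ *	zpool_obj_to_path(zhp, zb.zb_objset, zb.zb_object,
+ *	    pathname, sizeof (pathname));
+ *	(void) printf("%s\n", pathname);
+ */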
/*
- * determine where a partition starts on a disk in the current
- * configuration
+ * Wait while the specified activity is in progress in the pool.
*/
-static diskaddr_t
-find_start_block(nvlist_t *config)
+int
+zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
- nvlist_t **child;
- uint_t c, children;
- diskaddr_t sb = MAXOFFSET_T;
- uint64_t wholedisk;
-
- if (nvlist_lookup_nvlist_array(config,
- ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
- if (nvlist_lookup_uint64(config,
- ZPOOL_CONFIG_WHOLE_DISK,
- &wholedisk) != 0 || !wholedisk) {
- return (MAXOFFSET_T);
- }
- if (read_efi_label(config, &sb) < 0)
- sb = MAXOFFSET_T;
- return (sb);
- }
+ boolean_t missing;
- for (c = 0; c < children; c++) {
- sb = find_start_block(child[c]);
- if (sb != MAXOFFSET_T) {
- return (sb);
- }
+ int error = zpool_wait_status(zhp, activity, &missing, NULL);
+
+ if (missing) {
+ (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
+ dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
+ zhp->zpool_name);
+ return (ENOENT);
+ } else {
+ return (error);
}
- return (MAXOFFSET_T);
}
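+
+/*
+ * A minimal sketch of blocking until a resilver finishes, assuming the pool
+ * handle has already been opened; ZPOOL_WAIT_RESILVER is one of the
+ * zpool_wait_activity_t values.
+ *
+ *	int err = zpool_wait(zhp, ZPOOL_WAIT_RESILVER);
+ *	if (err != 0)
+ *		(void) fprintf(stderr, "wait failed: %d\n", err);
+ */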
-static int
-zpool_label_disk_check(char *path)
+/*
+ * Wait for the given activity and return the status of the wait (whether or not
+ * any waiting was done) in the 'waited' parameter. Non-existent pools are
+ * reported via the 'missing' parameter, rather than by printing an error
+ * message. This is convenient when this function is called in a loop over a
+ * long period of time (as it is, for example, by zpool's wait cmd). In that
+ * scenario, a pool being exported or destroyed should be considered a normal
+ * event, so we don't want to print an error when we find that the pool doesn't
+ * exist.
+ */
+int
+zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
+ boolean_t *missing, boolean_t *waited)
{
- struct dk_gpt *vtoc;
- int fd, err;
-
- if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
- return (errno);
+ int error = lzc_wait(zhp->zpool_name, activity, waited);
+ *missing = (error == ENOENT);
+ if (*missing)
+ return (0);
- if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
- (void) close(fd);
- return (err);
+ if (error != 0) {
+ (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
+ dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
+ zhp->zpool_name);
}
- if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
- efi_free(vtoc);
- (void) close(fd);
- return (EIDRM);
+ return (error);
+}
+
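+/*
+ * A minimal sketch of the long-running-caller case described above,
+ * treating a pool that disappears mid-wait as a normal event rather than
+ * an error; the activity chosen here is only an example.
+ *
+ *	boolean_t missing, waited;
+ *	int err = zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB, &missing, &waited);
+ *	if (err == 0 && missing)
+ *		err = 0;	// pool was exported or destroyed; not an error
+ */
+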
+int
+zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
+{
+ int error = lzc_set_bootenv(zhp->zpool_name, envmap);
+ if (error != 0) {
+ (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
+ dgettext(TEXT_DOMAIN,
+ "error setting bootenv in pool '%s'"), zhp->zpool_name);
}
- efi_free(vtoc);
- (void) close(fd);
- return (0);
+ return (error);
}
-/*
- * Generate a unique partition name for the ZFS member. Partitions must
- * have unique names to ensure udev will be able to create symlinks under
- * /dev/disk/by-partlabel/ for all pool members. The partition names are
- * of the form <pool>-<unique-id>.
- */
-static void
-zpool_label_name(char *label_name, int label_size)
+int
+zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
- uint64_t id = 0;
- int fd;
-
- fd = open("/dev/urandom", O_RDONLY);
- if (fd >= 0) {
- if (read(fd, &id, sizeof (id)) != sizeof (id))
- id = 0;
+ nvlist_t *nvl;
+ int error;
- close(fd);
+ nvl = NULL;
+ error = lzc_get_bootenv(zhp->zpool_name, &nvl);
+ if (error != 0) {
+ (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
+ dgettext(TEXT_DOMAIN,
+ "error getting bootenv in pool '%s'"), zhp->zpool_name);
+ } else {
+ *nvlp = nvl;
}
- if (id == 0)
- id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
-
- snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
+ return (error);
}
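+
+/*
+ * A minimal sketch of round-tripping a boot environment nvlist. The key
+ * "command" is only a placeholder; the actual layout of the nvlist is
+ * defined by the boot loader that consumes it.
+ *
+ *	nvlist_t *env = fnvlist_alloc();
+ *	fnvlist_add_string(env, "command", "boot -s");
+ *	if (zpool_set_bootenv(zhp, env) == 0) {
+ *		nvlist_t *readback = NULL;
+ *		if (zpool_get_bootenv(zhp, &readback) == 0)
+ *			fnvlist_free(readback);
+ *	}
+ *	fnvlist_free(env);
+ */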
/*
- * Label an individual disk. The name provided is the short name,
- * stripped of any leading /dev path.
+ * Attempt to read and parse feature file(s) (from "compatibility" property).
+ * Files contain zpool feature names, comma- or whitespace-separated.
+ * Comments (# character to next newline) are discarded.
+ *
+ * Arguments:
+ * compatibility : string containing feature filenames
+ * features : either NULL or pointer to array of boolean
+ * report : either NULL or pointer to string buffer
+ * rlen : length of "report" buffer
+ *
+ * compatibility is NULL (unset), "", "off", "legacy", or list of
+ * comma-separated filenames. Filenames should either be absolute,
+ * or relative to:
+ * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
+ * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
+ * (Unset), "" or "off" => enable all features
+ * "legacy" => disable all features
+ *
+ * Any feature names read from files which match unames in spa_feature_table
+ * will have the corresponding boolean set in the features array (if non-NULL).
+ * If more than one feature set is specified, only features present in *all* of
+ * them will be set.
+ *
+ * "report" if not NULL will be populated with a suitable status message.
+ *
+ * Return values:
+ * ZPOOL_COMPATIBILITY_OK : files read and parsed ok
+ * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
+ * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
+ * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
+ * ZPOOL_COMPATIBILITY_NOFILES : no feature files found
*/
-int
-zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
+zpool_compat_status_t
+zpool_load_compat(const char *compat, boolean_t *features, char *report,
+ size_t rlen)
{
- char path[MAXPATHLEN];
- struct dk_gpt *vtoc;
- int rval, fd;
- size_t resv = EFI_MIN_RESV_SIZE;
- uint64_t slice_size;
- diskaddr_t start_block;
- char errbuf[1024];
-
- /* prepare an error message just in case */
- (void) snprintf(errbuf, sizeof (errbuf),
- dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
-
- if (zhp) {
- nvlist_t *nvroot;
-
- verify(nvlist_lookup_nvlist(zhp->zpool_config,
- ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+ int sdirfd, ddirfd, featfd;
+ struct stat fs;
+ char *fc;
+ char *ps, *ls, *ws;
+ char *file, *line, *word;
+
+ char l_compat[ZFS_MAXPROPLEN];
+
+ boolean_t ret_nofiles = B_TRUE;
+ boolean_t ret_badfile = B_FALSE;
+ boolean_t ret_badtoken = B_FALSE;
+ boolean_t ret_warntoken = B_FALSE;
+
+ /* special cases (unset), "" and "off" => enable all features */
+ if (compat == NULL || compat[0] == '\0' ||
+ strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
+ if (features != NULL)
+ for (uint_t i = 0; i < SPA_FEATURES; i++)
+ features[i] = B_TRUE;
+ if (report != NULL)
+ strlcpy(report, gettext("all features enabled"), rlen);
+ return (ZPOOL_COMPATIBILITY_OK);
+ }
- if (zhp->zpool_start_block == 0)
- start_block = find_start_block(nvroot);
- else
- start_block = zhp->zpool_start_block;
- zhp->zpool_start_block = start_block;
- } else {
- /* new pool */
- start_block = NEW_START_BLOCK;
+ /* Final special case "legacy" => disable all features */
+ if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
+ if (features != NULL)
+ for (uint_t i = 0; i < SPA_FEATURES; i++)
+ features[i] = B_FALSE;
+ if (report != NULL)
+ strlcpy(report, gettext("all features disabled"), rlen);
+ return (ZPOOL_COMPATIBILITY_OK);
}
- (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
+ /*
+ * Start with all true; will be ANDed with results from each file
+ */
+ if (features != NULL)
+ for (uint_t i = 0; i < SPA_FEATURES; i++)
+ features[i] = B_TRUE;
- if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
- /*
- * This shouldn't happen. We've long since verified that this
- * is a valid device.
- */
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
- "label '%s': unable to open device: %d"), path, errno);
- return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
- }
+ char err_badfile[ZFS_MAXPROPLEN] = "";
+ char err_badtoken[ZFS_MAXPROPLEN] = "";
- if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
- /*
- * The only way this can fail is if we run out of memory, or we
- * were unable to read the disk's capacity
- */
- if (errno == ENOMEM)
- (void) no_memory(hdl);
+ /*
+ * We ignore errors from the directory open()
+ * as the directory fds are only needed if a filename is relative,
+ * which will be checked during the openat().
+ */
- (void) close(fd);
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
- "label '%s': unable to read disk capacity"), path);
+/* O_PATH is safer than O_RDONLY if the system allows it */
+#if defined(O_PATH)
+#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
+#else
+#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
+#endif
- return (zfs_error(hdl, EZFS_NOCAP, errbuf));
- }
+ sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
+ ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
- slice_size = vtoc->efi_last_u_lba + 1;
- slice_size -= EFI_MIN_RESV_SIZE;
- if (start_block == MAXOFFSET_T)
- start_block = NEW_START_BLOCK;
- slice_size -= start_block;
- slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
+ (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
- vtoc->efi_parts[0].p_start = start_block;
- vtoc->efi_parts[0].p_size = slice_size;
+ for (file = strtok_r(l_compat, ",", &ps);
+ file != NULL;
+ file = strtok_r(NULL, ",", &ps)) {
- /*
- * Why we use V_USR: V_BACKUP confuses users, and is considered
- * disposable by some EFI utilities (since EFI doesn't have a backup
- * slice). V_UNASSIGNED is supposed to be used only for zero size
- * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
- * etc. were all pretty specific. V_USR is as close to reality as we
- * can get, in the absence of V_OTHER.
- */
- vtoc->efi_parts[0].p_tag = V_USR;
- zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
+ boolean_t l_features[SPA_FEATURES];
- vtoc->efi_parts[8].p_start = slice_size + start_block;
- vtoc->efi_parts[8].p_size = resv;
- vtoc->efi_parts[8].p_tag = V_RESERVED;
+ enum { Z_SYSCONF, Z_DATA } source;
- rval = efi_write(fd, vtoc);
+ /* try sysconfdir first, then datadir */
+ source = Z_SYSCONF;
+ if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
+ featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
+ source = Z_DATA;
+ }
- /* Flush the buffers to disk and invalidate the page cache. */
- (void) fsync(fd);
- (void) ioctl(fd, BLKFLSBUF);
+ /* File readable and correct size? */
+ if (featfd < 0 ||
+ fstat(featfd, &fs) < 0 ||
+ fs.st_size < 1 ||
+ fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
+ (void) close(featfd);
+ strlcat(err_badfile, file, ZFS_MAXPROPLEN);
+ strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
+ ret_badfile = B_TRUE;
+ continue;
+ }
- if (rval == 0)
- rval = efi_rescan(fd);
+/* Prefault the file if the system allows it */
+#if defined(MAP_POPULATE)
+#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
+#elif defined(MAP_PREFAULT_READ)
+#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
+#else
+#define ZC_MMAP_FLAGS (MAP_PRIVATE)
+#endif
+
+ /* private mmap() so we can strtok safely */
+ fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
+ ZC_MMAP_FLAGS, featfd, 0);
+ (void) close(featfd);
+
+ /* map ok, and last character == newline? */
+ if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
+ (void) munmap((void *) fc, fs.st_size);
+ strlcat(err_badfile, file, ZFS_MAXPROPLEN);
+ strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
+ ret_badfile = B_TRUE;
+ continue;
+ }
- /*
- * Some block drivers (like pcata) may not support EFI GPT labels.
- * Print out a helpful error message directing the user to manually
- * label the disk and give a specific slice.
- */
- if (rval != 0) {
- (void) close(fd);
- efi_free(vtoc);
+ ret_nofiles = B_FALSE;
+
+ for (uint_t i = 0; i < SPA_FEATURES; i++)
+ l_features[i] = B_FALSE;
+
+ /* replace the final newline with NUL to terminate the string */
+ fc[fs.st_size - 1] = '\0';
+
+ for (line = strtok_r(fc, "\n", &ls);
+ line != NULL;
+ line = strtok_r(NULL, "\n", &ls)) {
+ /* discard comments */
+ char *r = strchr(line, '#');
+ if (r != NULL)
+ *r = '\0';
+
+ for (word = strtok_r(line, ", \t", &ws);
+ word != NULL;
+ word = strtok_r(NULL, ", \t", &ws)) {
+ /* Find matching feature name */
+ uint_t f;
+ for (f = 0; f < SPA_FEATURES; f++) {
+ zfeature_info_t *fi =
+ &spa_feature_table[f];
+ if (strcmp(word, fi->fi_uname) == 0) {
+ l_features[f] = B_TRUE;
+ break;
+ }
+ }
+ if (f < SPA_FEATURES)
+ continue;
+
+ /* found an unrecognized word */
+ /* lightly sanitize it */
+ if (strlen(word) > 32)
+ word[32] = '\0';
+ for (char *c = word; *c != '\0'; c++)
+ if (!isprint(*c))
+ *c = '?';
+
+ strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
+ strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
+ if (source == Z_SYSCONF)
+ ret_badtoken = B_TRUE;
+ else
+ ret_warntoken = B_TRUE;
+ }
+ }
+ (void) munmap((void *) fc, fs.st_size);
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
- "parted(8) and then provide a specific slice: %d"), rval);
- return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
+ if (features != NULL)
+ for (uint_t i = 0; i < SPA_FEATURES; i++)
+ features[i] &= l_features[i];
}
+ (void) close(sdirfd);
+ (void) close(ddirfd);
+
+ /* Return the most serious error */
+ if (ret_badfile) {
+ if (report != NULL)
+ snprintf(report, rlen, gettext("could not read/"
+ "parse feature file(s): %s"), err_badfile);
+ return (ZPOOL_COMPATIBILITY_BADFILE);
+ }
+ if (ret_nofiles) {
+ if (report != NULL)
+ strlcpy(report,
+ gettext("no valid compatibility files specified"),
+ rlen);
+ return (ZPOOL_COMPATIBILITY_NOFILES);
+ }
+ if (ret_badtoken) {
+ if (report != NULL)
+ snprintf(report, rlen, gettext("invalid feature "
+ "name(s) in local compatibility files: %s"),
+ err_badtoken);
+ return (ZPOOL_COMPATIBILITY_BADTOKEN);
+ }
+ if (ret_warntoken) {
+ if (report != NULL)
+ snprintf(report, rlen, gettext("unrecognized feature "
+ "name(s) in distribution compatibility files: %s"),
+ err_badtoken);
+ return (ZPOOL_COMPATIBILITY_WARNTOKEN);
+ }
+ if (report != NULL)
+ strlcpy(report, gettext("compatibility set ok"), rlen);
+ return (ZPOOL_COMPATIBILITY_OK);
+}
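As a minimal caller sketch (not part of this change): the compat file name below is hypothetical, and the declarations are assumed to come from the usual libzfs.h / zfeature_common.h headers. Per the parsing loop above, a compat file is simply a newline-terminated list of feature unames, with '#' comments and whitespace/comma separators.

#include <libzfs.h>
#include <stdio.h>

static void
example_check_compat(void)
{
	boolean_t features[SPA_FEATURES];
	char report[1024];
	zpool_compat_status_t st;

	/* "example.compat" is a hypothetical file under one of the compat.d dirs */
	st = zpool_load_compat("example.compat", features, report,
	    sizeof (report));

	if (st == ZPOOL_COMPATIBILITY_OK ||
	    st == ZPOOL_COMPATIBILITY_WARNTOKEN) {
		/* list every feature the file(s) left enabled */
		for (uint_t i = 0; i < SPA_FEATURES; i++) {
			if (features[i])
				(void) printf("enabled: %s\n",
				    spa_feature_table[i].fi_uname);
		}
	}
	(void) fprintf(stderr, "%s\n", report);
}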
- (void) close(fd);
- efi_free(vtoc);
-
- (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
- (void) zfs_append_partition(path, MAXPATHLEN);
+static int
+zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
+{
+ nvlist_t *tgt;
+ boolean_t avail_spare, l2cache;
- /* Wait to udev to signal use the device has settled. */
- rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
- if (rval) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
- "detect device partitions on '%s': %d"), path, rval);
- return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
+ verify(zhp != NULL);
+ if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
+ char errbuf[ERRBUFLEN];
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
+ return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
}
- /* We can't be to paranoid. Read the label back and verify it. */
- (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
- rval = zpool_label_disk_check(path);
- if (rval) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
- "EFI label on '%s' is damaged. Ensure\nthis device "
- "is not in in use, and is functioning properly: %d"),
- path, rval);
- return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
+ if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
+ NULL)) == NULL) {
+ char errbuf[ERRBUFLEN];
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "can not find %s in %s"),
+ vdevname, zhp->zpool_name);
+ return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
}
+ *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
return (0);
}
/*
- * Allocate and return the underlying device name for a device mapper device.
- * If a device mapper device maps to multiple devices, return the first device.
- *
- * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
- * DM device (like /dev/disk/by-vdev/A0) are also allowed.
- *
- * Returns device name, or NULL on error or no match. If dm_name is not a DM
- * device then return NULL.
- *
- * NOTE: The returned name string must be *freed*.
+ * Get a vdev property value for 'prop' and return the value in
+ * a pre-allocated buffer.
*/
-char *
-dm_get_underlying_path(char *dm_name)
+int
+zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
+ char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
- DIR *dp = NULL;
- struct dirent *ep;
- char *realp;
- char *tmp = NULL;
- char *path = NULL;
- char *dev_str;
- int size;
-
- if (dm_name == NULL)
- return (NULL);
+ nvlist_t *nv;
+ const char *strval;
+ uint64_t intval;
+ zprop_source_t src = ZPROP_SRC_NONE;
- /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
- realp = realpath(dm_name, NULL);
- if (realp == NULL)
- return (NULL);
+ if (prop == VDEV_PROP_USERPROP) {
+ /* user property, prop_name must contain the property name */
+ assert(prop_name != NULL);
+ if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
+ src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
+ strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
+ } else {
+ /* user prop not found */
+ return (-1);
+ }
+ (void) strlcpy(buf, strval, len);
+ if (srctype)
+ *srctype = src;
+ return (0);
+ }
- /*
- * If they preface 'dev' with a path (like "/dev") then strip it off.
- * We just want the 'dm-N' part.
- */
- tmp = strrchr(realp, '/');
- if (tmp != NULL)
- dev_str = tmp + 1; /* +1 since we want the chr after '/' */
- else
- dev_str = tmp;
+ if (prop_name == NULL)
+ prop_name = (char *)vdev_prop_to_name(prop);
- size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
- if (size == -1 || !tmp)
- goto end;
+ switch (vdev_prop_get_type(prop)) {
+ case PROP_TYPE_STRING:
+ if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
+ src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
+ strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
+ } else {
+ src = ZPROP_SRC_DEFAULT;
+ if ((strval = vdev_prop_default_string(prop)) == NULL)
+ strval = "-";
+ }
+ (void) strlcpy(buf, strval, len);
+ break;
- dp = opendir(tmp);
- if (dp == NULL)
- goto end;
+ case PROP_TYPE_NUMBER:
+ if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
+ src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
+ intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
+ } else {
+ src = ZPROP_SRC_DEFAULT;
+ intval = vdev_prop_default_numeric(prop);
+ }
- /* Return first sd* entry in /sys/block/dm-N/slaves/ */
- while ((ep = readdir(dp))) {
- if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
- size = asprintf(&path, "/dev/%s", ep->d_name);
+ switch (prop) {
+ case VDEV_PROP_ASIZE:
+ case VDEV_PROP_PSIZE:
+ case VDEV_PROP_SIZE:
+ case VDEV_PROP_BOOTSIZE:
+ case VDEV_PROP_ALLOCATED:
+ case VDEV_PROP_FREE:
+ case VDEV_PROP_READ_ERRORS:
+ case VDEV_PROP_WRITE_ERRORS:
+ case VDEV_PROP_CHECKSUM_ERRORS:
+ case VDEV_PROP_INITIALIZE_ERRORS:
+ case VDEV_PROP_OPS_NULL:
+ case VDEV_PROP_OPS_READ:
+ case VDEV_PROP_OPS_WRITE:
+ case VDEV_PROP_OPS_FREE:
+ case VDEV_PROP_OPS_CLAIM:
+ case VDEV_PROP_OPS_TRIM:
+ case VDEV_PROP_BYTES_NULL:
+ case VDEV_PROP_BYTES_READ:
+ case VDEV_PROP_BYTES_WRITE:
+ case VDEV_PROP_BYTES_FREE:
+ case VDEV_PROP_BYTES_CLAIM:
+ case VDEV_PROP_BYTES_TRIM:
+ if (literal) {
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
+ } else {
+ (void) zfs_nicenum(intval, buf, len);
+ }
+ break;
+ case VDEV_PROP_EXPANDSZ:
+ if (intval == 0) {
+ (void) strlcpy(buf, "-", len);
+ } else if (literal) {
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
+ } else {
+ (void) zfs_nicenum(intval, buf, len);
+ }
+ break;
+ case VDEV_PROP_CAPACITY:
+ if (literal) {
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
+ } else {
+ (void) snprintf(buf, len, "%llu%%",
+ (u_longlong_t)intval);
+ }
+ break;
+ case VDEV_PROP_CHECKSUM_N:
+ case VDEV_PROP_CHECKSUM_T:
+ case VDEV_PROP_IO_N:
+ case VDEV_PROP_IO_T:
+ case VDEV_PROP_SLOW_IO_N:
+ case VDEV_PROP_SLOW_IO_T:
+ if (intval == UINT64_MAX) {
+ (void) strlcpy(buf, "-", len);
+ } else {
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
+ }
+ break;
+ case VDEV_PROP_FRAGMENTATION:
+ if (intval == UINT64_MAX) {
+ (void) strlcpy(buf, "-", len);
+ } else {
+ (void) snprintf(buf, len, "%llu%%",
+ (u_longlong_t)intval);
+ }
+ break;
+ case VDEV_PROP_STATE:
+ if (literal) {
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
+ } else {
+ (void) strlcpy(buf, zpool_state_to_name(intval,
+ VDEV_AUX_NONE), len);
+ }
break;
+ default:
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
}
+ break;
+
+ case PROP_TYPE_INDEX:
+ if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
+ src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
+ intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
+ } else {
+ src = ZPROP_SRC_DEFAULT;
+ intval = vdev_prop_default_numeric(prop);
+ /* raidz_expanding is only reported when the vdev provides it */
+ if (prop == VDEV_PROP_RAIDZ_EXPANDING)
+ return (ENOENT);
+ }
+ if (vdev_prop_index_to_string(prop, intval,
+ (const char **)&strval) != 0)
+ return (-1);
+ (void) strlcpy(buf, strval, len);
+ break;
+
+ default:
+ abort();
}
-end:
- if (dp != NULL)
- closedir(dp);
- free(tmp);
- free(realp);
- return (path);
+ if (srctype)
+ *srctype = src;
+
+ return (0);
}
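A small illustrative sketch of the nvlist shape this parser expects and of the literal flag. Assumptions: the nested nvlists use the standard ZPROP_VALUE / ZPROP_SOURCE keys as looked up above, and the fnvlist helpers from libnvpair are available; the size value is arbitrary.

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

static void
example_parse_one_vdev_prop(void)
{
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *nvprop = fnvlist_alloc();
	nvlist_t *nv = fnvlist_alloc();

	/* build the { ZPROP_SOURCE, ZPROP_VALUE } pair the parser looks up */
	fnvlist_add_uint64(nv, ZPROP_SOURCE, ZPROP_SRC_NONE);
	fnvlist_add_uint64(nv, ZPROP_VALUE, 12345);
	fnvlist_add_nvlist(nvprop, vdev_prop_to_name(VDEV_PROP_SIZE), nv);

	/* literal == B_FALSE asks for the human-readable (nicenum) form */
	if (zpool_get_vdev_prop_value(nvprop, VDEV_PROP_SIZE, NULL,
	    buf, sizeof (buf), NULL, B_FALSE) == 0)
		(void) puts(buf);

	fnvlist_free(nv);
	fnvlist_free(nvprop);
}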
/*
- * Return 1 if device is a device mapper or multipath device.
- * Return 0 if not.
+ * Get a vdev property value for 'prop_name' and return the value in
+ * a pre-allocated buffer.
*/
int
-zfs_dev_is_dm(char *dev_name)
+zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
+ char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
+ boolean_t literal)
{
+ nvlist_t *reqnvl, *reqprops;
+ nvlist_t *retprops = NULL;
+ uint64_t vdev_guid = 0;
+ int ret;
- char *tmp;
- tmp = dm_get_underlying_path(dev_name);
- if (tmp == NULL)
- return (0);
+ if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
+ return (ret);
- free(tmp);
- return (1);
-}
+ if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(zhp->zpool_hdl));
+ if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(zhp->zpool_hdl));
-/*
- * By "whole disk" we mean an entire physical disk (something we can
- * label, toggle the write cache on, etc.) as opposed to the full
- * capacity of a pseudo-device such as lofi or did. We act as if we
- * are labeling the disk, which should be a pretty good test of whether
- * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
- * it isn't.
- */
-int
-zfs_dev_is_whole_disk(char *dev_name)
-{
- struct dk_gpt *label;
- int fd;
+ fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
- if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
- return (0);
+ if (prop != VDEV_PROP_USERPROP) {
+ /* prop_name overrides prop value */
+ if (prop_name != NULL)
+ prop = vdev_name_to_prop(prop_name);
+ else
+ prop_name = (char *)vdev_prop_to_name(prop);
+ assert(prop < VDEV_NUM_PROPS);
+ }
- if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
- (void) close(fd);
- return (0);
+ assert(prop_name != NULL);
+ if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
+ nvlist_free(reqnvl);
+ nvlist_free(reqprops);
+ return (no_memory(zhp->zpool_hdl));
+ }
+
+ fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
+
+ ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
+
+ if (ret == 0) {
+ ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
+ len, srctype, literal);
+ } else {
+ char errbuf[ERRBUFLEN];
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
+ " %s in %s"), prop_name, vdevname, zhp->zpool_name);
+ (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
}
- efi_free(label);
- (void) close(fd);
+ nvlist_free(reqnvl);
+ nvlist_free(reqprops);
+ nvlist_free(retprops);
- return (1);
+ return (ret);
}
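A hedged usage sketch for the public entry point: the pool and vdev names are placeholders, and zpool_open()/zpool_close() plus ZFS_MAXPROPLEN are assumed from the public libzfs API.

#include <libzfs.h>
#include <stdio.h>

static void
example_print_vdev_size(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	char buf[ZFS_MAXPROPLEN];
	zprop_source_t src;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)	/* "tank" is hypothetical */
		return;

	/* built-in numeric property: prop_name may be NULL, non-literal output */
	if (zpool_get_vdev_prop(zhp, "sda", VDEV_PROP_SIZE, NULL,
	    buf, sizeof (buf), &src, B_FALSE) == 0)
		(void) printf("size = %s\n", buf);

	zpool_close(zhp);
}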
/*
- * Lookup the underlying device for a device name
- *
- * Often you'll have a symlink to a device, a partition device,
- * or a multipath device, and want to look up the underlying device.
- * This function returns the underlying device name. If the device
- * name is already the underlying device, then just return the same
- * name. If the device is a DM device with multiple underlying devices
- * then return the first one.
- *
- * For example:
- *
- * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
- * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
- * returns: /dev/sda
- *
- * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
- * dev_name: /dev/mapper/mpatha
- * returns: /dev/sda (first device)
- *
- * 3. /dev/sda (already the underlying device)
- * dev_name: /dev/sda
- * returns: /dev/sda
- *
- * 4. /dev/dm-3 (mapped to /dev/sda)
- * dev_name: /dev/dm-3
- * returns: /dev/sda
- *
- * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
- * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
- * returns: /dev/sdb
- *
- * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
- * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
- * returns: /dev/sda
- *
- * Returns underlying device name, or NULL on error or no match.
- *
- * NOTE: The returned name string must be *freed*.
+ * Get all vdev properties
*/
-char *
-zfs_get_underlying_path(char *dev_name)
+int
+zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
+ nvlist_t **outnvl)
{
- char *name = NULL;
- char *tmp;
+ nvlist_t *nvl = NULL;
+ uint64_t vdev_guid = 0;
+ int ret;
- if (dev_name == NULL)
- return (NULL);
+ if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
+ return (ret);
- tmp = dm_get_underlying_path(dev_name);
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(zhp->zpool_hdl));
+
+ fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
+
+ ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);
- /* dev_name not a DM device, so just un-symlinkize it */
- if (tmp == NULL)
- tmp = realpath(dev_name, NULL);
+ nvlist_free(nvl);
- if (tmp != NULL) {
- name = zfs_strip_partition_path(tmp);
- free(tmp);
+ if (ret) {
+ char errbuf[ERRBUFLEN];
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
+ " %s in %s"), vdevname, zhp->zpool_name);
+ (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
}
- return (name);
+ return (ret);
}
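A short iteration sketch, assuming the returned nvlist is keyed by property name with each value being the nested source/value nvlist consumed by zpool_get_vdev_prop_value() above; the vdev name is supplied by the caller.

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

static void
example_dump_vdev_props(zpool_handle_t *zhp, const char *vdevname)
{
	nvlist_t *props = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_all_vdev_props(zhp, vdevname, &props) != 0)
		return;

	/* print just the property names; values are nested nvlists */
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL)
		(void) printf("%s\n", nvpair_name(elem));

	nvlist_free(props);
}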
/*
- * Given a dev name like "sda", return the full enclosure sysfs path to
- * the disk. You can also pass in the name with "/dev" prepended
- * to it (like /dev/sda).
- *
- * For example, disk "sda" in enclosure slot 1:
- * dev: "sda"
- * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
- *
- * 'dev' must be a non-devicemapper device.
- *
- * Returned string must be freed.
+ * Set a vdev property
*/
-char *
-zfs_get_enclosure_sysfs_path(char *dev_name)
+int
+zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
+ const char *propname, const char *propval)
{
- DIR *dp = NULL;
- struct dirent *ep;
- char buf[MAXPATHLEN];
- char *tmp1 = NULL;
- char *tmp2 = NULL;
- char *tmp3 = NULL;
- char *path = NULL;
- size_t size;
- int tmpsize;
-
- if (dev_name == NULL)
- return (NULL);
-
- /* If they preface 'dev' with a path (like "/dev") then strip it off */
- tmp1 = strrchr(dev_name, '/');
- if (tmp1 != NULL)
- dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
-
- tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
- if (tmpsize == -1 || tmp1 == NULL) {
- tmp1 = NULL;
- goto end;
- }
+ int ret;
+ nvlist_t *nvl = NULL;
+ nvlist_t *outnvl = NULL;
+ nvlist_t *props;
+ nvlist_t *realprops;
+ prop_flags_t flags = { 0 };
+ uint64_t version;
+ uint64_t vdev_guid;
- dp = opendir(tmp1);
- if (dp == NULL) {
- tmp1 = NULL; /* To make free() at the end a NOP */
- goto end;
- }
+ if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
+ return (ret);
- /*
- * Look though all sysfs entries in /sys/block/<dev>/device for
- * the enclosure symlink.
- */
- while ((ep = readdir(dp))) {
- /* Ignore everything that's not our enclosure_device link */
- if (strstr(ep->d_name, "enclosure_device") == NULL)
- continue;
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(zhp->zpool_hdl));
+ if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(zhp->zpool_hdl));
- if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
- tmp2 == NULL)
- break;
+ fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
- size = readlink(tmp2, buf, sizeof (buf));
+ if (nvlist_add_string(props, propname, propval) != 0) {
+ nvlist_free(props);
+ return (no_memory(zhp->zpool_hdl));
+ }
- /* Did readlink fail or crop the link name? */
- if (size == -1 || size >= sizeof (buf)) {
- free(tmp2);
- tmp2 = NULL; /* To make free() at the end a NOP */
- break;
- }
+ char errbuf[ERRBUFLEN];
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
+ propname, vdevname, zhp->zpool_name);
- /*
- * We got a valid link. readlink() doesn't terminate strings
- * so we have to do it.
- */
- buf[size] = '\0';
+ flags.vdevprop = 1;
+ version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
+ if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
+ zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
+ nvlist_free(props);
+ nvlist_free(nvl);
+ return (-1);
+ }
- /*
- * Our link will look like:
- *
- * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
- *
- * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
- */
- tmp3 = strstr(buf, "enclosure");
- if (tmp3 == NULL)
- break;
+ nvlist_free(props);
+ props = realprops;
- if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
- /* If asprintf() fails, 'path' is undefined */
- path = NULL;
- break;
- }
+ fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
- if (path == NULL)
- break;
- }
+ ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);
-end:
- free(tmp2);
- free(tmp1);
+ nvlist_free(props);
+ nvlist_free(nvl);
+ nvlist_free(outnvl);
- if (dp != NULL)
- closedir(dp);
+ if (ret)
+ (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
- return (path);
+ return (ret);
}
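Finally, a hedged setter sketch: the vdev name and value are illustrative only, and the string "checksum_n" is assumed to be the user-visible name of the VDEV_PROP_CHECKSUM_N entry handled above.

#include <libzfs.h>

static int
example_set_vdev_checksum_n(zpool_handle_t *zhp)
{
	/* threshold value "10" is an arbitrary example */
	return (zpool_set_vdev_prop(zhp, "sda", "checksum_n", "10"));
}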