/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);

	return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	char *value;
	nvlist_t *nvl, *nv;
	uint64_t ival;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nvl, *nv;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * API compatibility wrapper around zpool_get_prop_literal
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	return (zpool_get_prop_literal(zhp, prop, buf, len, srctype, B_FALSE));
}
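
/*
 * Example (illustrative sketch, not part of this file's API surface):
 * a caller reading a pool property through the wrapper above.  Assumes
 * an already-opened handle `zhp`; buffer size and property are arbitrary.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *	    sizeof (buf), &src) == 0)
 *		(void) printf("health: %s (source %d)\n", buf, (int)src);
 */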
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;

		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
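
/*
 * Illustrative note (not library text): for pool "tank", both "tank" and
 * "tank/ROOT/fs" satisfy the check above (pool name followed by '\0' or
 * '/'), while a name such as "tankX/fs" fails the strncmp/terminator test.
 */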
#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			zfeature_info_t *feature;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, &feature);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
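
/*
 * Example (illustrative sketch): setting a property from application code.
 * `zhp` is assumed to be an open pool handle; error text is reported
 * through the handle's libzfs error facilities on failure.
 *
 *	if (zpool_set_prop(zhp, "comment", "rack 12, shelf 3") != 0)
 *		(void) fprintf(stderr, "failed to set comment\n");
 */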
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		zfeature_info_t *fi;

		ret = zfeature_lookup_name(feature, &fi);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = fi->fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
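
/*
 * Example (illustrative sketch): querying a feature state.  The property
 * name must carry the "feature@" (or "unsupported@") prefix expected by
 * the strchr() above; the feature name here is only an example.
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy",
 *	    state, sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */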
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
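
/*
 * Sanity check on the alignment math above (editorial note): 2048 sectors
 * at 512 bytes per sector is 1,048,576 bytes, i.e. exactly a 1m boundary
 * for both the start block and the partition-end alignment.
 */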
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
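
/*
 * Example (illustrative sketch): the usual open/use/close lifecycle for
 * the handle functions above.  Assumes `hdl` from libzfs_init() and a
 * hypothetical pool name.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("pool %s, state %d\n",
 *		    zpool_get_name(zhp), zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 */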
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
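
/*
 * Example (illustrative sketch): a minimal caller of zpool_create().  The
 * `nvroot` vdev tree would normally be built elsewhere (the zpool command
 * assembles it from the command line); only the pool-level property list
 * is shown here.
 *
 *	nvlist_t *props = NULL;
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_COMMENT), "scratch pool") == 0);
 *	ret = zpool_create(hdl, "tank", nvroot, props, NULL);
 *	nvlist_free(props);
 */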
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

#if defined(__sun__) || defined(__sun)
	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
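
/*
 * Example (illustrative sketch): exporting a pool, passing the command
 * string that should be recorded in the pool history.
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		(void) fprintf(stderr, "export failed\n");
 */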
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
	}

	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
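
/*
 * Example (illustrative sketch): importing a discovered config under an
 * alternate root, which the contracted wrapper above arranges via the
 * altroot and cachefile properties.  `config` would come from
 * zpool_find_import().
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt/recovery") != 0)
 *		(void) fprintf(stderr, "import failed\n");
 */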
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameters control whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			nvlist_free(props);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}
*zhp
, pool_scan_func_t func
)
1908 zfs_cmd_t zc
= {"\0"};
1910 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1912 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1913 zc
.zc_cookie
= func
;
1915 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_SCAN
, &zc
) == 0 ||
1916 (errno
== ENOENT
&& func
!= POOL_SCAN_NONE
))
1919 if (func
== POOL_SCAN_SCRUB
) {
1920 (void) snprintf(msg
, sizeof (msg
),
1921 dgettext(TEXT_DOMAIN
, "cannot scrub %s"), zc
.zc_name
);
1922 } else if (func
== POOL_SCAN_NONE
) {
1923 (void) snprintf(msg
, sizeof (msg
),
1924 dgettext(TEXT_DOMAIN
, "cannot cancel scrubbing %s"),
1927 assert(!"unexpected result");
1930 if (errno
== EBUSY
) {
1932 pool_scan_stat_t
*ps
= NULL
;
1935 verify(nvlist_lookup_nvlist(zhp
->zpool_config
,
1936 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
1937 (void) nvlist_lookup_uint64_array(nvroot
,
1938 ZPOOL_CONFIG_SCAN_STATS
, (uint64_t **)&ps
, &psc
);
1939 if (ps
&& ps
->pss_func
== POOL_SCAN_SCRUB
)
1940 return (zfs_error(hdl
, EZFS_SCRUBBING
, msg
));
1942 return (zfs_error(hdl
, EZFS_RESILVERING
, msg
));
1943 } else if (errno
== ENOENT
) {
1944 return (zfs_error(hdl
, EZFS_NO_SCRUB
, msg
));
1946 return (zpool_standard_error(hdl
, errno
, msg
));
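
/*
 * Example (illustrative sketch): starting and then cancelling a scrub via
 * the scan entry point above.
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);	(begin a scrub)
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);	(cancel the scrub)
 */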
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if its an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "-part1", or "p1".  The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
static boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
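
/*
 * Illustrative note (not library text): names such as "mirror-2" or
 * "raidz1-0" carry the interior-vdev type prefix and match above, while
 * a leaf path such as "/dev/sda1" or a bare guid string does not.
 */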
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	} else {
		return (EZFS_POOL_INVALARG);
	}

	return (0);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

#if defined(__sun__) || defined(__sun)
	/*
	 * root pool can not have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);
#endif

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
	int fd, error;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, msg));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 *
	 * Also, we don't call efi_rescan() - that would just return EBUSY.
	 * The module will do it for us in vdev_disk_open().
	 */
	error = efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, msg));
	}
	return (0);
}
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one.  Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
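
/*
 * Example (illustrative sketch): onlining a device and checking the
 * resulting state reported back through 'newstate'.  The device path is
 * hypothetical.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "/dev/sdb", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("device expanded and online\n");
 */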
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

#if defined(__sun__) || defined(__sun)
	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}
#endif

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
2747 * The new device is too small.
2749 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2750 "device is too small"));
2751 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
2756 * The new device has a different alignment requirement.
2758 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2759 "devices have different sector alignment"));
2760 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
2765 * The resulting top-level vdev spec won't fit in the label.
2767 (void) zfs_error(hdl
, EZFS_DEVOVERFLOW
, msg
);
2771 (void) zpool_standard_error(hdl
, errno
, msg
);
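/*
 * Illustrative sketch (not part of the library): the nvroot passed to
 * zpool_vdev_attach() is a root vdev with exactly one disk child, matching
 * the "new device must be a single disk" check above. In the zpool(8)
 * command this nvlist is built by make_root_vdev(); the hand-rolled version
 * below is a simplified approximation, and 'devpath' is a hypothetical
 * device path.
 */
#if 0
static nvlist_t *
example_single_disk_nvroot(const char *devpath)
{
    nvlist_t *root = NULL, *disk = NULL, *child[1];

    if (nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) != 0 ||
        nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
        nvlist_add_string(disk, ZPOOL_CONFIG_PATH, devpath) != 0 ||
        nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL) != 0)
        goto fail;

    child[0] = disk;
    if (nvlist_alloc(&root, NV_UNIQUE_NAME, 0) != 0 ||
        nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
        nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
        child, 1) != 0)
        goto fail;

    nvlist_free(disk);  /* the array add made a copy */
    return (root);

fail:
    nvlist_free(disk);
    nvlist_free(root);
    return (NULL);
}
#endif
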
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        NULL)) == 0)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (l2cache)
        return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
        return (0);

    switch (errno) {
    case ENOTSUP:
        /*
         * Can't detach from this type of vdev.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
            "applicable to mirror and replacing vdevs"));
        (void) zfs_error(hdl, EZFS_BADTARGET, msg);
        break;

    case EBUSY:
        /*
         * There are no other replicas of this device.
         */
        (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
        break;

    default:
        (void) zpool_standard_error(hdl, errno, msg);
    }

    return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
    uint_t mc;

    for (mc = 0; mc < mchildren; mc++) {
        uint_t sc;
        char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
            mchild[mc], B_FALSE);

        for (sc = 0; sc < schildren; sc++) {
            char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
                schild[sc], B_FALSE);
            boolean_t result = (strcmp(mpath, spath) == 0);

            free(spath);
            if (result) {
                free(mpath);
                return (mc);
            }
        }

        free(mpath);
    }

    return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
    nvlist_t **varray = NULL, *zc_props = NULL;
    uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    uint64_t vers;
    boolean_t freelist = B_FALSE, memory_err = B_TRUE;
    int retval = 0;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

    if (!zpool_name_valid(hdl, B_FALSE, newname))
        return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

    if ((config = zpool_get_config(zhp, NULL)) == NULL) {
        (void) fprintf(stderr, gettext("Internal error: unable to "
            "retrieve pool configuration\n"));
        return (-1);
    }

    verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
        == 0);
    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

    if (props) {
        prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
        if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
            props, vers, flags, msg)) == NULL)
            return (-1);
    }

    if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
        &children) != 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "Source pool is missing vdev tree"));
        nvlist_free(zc_props);
        return (-1);
    }

    varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
    vcount = 0;

    if (*newroot == NULL ||
        nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
        &newchild, &newchildren) != 0)
        newchildren = 0;

    for (c = 0; c < children; c++) {
        uint64_t is_log = B_FALSE, is_hole = B_FALSE;
        char *type;
        nvlist_t **mchild, *vdev;
        uint_t mchildren;
        int entry;

        /*
         * Unlike cache & spares, slogs are stored in the
         * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
         */
        (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
            &is_log);
        (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
            &is_hole);
        if (is_log || is_hole) {
            /*
             * Create a hole vdev and put it in the config.
             */
            if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
                goto out;
            if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
                VDEV_TYPE_HOLE) != 0)
                goto out;
            if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
                1) != 0)
                goto out;
            if (lastlog == 0)
                lastlog = vcount;
            varray[vcount++] = vdev;
            continue;
        }
        lastlog = 0;
        verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
            == 0);
        if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "Source pool must be composed only of mirrors\n"));
            retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
            goto out;
        }

        verify(nvlist_lookup_nvlist_array(child[c],
            ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

        /* find or add an entry for this top-level vdev */
        if (newchildren > 0 &&
            (entry = find_vdev_entry(zhp, mchild, mchildren,
            newchild, newchildren)) >= 0) {
            /* We found a disk that the user specified. */
            vdev = mchild[entry];
            ++found;
        } else {
            /* User didn't specify a disk for this vdev. */
            vdev = mchild[mchildren - 1];
        }

        if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
            goto out;
    }

    /* did we find every disk the user specified? */
    if (found != newchildren) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
            "include at most one disk from each mirror"));
        retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
        goto out;
    }

    /* Prepare the nvlist for populating. */
    if (*newroot == NULL) {
        if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
            goto out;
        freelist = B_TRUE;
        if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) != 0)
            goto out;
    } else {
        verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
    }

    /* Add all the children we found */
    if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
        lastlog == 0 ? vcount : lastlog) != 0)
        goto out;

    /*
     * If we're just doing a dry run, exit now with success.
     */
    if (flags.dryrun) {
        memory_err = B_FALSE;
        freelist = B_FALSE;
        goto out;
    }

    /* now build up the config list & call the ioctl */
    if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
        goto out;

    if (nvlist_add_nvlist(newconfig,
        ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
        nvlist_add_string(newconfig,
        ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
        nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
        goto out;

    /*
     * The new pool is automatically part of the namespace unless we
     * explicitly export it.
     */
    if (!flags.import)
        zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
    if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
        goto out;
    if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
        goto out;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
        retval = zpool_standard_error(hdl, errno, msg);
        goto out;
    }

    freelist = B_FALSE;
    memory_err = B_FALSE;

out:
    if (varray != NULL) {
        int v;

        for (v = 0; v < vcount; v++)
            nvlist_free(varray[v]);
        free(varray);
    }
    zcmd_free_nvlists(&zc);
    nvlist_free(zc_props);
    nvlist_free(newconfig);
    if (freelist) {
        nvlist_free(*newroot);
        *newroot = NULL;
    }

    if (retval != 0)
        return (retval);

    if (memory_err)
        return (no_memory(hdl));

    return (0);
}

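/*
 * Illustrative sketch (not part of the library): a dry-run split of a
 * mirrored pool into a new pool "tank2", then the real split reusing the
 * config the dry run produced. The pool name is hypothetical; 'newroot'
 * comes back allocated and must be freed by the caller.
 */
#if 0
static int
example_split(zpool_handle_t *zhp)
{
    nvlist_t *newroot = NULL;
    splitflags_t flags = { 0 };
    char newname[] = "tank2";
    int err;

    flags.dryrun = 1;
    err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
    if (err == 0) {
        /* Inspect 'newroot' here, then do the split for real. */
        flags.dryrun = 0;
        err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
    }
    nvlist_free(newroot);
    return (err);
}
#endif
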
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache, islog;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    uint64_t version;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        &islog)) == 0)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));
    /*
     * XXX - this should just go away.
     */
    if (!avail_spare && !l2cache && !islog) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "only inactive hot spares, cache, top-level, "
            "or log devices can be removed"));
        return (zfs_error(hdl, EZFS_NODEVICE, msg));
    }

    version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
    if (islog && version < SPA_VERSION_HOLES) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "pool must be upgraded to support log removal"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));
    }

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
        return (0);

    return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    nvlist_t *tgt;
    zpool_rewind_policy_t policy;
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    nvlist_t *nvi = NULL;
    int error;

    if (path)
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
            path);
    else
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
            zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if (path) {
        if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
            &l2cache, NULL)) == 0)
            return (zfs_error(hdl, EZFS_NODEVICE, msg));

        /*
         * Don't allow error clearing for hot spares.  Do allow
         * error clearing for l2cache devices.
         */
        if (avail_spare)
            return (zfs_error(hdl, EZFS_ISSPARE, msg));

        verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
            &zc.zc_guid) == 0);
    }

    zpool_get_rewind_policy(rewindnvl, &policy);
    zc.zc_cookie = policy.zrp_request;

    if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
        return (-1);

    if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
        return (-1);

    while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
        errno == ENOMEM) {
        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
            zcmd_free_nvlists(&zc);
            return (-1);
        }
    }

    if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
        errno != EPERM && errno != EACCES)) {
        if (policy.zrp_request &
            (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
            (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
            zpool_rewind_exclaim(hdl, zc.zc_name,
                ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
                nvi);
            nvlist_free(nvi);
        }
        zcmd_free_nvlists(&zc);
        return (0);
    }

    zcmd_free_nvlists(&zc);
    return (zpool_standard_error(hdl, errno, msg));
}

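/*
 * Illustrative sketch (not part of the library): clear errors for a whole
 * pool and then for one device, with no rewind requested. The empty policy
 * nvlist is an assumption here; zpool_get_rewind_policy() falls back to
 * defaults for missing entries. The device name "sda" is hypothetical.
 */
#if 0
static int
example_clear(zpool_handle_t *zhp)
{
    nvlist_t *policy = NULL;
    int err;

    if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0)
        return (-1);

    err = zpool_clear(zhp, NULL, policy);       /* whole pool */
    if (err == 0)
        err = zpool_clear(zhp, "sda", policy);  /* one vdev */

    nvlist_free(policy);
    return (err);
}
#endif
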
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
        (u_longlong_t)guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_guid = guid;
    zc.zc_cookie = ZPOOL_NO_REWIND;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
        return (0);

    return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    zfs_cmd_t zc = {"\0"};

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
        return (0);

    return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
        zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
        return (0);

    return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
    ddi_devid_t devid;
    char *minor;
    char *path;
    devid_nmlist_t *list = NULL;
    int ret;

    if (devid_str_decode(devid_str, &devid, &minor) != 0)
        return (NULL);

    ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

    devid_str_free(minor);
    devid_free(devid);

    if (ret != 0)
        return (NULL);

    if ((path = strdup(list[0].devname)) == NULL)
        return (NULL);

    devid_free_nmlist(list);

    return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
    int fd;
    ddi_devid_t devid;
    char *minor, *ret;

    if ((fd = open(path, O_RDONLY)) < 0)
        return (NULL);

    minor = NULL;
    ret = NULL;
    if (devid_get(fd, &devid) == 0) {
        if (devid_get_minor_name(fd, &minor) == 0)
            ret = devid_str_encode(devid, minor);
        if (minor != NULL)
            devid_str_free(minor);
        devid_free(devid);
    }
    (void) close(fd);

    return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
    zfs_cmd_t zc = {"\0"};

    (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
    verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
        &zc.zc_guid) == 0);

    (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
 */
static char *
strip_partition(libzfs_handle_t *hdl, char *path)
{
    char *tmp = zfs_strdup(hdl, path);
    char *part = NULL, *d = NULL;

    if ((part = strstr(tmp, "-part")) && part != tmp) {
        d = part + 5;
    } else if ((part = strrchr(tmp, 'p')) &&
        part > tmp + 1 && isdigit(*(part - 1))) {
        d = part + 1;
    } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
        for (d = &tmp[2]; isalpha(*d); part = ++d)
            ;
    }
    if (part && d && *d != '\0') {
        for (; isdigit(*d); d++)
            ;
        if (*d == '\0')
            *part = '\0';
    }

    return (tmp);
}

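/*
 * For illustration, the mappings the three branches above produce (device
 * names are examples only):
 *
 *	"sdb1"      -> "sdb"	(scsi/ide disk, bare digit suffix)
 *	"md0p2"     -> "md0"	("pX" preceded by a digit)
 *	"vdb-part3" -> "vdb"	("-partX" suffix)
 *	"dm-1"      -> "dm-1"	(no recognized suffix; unchanged)
 */
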
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
    char *path, *devid, *type;
    uint64_t value;
    char buf[PATH_BUF_LEN];
    char tmpbuf[PATH_BUF_LEN];
    vdev_stat_t *vs;
    uint_t vsc;

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
        &value) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
            &value) == 0);
        (void) snprintf(buf, sizeof (buf), "%llu",
            (u_longlong_t)value);
        path = buf;
    } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
        /*
         * If the device is dead (faulted, offline, etc) then don't
         * bother opening it.  Otherwise we may be forcing the user to
         * open a misbehaving device, which can have undesirable
         * effects.
         */
        if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
            (uint64_t **)&vs, &vsc) != 0 ||
            vs->vs_state >= VDEV_STATE_DEGRADED) &&
            zhp != NULL &&
            nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
            /*
             * Determine if the current path is correct.
             */
            char *newdevid = path_to_devid(path);

            if (newdevid == NULL ||
                strcmp(devid, newdevid) != 0) {
                char *newpath;

                if ((newpath = devid_to_path(devid)) != NULL) {
                    /*
                     * Update the path appropriately.
                     */
                    set_path(zhp, nv, newpath);
                    if (nvlist_add_string(nv,
                        ZPOOL_CONFIG_PATH, newpath) == 0)
                        verify(nvlist_lookup_string(nv,
                            ZPOOL_CONFIG_PATH,
                            &path) == 0);
                    free(newpath);
                }
            }

            if (newdevid)
                devid_str_free(newdevid);
        }

        /*
         * For a block device only use the name.
         */
        verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
        if (strcmp(type, VDEV_TYPE_DISK) == 0) {
            path = strrchr(path, '/');
            path++;
        }

        /*
         * Remove the partition from the path if this is a whole disk.
         */
        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
            &value) == 0 && value) {
            return (strip_partition(hdl, path));
        }
    } else {
        verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

        /*
         * If it's a raidz device, we need to stick in the parity level.
         */
        if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
                &value) == 0);
            (void) snprintf(buf, sizeof (buf), "%s%llu", path,
                (u_longlong_t)value);
            path = buf;
        }

        /*
         * We identify each top-level vdev by using a <type-id>
         * naming convention.
         */
        if (verbose) {
            uint64_t id;

            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
                &id) == 0);
            (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
                path, (u_longlong_t)id);
            path = tmpbuf;
        }
    }

    return (zfs_strdup(hdl, path));
}

static int
zbookmark_compare(const void *a, const void *b)
{
    return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
    zfs_cmd_t zc = {"\0"};
    uint64_t count;
    zbookmark_t *zb = NULL;
    int i;

    /*
     * Retrieve the raw error list from the kernel.  If the number of errors
     * has increased, allocate more space and continue until we get the
     * entire list.
     */
    verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
        &count) == 0);
    if (count == 0)
        return (0);
    if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
        count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
        return (-1);
    zc.zc_nvlist_dst_size = count;
    (void) strcpy(zc.zc_name, zhp->zpool_name);
    for (;;) {
        if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
            &zc) != 0) {
            free((void *)(uintptr_t)zc.zc_nvlist_dst);
            if (errno == ENOMEM) {
                count = zc.zc_nvlist_dst_size;
                if ((zc.zc_nvlist_dst = (uintptr_t)
                    zfs_alloc(zhp->zpool_hdl, count *
                    sizeof (zbookmark_t))) == (uintptr_t)NULL)
                    return (-1);
            } else {
                return (-1);
            }
        } else {
            break;
        }
    }

    /*
     * Sort the resulting bookmarks.  This is a little confusing due to the
     * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
     * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
     * _not_ copied as part of the process.  So we point the start of our
     * array appropriately and decrement the total number of elements.
     */
    zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
        zc.zc_nvlist_dst_size;
    count -= zc.zc_nvlist_dst_size;

    qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

    verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

    /*
     * Fill in the nverrlistp with nvlist's of dataset and object numbers.
     */
    for (i = 0; i < count; i++) {
        nvlist_t *nv;

        /* ignoring zb_blkid and zb_level for now */
        if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
            zb[i-1].zb_object == zb[i].zb_object)
            continue;

        if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
            goto nomem;
        if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
            zb[i].zb_objset) != 0) {
            nvlist_free(nv);
            goto nomem;
        }
        if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
            zb[i].zb_object) != 0) {
            nvlist_free(nv);
            goto nomem;
        }
        if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
            nvlist_free(nv);
            goto nomem;
        }
        nvlist_free(nv);
    }

    free((void *)(uintptr_t)zc.zc_nvlist_dst);
    return (0);

nomem:
    free((void *)(uintptr_t)zc.zc_nvlist_dst);
    return (no_memory(zhp->zpool_hdl));
}

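/*
 * Illustrative sketch (not part of the library): walk the error list
 * returned above and resolve each <dataset, object> pair to a path with
 * zpool_obj_to_path() (defined later in this file), much as "zpool status
 * -v" does.
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
    nvlist_t *nverrlist = NULL;
    nvpair_t *elem = NULL;
    char pathname[MAXPATHLEN * 2];

    if (zpool_get_errlog(zhp, &nverrlist) != 0)
        return;

    while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
        nvlist_t *nv;
        uint64_t dsobj, obj;

        verify(nvpair_value_nvlist(elem, &nv) == 0);
        verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
            &dsobj) == 0);
        verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
            &obj) == 0);

        zpool_obj_to_path(zhp, dsobj, obj, pathname,
            sizeof (pathname));
        (void) printf("%s\n", pathname);
    }
    nvlist_free(nverrlist);
}
#endif
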
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
    zfs_cmd_t zc = {"\0"};
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strcpy(zc.zc_name, zhp->zpool_name);
    zc.zc_cookie = new_version;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
        return (zpool_standard_error_fmt(hdl, errno,
            dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
            zhp->zpool_name));
    return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
    int i;

    (void) strlcpy(string, basename(argv[0]), len);
    for (i = 1; i < argc; i++) {
        (void) strlcat(string, " ", len);
        (void) strlcat(string, argv[i], len);
    }
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
    zfs_cmd_t zc = {"\0"};
    nvlist_t *args;
    int err;

    args = fnvlist_alloc();
    fnvlist_add_string(args, "message", message);
    err = zcmd_write_src_nvlist(hdl, &zc, args);
    if (err == 0)
        err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
    nvlist_free(args);
    zcmd_free_nvlists(&zc);
    return (err);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
    zfs_cmd_t zc = {"\0"};
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    zc.zc_history = (uint64_t)(uintptr_t)buf;
    zc.zc_history_len = *len;
    zc.zc_history_offset = *off;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
        switch (errno) {
        case EPERM:
            return (zfs_error_fmt(hdl, EZFS_PERM,
                dgettext(TEXT_DOMAIN,
                "cannot show history for pool '%s'"),
                zhp->zpool_name));
        case ENOENT:
            return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
                dgettext(TEXT_DOMAIN, "cannot get history for pool "
                "'%s'"), zhp->zpool_name));
        case ENOTSUP:
            return (zfs_error_fmt(hdl, EZFS_BADVERSION,
                dgettext(TEXT_DOMAIN, "cannot get history for pool "
                "'%s', pool must be upgraded"), zhp->zpool_name));
        default:
            return (zpool_standard_error_fmt(hdl, errno,
                dgettext(TEXT_DOMAIN,
                "cannot get history for '%s'"), zhp->zpool_name));
        }
    }

    *len = zc.zc_history_len;
    *off = zc.zc_history_offset;

    return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
    uint64_t reclen;
    nvlist_t *nv;
    int i;

    while (bytes_read > sizeof (reclen)) {

        /* get length of packed record (stored as little endian) */
        for (i = 0, reclen = 0; i < sizeof (reclen); i++)
            reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

        if (bytes_read < sizeof (reclen) + reclen)
            break;

        /* unpack record */
        if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
            return (ENOMEM);
        bytes_read -= sizeof (reclen) + reclen;
        buf += sizeof (reclen) + reclen;

        /* add record to nvlist array */
        (*numrecords)++;
        if (ISP2(*numrecords + 1)) {
            *records = realloc(*records,
                *numrecords * 2 * sizeof (nvlist_t *));
        }
        (*records)[*numrecords - 1] = nv;
    }

    *leftover = bytes_read;
    return (0);
}

#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
    char buf[HIS_BUF_LEN];
    uint64_t off = 0;
    nvlist_t **records = NULL;
    uint_t numrecords = 0;
    int err, i;

    do {
        uint64_t bytes_read = sizeof (buf);
        uint64_t leftover;

        if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
            break;

        /* if nothing else was read in, we're at EOF, just return */
        if (!bytes_read)
            break;

        if ((err = zpool_history_unpack(buf, bytes_read,
            &leftover, &records, &numrecords)) != 0)
            break;
        off -= leftover;

        /* CONSTCOND */
    } while (1);

    if (!err) {
        verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
            records, numrecords) == 0);
    }
    for (i = 0; i < numrecords; i++)
        nvlist_free(records[i]);
    free(records);

    return (err);
}

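/*
 * Illustrative sketch (not part of the library): fetch the history and print
 * each plain command record, much like "zpool history" does. ZPOOL_HIST_CMD
 * is the record field carrying the command string.
 */
#if 0
static int
example_print_history(zpool_handle_t *zhp)
{
    nvlist_t *nvhis = NULL, **records;
    uint_t numrecords, i;
    char *cmd;

    if (zpool_get_history(zhp, &nvhis) != 0)
        return (-1);

    verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
        &records, &numrecords) == 0);
    for (i = 0; i < numrecords; i++) {
        if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
            &cmd) == 0)
            (void) printf("%s\n", cmd);
    }
    nvlist_free(nvhis);
    return (0);
}
#endif
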
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, int block, int zevent_fd)
{
    zfs_cmd_t zc = {"\0"};
    int error = 0;

    *nvp = NULL;
    *dropped = 0;
    zc.zc_cleanup_fd = zevent_fd;

    if (!block)
        zc.zc_guid = ZEVENT_NONBLOCK;

    if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
        return (-1);

retry:
    if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
        switch (errno) {
        case ESHUTDOWN:
            error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
                dgettext(TEXT_DOMAIN, "zfs shutdown"));
            goto out;
        case ENOENT:
            /* Blocking error case should not occur */
            if (block)
                error = zpool_standard_error_fmt(hdl, errno,
                    dgettext(TEXT_DOMAIN, "cannot get event"));

            goto out;
        case ENOMEM:
            if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                error = zfs_error_fmt(hdl, EZFS_NOMEM,
                    dgettext(TEXT_DOMAIN, "cannot get event"));
                goto out;
            } else {
                goto retry;
            }
        default:
            error = zpool_standard_error_fmt(hdl, errno,
                dgettext(TEXT_DOMAIN, "cannot get event"));
            goto out;
        }
    }

    error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
    if (error != 0)
        goto out;

    *dropped = (int)zc.zc_cookie;
out:
    zcmd_free_nvlists(&zc);

    return (error);
}

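/*
 * Illustrative sketch (not part of the library): drain all currently queued
 * events without blocking. ZFS_DEV is the control node ("/dev/zfs") that
 * consumers such as zed open to obtain a per-consumer event cursor.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl)
{
    int zevent_fd, dropped;
    nvlist_t *event;

    if ((zevent_fd = open(ZFS_DEV, O_RDWR)) < 0)
        return;

    for (;;) {
        event = NULL;
        if (zpool_events_next(hdl, &event, &dropped,
            0 /* non-blocking */, zevent_fd) != 0)
            break;
        if (event == NULL)  /* queue is empty */
            break;
        if (dropped > 0)
            (void) printf("%d events dropped\n", dropped);
        nvlist_free(event);
    }
    (void) close(zevent_fd);
}
#endif
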
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot clear events"));

    if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
        return (zpool_standard_error_fmt(hdl, errno, msg));

    if (count != NULL)
        *count = (int)zc.zc_cookie; /* # of events cleared */

    return (0);
}

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
    zfs_cmd_t zc = {"\0"};
    int error = 0;

    zc.zc_guid = eid;
    zc.zc_cleanup_fd = zevent_fd;

    if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
        switch (errno) {
        case ENOENT:
            error = zfs_error_fmt(hdl, EZFS_NOENT,
                dgettext(TEXT_DOMAIN, "cannot get event"));
            break;

        case ENOMEM:
            error = zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot get event"));
            break;

        default:
            error = zpool_standard_error_fmt(hdl, errno,
                dgettext(TEXT_DOMAIN, "cannot get event"));
            break;
        }
    }

    return (error);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
    zfs_cmd_t zc = {"\0"};
    boolean_t mounted = B_FALSE;
    char *mntpnt = NULL;
    char dsname[MAXNAMELEN];

    if (dsobj == 0) {
        /* special case for the MOS */
        (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
            (longlong_t)obj);
        return;
    }

    /* get the dataset's name */
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_obj = dsobj;
    if (ioctl(zhp->zpool_hdl->libzfs_fd,
        ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
        /* just write out a path of two object numbers */
        (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
            (longlong_t)dsobj, (longlong_t)obj);
        return;
    }
    (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

    /* find out if the dataset is mounted */
    mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

    /* get the corrupted object's path */
    (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
    zc.zc_obj = obj;
    if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
        &zc) == 0) {
        if (mounted) {
            (void) snprintf(pathname, len, "%s%s", mntpnt,
                zc.zc_value);
        } else {
            (void) snprintf(pathname, len, "%s:%s",
                dsname, zc.zc_value);
        }
    } else {
        (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
            (longlong_t)obj);
    }
    free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
    char *path;
    int fd;
    char diskname[MAXPATHLEN];
    int err = -1;

    if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
        return (err);

    (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
        strrchr(path, '/'));
    if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
        struct dk_gpt *vtoc;

        if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
            if (sb != NULL)
                *sb = vtoc->efi_parts[0].p_start;
            efi_free(vtoc);
        }
        (void) close(fd);
    }
    return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
    nvlist_t **child;
    uint_t c, children;
    diskaddr_t sb = MAXOFFSET_T;
    uint64_t wholedisk;

    if (nvlist_lookup_nvlist_array(config,
        ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
        if (nvlist_lookup_uint64(config,
            ZPOOL_CONFIG_WHOLE_DISK,
            &wholedisk) != 0 || !wholedisk) {
            return (MAXOFFSET_T);
        }
        if (read_efi_label(config, &sb) < 0)
            sb = MAXOFFSET_T;
        return (sb);
    }

    for (c = 0; c < children; c++) {
        sb = find_start_block(child[c]);
        if (sb != MAXOFFSET_T) {
            return (sb);
        }
    }
    return (MAXOFFSET_T);
}

static int
zpool_label_disk_wait(char *path, int timeout)
{
    struct stat64 statbuf;
    int i;

    /*
     * Wait timeout milliseconds for a newly created device to be available
     * from the given path.  There is a small window when a /dev/ device
     * will exist and the udev link will not, so we must wait for the
     * symlink.  Depending on the udev rules this may take a few seconds.
     */
    for (i = 0; i < timeout; i++) {
        usleep(1000);

        errno = 0;
        if ((stat64(path, &statbuf) == 0) && (errno == 0))
            return (0);
    }

    return (ENODEV);
}

static int
zpool_label_disk_check(char *path)
{
    struct dk_gpt *vtoc;
    int fd, err;

    if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
        return (errno);

    if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
        (void) close(fd);
        return (err);
    }

    if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
        efi_free(vtoc);
        (void) close(fd);
        return (EIDRM);
    }

    efi_free(vtoc);
    (void) close(fd);
    return (0);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
    char path[MAXPATHLEN];
    struct dk_gpt *vtoc;
    int rval, fd;
    size_t resv = EFI_MIN_RESV_SIZE;
    uint64_t slice_size;
    diskaddr_t start_block;
    char errbuf[1024];

    /* prepare an error message just in case */
    (void) snprintf(errbuf, sizeof (errbuf),
        dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

    if (zhp) {
        nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
        if (zpool_is_bootable(zhp)) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "EFI labeled devices are not supported on root "
                "pools."));
            return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
        }
#endif

        verify(nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

        if (zhp->zpool_start_block == 0)
            start_block = find_start_block(nvroot);
        else
            start_block = zhp->zpool_start_block;
        zhp->zpool_start_block = start_block;
    } else {
        /* new pool */
        start_block = NEW_START_BLOCK;
    }

    (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

    if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
        /*
         * This shouldn't happen.  We've long since verified that this
         * is a valid device.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "label '%s': unable to open device: %d"), path, errno);
        return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
    }

    if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
        /*
         * The only way this can fail is if we run out of memory, or we
         * were unable to read the disk's capacity
         */
        if (errno == ENOMEM)
            (void) no_memory(hdl);

        (void) close(fd);
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "label '%s': unable to read disk capacity"), path);

        return (zfs_error(hdl, EZFS_NOCAP, errbuf));
    }

    slice_size = vtoc->efi_last_u_lba + 1;
    slice_size -= EFI_MIN_RESV_SIZE;
    if (start_block == MAXOFFSET_T)
        start_block = NEW_START_BLOCK;
    slice_size -= start_block;
    slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

    vtoc->efi_parts[0].p_start = start_block;
    vtoc->efi_parts[0].p_size = slice_size;

    /*
     * Why we use V_USR: V_BACKUP confuses users, and is considered
     * disposable by some EFI utilities (since EFI doesn't have a backup
     * slice).  V_UNASSIGNED is supposed to be used only for zero size
     * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
     * etc. were all pretty specific.  V_USR is as close to reality as we
     * can get, in the absence of V_OTHER.
     */
    vtoc->efi_parts[0].p_tag = V_USR;
    (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

    vtoc->efi_parts[8].p_start = slice_size + start_block;
    vtoc->efi_parts[8].p_size = resv;
    vtoc->efi_parts[8].p_tag = V_RESERVED;

    if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
        /*
         * Some block drivers (like pcata) may not support EFI
         * GPT labels.  Print out a helpful error message directing
         * the user to manually label the disk and provide a
         * specific slice.
         */
        (void) close(fd);
        efi_free(vtoc);

        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
            "parted(8) and then provide a specific slice: %d"), rval);
        return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
    }

    (void) close(fd);
    efi_free(vtoc);

    /* Wait for the first expected partition to appear. */

    (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
    (void) zfs_append_partition(path, MAXPATHLEN);

    rval = zpool_label_disk_wait(path, 3000);
    if (rval) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
            "detect device partitions on '%s': %d"), path, rval);
        return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
    }

    /* We can't be too paranoid.  Read the label back and verify it. */
    (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
    rval = zpool_label_disk_check(path);
    if (rval) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
            "EFI label on '%s' is damaged.  Ensure\nthis device "
            "is not in use, and is functioning properly: %d"),
            path, rval);
        return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
    }

    return (0);
}
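
/*
 * Illustrative sketch (not part of the library): label a new disk before
 * adding it to a pool. The short name "sdb" is hypothetical;
 * zpool_label_disk() expects the name without any leading /dev prefix and
 * already waits for and verifies the resulting partition, so the extra wait
 * below is optional belt-and-suspenders.
 */
#if 0
static int
example_label(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
    char path[MAXPATHLEN];
    char name[] = "sdb";

    if (zpool_label_disk(hdl, zhp, name) != 0)
        return (-1);

    /* Re-check that the first partition node is present. */
    (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
    (void) zfs_append_partition(path, sizeof (path));
    return (zpool_label_disk_wait(path, 3000));
}
#endif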