4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012 by Delphix. All rights reserved.
39 #include <sys/efi_partition.h>
41 #include <sys/zfs_ioctl.h>
44 #include "zfs_namecheck.h"
46 #include "libzfs_impl.h"
47 #include "zfs_comutil.h"
48 #include "zfeature_common.h"
50 static int read_efi_label(nvlist_t
*config
, diskaddr_t
*sb
);
52 typedef struct prop_flags
{
53 int create
:1; /* Validate property on creation */
54 int import
:1; /* Validate property on import */
58 * ====================================================================
59 * zpool property functions
60 * ====================================================================
64 zpool_get_all_props(zpool_handle_t
*zhp
)
66 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
67 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
69 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
71 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, 0) != 0)
74 while (ioctl(hdl
->libzfs_fd
, ZFS_IOC_POOL_GET_PROPS
, &zc
) != 0) {
75 if (errno
== ENOMEM
) {
76 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
77 zcmd_free_nvlists(&zc
);
81 zcmd_free_nvlists(&zc
);
86 if (zcmd_read_dst_nvlist(hdl
, &zc
, &zhp
->zpool_props
) != 0) {
87 zcmd_free_nvlists(&zc
);
91 zcmd_free_nvlists(&zc
);
97 zpool_props_refresh(zpool_handle_t
*zhp
)
101 old_props
= zhp
->zpool_props
;
103 if (zpool_get_all_props(zhp
) != 0)
106 nvlist_free(old_props
);
111 zpool_get_prop_string(zpool_handle_t
*zhp
, zpool_prop_t prop
,
117 zprop_source_t source
;
119 nvl
= zhp
->zpool_props
;
120 if (nvlist_lookup_nvlist(nvl
, zpool_prop_to_name(prop
), &nv
) == 0) {
121 verify(nvlist_lookup_uint64(nv
, ZPROP_SOURCE
, &ival
) == 0);
123 verify(nvlist_lookup_string(nv
, ZPROP_VALUE
, &value
) == 0);
125 source
= ZPROP_SRC_DEFAULT
;
126 if ((value
= (char *)zpool_prop_default_string(prop
)) == NULL
)
137 zpool_get_prop_int(zpool_handle_t
*zhp
, zpool_prop_t prop
, zprop_source_t
*src
)
141 zprop_source_t source
;
143 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
)) {
145 * zpool_get_all_props() has most likely failed because
146 * the pool is faulted, but if all we need is the top level
147 * vdev's guid then get it from the zhp config nvlist.
149 if ((prop
== ZPOOL_PROP_GUID
) &&
150 (nvlist_lookup_nvlist(zhp
->zpool_config
,
151 ZPOOL_CONFIG_VDEV_TREE
, &nv
) == 0) &&
152 (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
, &value
)
156 return (zpool_prop_default_numeric(prop
));
159 nvl
= zhp
->zpool_props
;
160 if (nvlist_lookup_nvlist(nvl
, zpool_prop_to_name(prop
), &nv
) == 0) {
161 verify(nvlist_lookup_uint64(nv
, ZPROP_SOURCE
, &value
) == 0);
163 verify(nvlist_lookup_uint64(nv
, ZPROP_VALUE
, &value
) == 0);
165 source
= ZPROP_SRC_DEFAULT
;
166 value
= zpool_prop_default_numeric(prop
);
176 * Map VDEV STATE to printed strings.
179 zpool_state_to_name(vdev_state_t state
, vdev_aux_t aux
)
184 case VDEV_STATE_CLOSED
:
185 case VDEV_STATE_OFFLINE
:
186 return (gettext("OFFLINE"));
187 case VDEV_STATE_REMOVED
:
188 return (gettext("REMOVED"));
189 case VDEV_STATE_CANT_OPEN
:
190 if (aux
== VDEV_AUX_CORRUPT_DATA
|| aux
== VDEV_AUX_BAD_LOG
)
191 return (gettext("FAULTED"));
192 else if (aux
== VDEV_AUX_SPLIT_POOL
)
193 return (gettext("SPLIT"));
195 return (gettext("UNAVAIL"));
196 case VDEV_STATE_FAULTED
:
197 return (gettext("FAULTED"));
198 case VDEV_STATE_DEGRADED
:
199 return (gettext("DEGRADED"));
200 case VDEV_STATE_HEALTHY
:
201 return (gettext("ONLINE"));
204 return (gettext("UNKNOWN"));
208 * Get a zpool property value for 'prop' and return the value in
209 * a pre-allocated buffer.
212 zpool_get_prop(zpool_handle_t
*zhp
, zpool_prop_t prop
, char *buf
, size_t len
,
213 zprop_source_t
*srctype
)
217 zprop_source_t src
= ZPROP_SRC_NONE
;
222 if (zpool_get_state(zhp
) == POOL_STATE_UNAVAIL
) {
224 case ZPOOL_PROP_NAME
:
225 (void) strlcpy(buf
, zpool_get_name(zhp
), len
);
228 case ZPOOL_PROP_HEALTH
:
229 (void) strlcpy(buf
, "FAULTED", len
);
232 case ZPOOL_PROP_GUID
:
233 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
234 (void) snprintf(buf
, len
, "%llu", (u_longlong_t
)intval
);
237 case ZPOOL_PROP_ALTROOT
:
238 case ZPOOL_PROP_CACHEFILE
:
239 case ZPOOL_PROP_COMMENT
:
240 if (zhp
->zpool_props
!= NULL
||
241 zpool_get_all_props(zhp
) == 0) {
243 zpool_get_prop_string(zhp
, prop
, &src
),
251 (void) strlcpy(buf
, "-", len
);
260 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
) &&
261 prop
!= ZPOOL_PROP_NAME
)
264 switch (zpool_prop_get_type(prop
)) {
265 case PROP_TYPE_STRING
:
266 (void) strlcpy(buf
, zpool_get_prop_string(zhp
, prop
, &src
),
270 case PROP_TYPE_NUMBER
:
271 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
274 case ZPOOL_PROP_SIZE
:
275 case ZPOOL_PROP_ALLOCATED
:
276 case ZPOOL_PROP_FREE
:
277 case ZPOOL_PROP_FREEING
:
278 case ZPOOL_PROP_EXPANDSZ
:
279 case ZPOOL_PROP_ASHIFT
:
280 (void) zfs_nicenum(intval
, buf
, len
);
283 case ZPOOL_PROP_CAPACITY
:
284 (void) snprintf(buf
, len
, "%llu%%",
285 (u_longlong_t
)intval
);
288 case ZPOOL_PROP_DEDUPRATIO
:
289 (void) snprintf(buf
, len
, "%llu.%02llux",
290 (u_longlong_t
)(intval
/ 100),
291 (u_longlong_t
)(intval
% 100));
294 case ZPOOL_PROP_HEALTH
:
295 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
296 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
297 verify(nvlist_lookup_uint64_array(nvroot
,
298 ZPOOL_CONFIG_VDEV_STATS
, (uint64_t **)&vs
, &vsc
)
301 (void) strlcpy(buf
, zpool_state_to_name(intval
,
304 case ZPOOL_PROP_VERSION
:
305 if (intval
>= SPA_VERSION_FEATURES
) {
306 (void) snprintf(buf
, len
, "-");
311 (void) snprintf(buf
, len
, "%llu", (u_longlong_t
)intval
);
315 case PROP_TYPE_INDEX
:
316 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
317 if (zpool_prop_index_to_string(prop
, intval
, &strval
)
320 (void) strlcpy(buf
, strval
, len
);
334 * Check if the bootfs name has the same pool name as it is set to.
335 * Assuming bootfs is a valid dataset name.
338 bootfs_name_valid(const char *pool
, char *bootfs
)
340 int len
= strlen(pool
);
342 if (!zfs_name_valid(bootfs
, ZFS_TYPE_FILESYSTEM
|ZFS_TYPE_SNAPSHOT
))
345 if (strncmp(pool
, bootfs
, len
) == 0 &&
346 (bootfs
[len
] == '/' || bootfs
[len
] == '\0'))
352 #if defined(__sun__) || defined(__sun)
354 * Inspect the configuration to determine if any of the devices contain
358 pool_uses_efi(nvlist_t
*config
)
363 if (nvlist_lookup_nvlist_array(config
, ZPOOL_CONFIG_CHILDREN
,
364 &child
, &children
) != 0)
365 return (read_efi_label(config
, NULL
) >= 0);
367 for (c
= 0; c
< children
; c
++) {
368 if (pool_uses_efi(child
[c
]))
376 zpool_is_bootable(zpool_handle_t
*zhp
)
378 char bootfs
[ZPOOL_MAXNAMELEN
];
380 return (zpool_get_prop(zhp
, ZPOOL_PROP_BOOTFS
, bootfs
,
381 sizeof (bootfs
), NULL
) == 0 && strncmp(bootfs
, "-",
382 sizeof (bootfs
)) != 0);
387 * Given an nvlist of zpool properties to be set, validate that they are
388 * correct, and parse any numeric properties (index, boolean, etc) if they are
389 * specified as strings.
392 zpool_valid_proplist(libzfs_handle_t
*hdl
, const char *poolname
,
393 nvlist_t
*props
, uint64_t version
, prop_flags_t flags
, char *errbuf
)
401 struct stat64 statbuf
;
405 if (nvlist_alloc(&retprops
, NV_UNIQUE_NAME
, 0) != 0) {
406 (void) no_memory(hdl
);
411 while ((elem
= nvlist_next_nvpair(props
, elem
)) != NULL
) {
412 const char *propname
= nvpair_name(elem
);
414 prop
= zpool_name_to_prop(propname
);
415 if (prop
== ZPROP_INVAL
&& zpool_prop_feature(propname
)) {
417 zfeature_info_t
*feature
;
418 char *fname
= strchr(propname
, '@') + 1;
420 err
= zfeature_lookup_name(fname
, &feature
);
422 ASSERT3U(err
, ==, ENOENT
);
423 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
424 "invalid feature '%s'"), fname
);
425 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
429 if (nvpair_type(elem
) != DATA_TYPE_STRING
) {
430 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
431 "'%s' must be a string"), propname
);
432 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
436 (void) nvpair_value_string(elem
, &strval
);
437 if (strcmp(strval
, ZFS_FEATURE_ENABLED
) != 0) {
438 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
439 "property '%s' can only be set to "
440 "'enabled'"), propname
);
441 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
445 if (nvlist_add_uint64(retprops
, propname
, 0) != 0) {
446 (void) no_memory(hdl
);
453 * Make sure this property is valid and applies to this type.
455 if (prop
== ZPROP_INVAL
) {
456 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
457 "invalid property '%s'"), propname
);
458 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
462 if (zpool_prop_readonly(prop
)) {
463 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
464 "is readonly"), propname
);
465 (void) zfs_error(hdl
, EZFS_PROPREADONLY
, errbuf
);
469 if (zprop_parse_value(hdl
, elem
, prop
, ZFS_TYPE_POOL
, retprops
,
470 &strval
, &intval
, errbuf
) != 0)
474 * Perform additional checking for specific properties.
479 case ZPOOL_PROP_VERSION
:
480 if (intval
< version
||
481 !SPA_VERSION_IS_SUPPORTED(intval
)) {
482 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
483 "property '%s' number %d is invalid."),
485 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
490 case ZPOOL_PROP_ASHIFT
:
492 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
493 "property '%s' can only be set at "
494 "creation time"), propname
);
495 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
499 if (intval
!= 0 && (intval
< 9 || intval
> 13)) {
500 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
501 "property '%s' number %d is invalid."),
503 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
508 case ZPOOL_PROP_BOOTFS
:
509 if (flags
.create
|| flags
.import
) {
510 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
511 "property '%s' cannot be set at creation "
512 "or import time"), propname
);
513 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
517 if (version
< SPA_VERSION_BOOTFS
) {
518 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
519 "pool must be upgraded to support "
520 "'%s' property"), propname
);
521 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
526 * bootfs property value has to be a dataset name and
527 * the dataset has to be in the same pool as it sets to.
529 if (strval
[0] != '\0' && !bootfs_name_valid(poolname
,
531 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
532 "is an invalid name"), strval
);
533 (void) zfs_error(hdl
, EZFS_INVALIDNAME
, errbuf
);
537 if ((zhp
= zpool_open_canfail(hdl
, poolname
)) == NULL
) {
538 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
539 "could not open pool '%s'"), poolname
);
540 (void) zfs_error(hdl
, EZFS_OPENFAILED
, errbuf
);
543 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
544 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
546 #if defined(__sun__) || defined(__sun)
548 * bootfs property cannot be set on a disk which has
551 if (pool_uses_efi(nvroot
)) {
552 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
553 "property '%s' not supported on "
554 "EFI labeled devices"), propname
);
555 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, errbuf
);
563 case ZPOOL_PROP_ALTROOT
:
564 if (!flags
.create
&& !flags
.import
) {
565 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
566 "property '%s' can only be set during pool "
567 "creation or import"), propname
);
568 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
572 if (strval
[0] != '/') {
573 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
574 "bad alternate root '%s'"), strval
);
575 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
580 case ZPOOL_PROP_CACHEFILE
:
581 if (strval
[0] == '\0')
584 if (strcmp(strval
, "none") == 0)
587 if (strval
[0] != '/') {
588 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
589 "property '%s' must be empty, an "
590 "absolute path, or 'none'"), propname
);
591 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
595 slash
= strrchr(strval
, '/');
597 if (slash
[1] == '\0' || strcmp(slash
, "/.") == 0 ||
598 strcmp(slash
, "/..") == 0) {
599 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
600 "'%s' is not a valid file"), strval
);
601 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
607 if (strval
[0] != '\0' &&
608 (stat64(strval
, &statbuf
) != 0 ||
609 !S_ISDIR(statbuf
.st_mode
))) {
610 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
611 "'%s' is not a valid directory"),
613 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
620 case ZPOOL_PROP_COMMENT
:
621 for (check
= strval
; *check
!= '\0'; check
++) {
622 if (!isprint(*check
)) {
624 dgettext(TEXT_DOMAIN
,
625 "comment may only have printable "
627 (void) zfs_error(hdl
, EZFS_BADPROP
,
632 if (strlen(strval
) > ZPROP_MAX_COMMENT
) {
633 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
634 "comment must not exceed %d characters"),
636 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
640 case ZPOOL_PROP_READONLY
:
642 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
643 "property '%s' can only be set at "
644 "import time"), propname
);
645 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
654 nvlist_free(retprops
);
659 * Set zpool property : propname=propval.
662 zpool_set_prop(zpool_handle_t
*zhp
, const char *propname
, const char *propval
)
664 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
667 nvlist_t
*nvl
= NULL
;
670 prop_flags_t flags
= { 0 };
672 (void) snprintf(errbuf
, sizeof (errbuf
),
673 dgettext(TEXT_DOMAIN
, "cannot set property for '%s'"),
676 if (nvlist_alloc(&nvl
, NV_UNIQUE_NAME
, 0) != 0)
677 return (no_memory(zhp
->zpool_hdl
));
679 if (nvlist_add_string(nvl
, propname
, propval
) != 0) {
681 return (no_memory(zhp
->zpool_hdl
));
684 version
= zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
);
685 if ((realprops
= zpool_valid_proplist(zhp
->zpool_hdl
,
686 zhp
->zpool_name
, nvl
, version
, flags
, errbuf
)) == NULL
) {
695 * Execute the corresponding ioctl() to set this property.
697 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
699 if (zcmd_write_src_nvlist(zhp
->zpool_hdl
, &zc
, nvl
) != 0) {
704 ret
= zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_SET_PROPS
, &zc
);
706 zcmd_free_nvlists(&zc
);
710 (void) zpool_standard_error(zhp
->zpool_hdl
, errno
, errbuf
);
712 (void) zpool_props_refresh(zhp
);
718 zpool_expand_proplist(zpool_handle_t
*zhp
, zprop_list_t
**plp
)
720 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
722 char buf
[ZFS_MAXPROPLEN
];
723 nvlist_t
*features
= NULL
;
726 boolean_t firstexpand
= (NULL
== *plp
);
729 if (zprop_expand_list(hdl
, plp
, ZFS_TYPE_POOL
) != 0)
733 while (*last
!= NULL
)
734 last
= &(*last
)->pl_next
;
737 features
= zpool_get_features(zhp
);
739 if ((*plp
)->pl_all
&& firstexpand
) {
740 for (i
= 0; i
< SPA_FEATURES
; i
++) {
741 zprop_list_t
*entry
= zfs_alloc(hdl
,
742 sizeof (zprop_list_t
));
743 entry
->pl_prop
= ZPROP_INVAL
;
744 entry
->pl_user_prop
= zfs_asprintf(hdl
, "feature@%s",
745 spa_feature_table
[i
].fi_uname
);
746 entry
->pl_width
= strlen(entry
->pl_user_prop
);
747 entry
->pl_all
= B_TRUE
;
750 last
= &entry
->pl_next
;
754 /* add any unsupported features */
755 for (nvp
= nvlist_next_nvpair(features
, NULL
);
756 nvp
!= NULL
; nvp
= nvlist_next_nvpair(features
, nvp
)) {
761 if (zfeature_is_supported(nvpair_name(nvp
)))
764 propname
= zfs_asprintf(hdl
, "unsupported@%s",
768 * Before adding the property to the list make sure that no
769 * other pool already added the same property.
773 while (entry
!= NULL
) {
774 if (entry
->pl_user_prop
!= NULL
&&
775 strcmp(propname
, entry
->pl_user_prop
) == 0) {
779 entry
= entry
->pl_next
;
786 entry
= zfs_alloc(hdl
, sizeof (zprop_list_t
));
787 entry
->pl_prop
= ZPROP_INVAL
;
788 entry
->pl_user_prop
= propname
;
789 entry
->pl_width
= strlen(entry
->pl_user_prop
);
790 entry
->pl_all
= B_TRUE
;
793 last
= &entry
->pl_next
;
796 for (entry
= *plp
; entry
!= NULL
; entry
= entry
->pl_next
) {
801 if (entry
->pl_prop
!= ZPROP_INVAL
&&
802 zpool_get_prop(zhp
, entry
->pl_prop
, buf
, sizeof (buf
),
804 if (strlen(buf
) > entry
->pl_width
)
805 entry
->pl_width
= strlen(buf
);
813 * Get the state for the given feature on the given ZFS pool.
816 zpool_prop_get_feature(zpool_handle_t
*zhp
, const char *propname
, char *buf
,
820 boolean_t found
= B_FALSE
;
821 nvlist_t
*features
= zpool_get_features(zhp
);
823 const char *feature
= strchr(propname
, '@') + 1;
825 supported
= zpool_prop_feature(propname
);
826 ASSERT(supported
|| zpool_prop_unsupported(propname
));
829 * Convert from feature name to feature guid. This conversion is
830 * unecessary for unsupported@... properties because they already
837 ret
= zfeature_lookup_name(feature
, &fi
);
839 (void) strlcpy(buf
, "-", len
);
842 feature
= fi
->fi_guid
;
845 if (nvlist_lookup_uint64(features
, feature
, &refcount
) == 0)
850 (void) strlcpy(buf
, ZFS_FEATURE_DISABLED
, len
);
853 (void) strlcpy(buf
, ZFS_FEATURE_ENABLED
, len
);
855 (void) strlcpy(buf
, ZFS_FEATURE_ACTIVE
, len
);
860 (void) strcpy(buf
, ZFS_UNSUPPORTED_INACTIVE
);
862 (void) strcpy(buf
, ZFS_UNSUPPORTED_READONLY
);
865 (void) strlcpy(buf
, "-", len
);
874 * Don't start the slice at the default block of 34; many storage
875 * devices will use a stripe width of 128k, other vendors prefer a 1m
876 * alignment. It is best to play it safe and ensure a 1m alignment
877 * given 512B blocks. When the block size is larger by a power of 2
878 * we will still be 1m aligned. Some devices are sensitive to the
879 * partition ending alignment as well.
881 #define NEW_START_BLOCK 2048
882 #define PARTITION_END_ALIGNMENT 2048
885 * Validate the given pool name, optionally putting an extended error message in
889 zpool_name_valid(libzfs_handle_t
*hdl
, boolean_t isopen
, const char *pool
)
895 ret
= pool_namecheck(pool
, &why
, &what
);
898 * The rules for reserved pool names were extended at a later point.
899 * But we need to support users with existing pools that may now be
900 * invalid. So we only check for this expanded set of names during a
901 * create (or import), and only in userland.
903 if (ret
== 0 && !isopen
&&
904 (strncmp(pool
, "mirror", 6) == 0 ||
905 strncmp(pool
, "raidz", 5) == 0 ||
906 strncmp(pool
, "spare", 5) == 0 ||
907 strcmp(pool
, "log") == 0)) {
910 dgettext(TEXT_DOMAIN
, "name is reserved"));
918 case NAME_ERR_TOOLONG
:
920 dgettext(TEXT_DOMAIN
, "name is too long"));
923 case NAME_ERR_INVALCHAR
:
925 dgettext(TEXT_DOMAIN
, "invalid character "
926 "'%c' in pool name"), what
);
929 case NAME_ERR_NOLETTER
:
930 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
931 "name must begin with a letter"));
934 case NAME_ERR_RESERVED
:
935 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
936 "name is reserved"));
939 case NAME_ERR_DISKLIKE
:
940 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
941 "pool name is reserved"));
944 case NAME_ERR_LEADING_SLASH
:
945 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
946 "leading slash in name"));
949 case NAME_ERR_EMPTY_COMPONENT
:
950 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
951 "empty component in name"));
954 case NAME_ERR_TRAILING_SLASH
:
955 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
956 "trailing slash in name"));
959 case NAME_ERR_MULTIPLE_AT
:
960 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
961 "multiple '@' delimiters in name"));
964 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
965 "permission set is missing '@'"));
976 * Open a handle to the given pool, even if the pool is currently in the FAULTED
980 zpool_open_canfail(libzfs_handle_t
*hdl
, const char *pool
)
986 * Make sure the pool name is valid.
988 if (!zpool_name_valid(hdl
, B_TRUE
, pool
)) {
989 (void) zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
990 dgettext(TEXT_DOMAIN
, "cannot open '%s'"),
995 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
998 zhp
->zpool_hdl
= hdl
;
999 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
1001 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
1007 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "no such pool"));
1008 (void) zfs_error_fmt(hdl
, EZFS_NOENT
,
1009 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), pool
);
1018 * Like the above, but silent on error. Used when iterating over pools (because
1019 * the configuration cache may be out of date).
1022 zpool_open_silent(libzfs_handle_t
*hdl
, const char *pool
, zpool_handle_t
**ret
)
1024 zpool_handle_t
*zhp
;
1027 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
1030 zhp
->zpool_hdl
= hdl
;
1031 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
1033 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
1049 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1053 zpool_open(libzfs_handle_t
*hdl
, const char *pool
)
1055 zpool_handle_t
*zhp
;
1057 if ((zhp
= zpool_open_canfail(hdl
, pool
)) == NULL
)
1060 if (zhp
->zpool_state
== POOL_STATE_UNAVAIL
) {
1061 (void) zfs_error_fmt(hdl
, EZFS_POOLUNAVAIL
,
1062 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), zhp
->zpool_name
);
1071 * Close the handle. Simply frees the memory associated with the handle.
1074 zpool_close(zpool_handle_t
*zhp
)
1076 if (zhp
->zpool_config
)
1077 nvlist_free(zhp
->zpool_config
);
1078 if (zhp
->zpool_old_config
)
1079 nvlist_free(zhp
->zpool_old_config
);
1080 if (zhp
->zpool_props
)
1081 nvlist_free(zhp
->zpool_props
);
1086 * Return the name of the pool.
1089 zpool_get_name(zpool_handle_t
*zhp
)
1091 return (zhp
->zpool_name
);
1096 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1099 zpool_get_state(zpool_handle_t
*zhp
)
1101 return (zhp
->zpool_state
);
1105 * Create the named pool, using the provided vdev list. It is assumed
1106 * that the consumer has already validated the contents of the nvlist, so we
1107 * don't have to worry about error semantics.
1110 zpool_create(libzfs_handle_t
*hdl
, const char *pool
, nvlist_t
*nvroot
,
1111 nvlist_t
*props
, nvlist_t
*fsprops
)
1113 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
1114 nvlist_t
*zc_fsprops
= NULL
;
1115 nvlist_t
*zc_props
= NULL
;
1120 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1121 "cannot create '%s'"), pool
);
1123 if (!zpool_name_valid(hdl
, B_FALSE
, pool
))
1124 return (zfs_error(hdl
, EZFS_INVALIDNAME
, msg
));
1126 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1130 prop_flags_t flags
= { .create
= B_TRUE
, .import
= B_FALSE
};
1132 if ((zc_props
= zpool_valid_proplist(hdl
, pool
, props
,
1133 SPA_VERSION_1
, flags
, msg
)) == NULL
) {
1142 zoned
= ((nvlist_lookup_string(fsprops
,
1143 zfs_prop_to_name(ZFS_PROP_ZONED
), &zonestr
) == 0) &&
1144 strcmp(zonestr
, "on") == 0);
1146 if ((zc_fsprops
= zfs_valid_proplist(hdl
,
1147 ZFS_TYPE_FILESYSTEM
, fsprops
, zoned
, NULL
, msg
)) == NULL
) {
1151 (nvlist_alloc(&zc_props
, NV_UNIQUE_NAME
, 0) != 0)) {
1154 if (nvlist_add_nvlist(zc_props
,
1155 ZPOOL_ROOTFS_PROPS
, zc_fsprops
) != 0) {
1160 if (zc_props
&& zcmd_write_src_nvlist(hdl
, &zc
, zc_props
) != 0)
1163 (void) strlcpy(zc
.zc_name
, pool
, sizeof (zc
.zc_name
));
1165 if ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_CREATE
, &zc
)) != 0) {
1167 zcmd_free_nvlists(&zc
);
1168 nvlist_free(zc_props
);
1169 nvlist_free(zc_fsprops
);
1174 * This can happen if the user has specified the same
1175 * device multiple times. We can't reliably detect this
1176 * until we try to add it and see we already have a
1177 * label. This can also happen under if the device is
1178 * part of an active md or lvm device.
1180 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1181 "one or more vdevs refer to the same device, or one of\n"
1182 "the devices is part of an active md or lvm device"));
1183 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1187 * This occurs when one of the devices is below
1188 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1189 * device was the problem device since there's no
1190 * reliable way to determine device size from userland.
1195 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1197 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1198 "one or more devices is less than the "
1199 "minimum size (%s)"), buf
);
1201 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1204 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1205 "one or more devices is out of space"));
1206 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1209 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1210 "cache device must be a disk or disk slice"));
1211 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1214 return (zpool_standard_error(hdl
, errno
, msg
));
1219 * If this is an alternate root pool, then we automatically set the
1220 * mountpoint of the root dataset to be '/'.
1222 if (nvlist_lookup_string(props
, zpool_prop_to_name(ZPOOL_PROP_ALTROOT
),
1226 verify((zhp
= zfs_open(hdl
, pool
, ZFS_TYPE_DATASET
)) != NULL
);
1227 verify(zfs_prop_set(zhp
, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT
),
1234 zcmd_free_nvlists(&zc
);
1235 nvlist_free(zc_props
);
1236 nvlist_free(zc_fsprops
);
1241 * Destroy the given pool. It is up to the caller to ensure that there are no
1242 * datasets left in the pool.
1245 zpool_destroy(zpool_handle_t
*zhp
)
1247 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
1248 zfs_handle_t
*zfp
= NULL
;
1249 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1252 if (zhp
->zpool_state
== POOL_STATE_ACTIVE
&&
1253 (zfp
= zfs_open(hdl
, zhp
->zpool_name
, ZFS_TYPE_FILESYSTEM
)) == NULL
)
1256 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1258 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_DESTROY
, &zc
) != 0) {
1259 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1260 "cannot destroy '%s'"), zhp
->zpool_name
);
1262 if (errno
== EROFS
) {
1263 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1264 "one or more devices is read only"));
1265 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1267 (void) zpool_standard_error(hdl
, errno
, msg
);
1276 remove_mountpoint(zfp
);
1284 * Add the given vdevs to the pool. The caller must have already performed the
1285 * necessary verification to ensure that the vdev specification is well-formed.
1288 zpool_add(zpool_handle_t
*zhp
, nvlist_t
*nvroot
)
1290 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
1292 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1294 nvlist_t
**spares
, **l2cache
;
1295 uint_t nspares
, nl2cache
;
1297 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1298 "cannot add to '%s'"), zhp
->zpool_name
);
1300 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1301 SPA_VERSION_SPARES
&&
1302 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_SPARES
,
1303 &spares
, &nspares
) == 0) {
1304 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1305 "upgraded to add hot spares"));
1306 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1309 #if defined(__sun__) || defined(__sun)
1310 if (zpool_is_bootable(zhp
) && nvlist_lookup_nvlist_array(nvroot
,
1311 ZPOOL_CONFIG_SPARES
, &spares
, &nspares
) == 0) {
1314 for (s
= 0; s
< nspares
; s
++) {
1317 if (nvlist_lookup_string(spares
[s
], ZPOOL_CONFIG_PATH
,
1318 &path
) == 0 && pool_uses_efi(spares
[s
])) {
1319 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1320 "device '%s' contains an EFI label and "
1321 "cannot be used on root pools."),
1322 zpool_vdev_name(hdl
, NULL
, spares
[s
],
1324 return (zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
));
1330 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1331 SPA_VERSION_L2CACHE
&&
1332 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_L2CACHE
,
1333 &l2cache
, &nl2cache
) == 0) {
1334 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1335 "upgraded to add cache devices"));
1336 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1339 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1341 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1343 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_ADD
, &zc
) != 0) {
1347 * This can happen if the user has specified the same
1348 * device multiple times. We can't reliably detect this
1349 * until we try to add it and see we already have a
1352 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1353 "one or more vdevs refer to the same device"));
1354 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1359 * This occurrs when one of the devices is below
1360 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1361 * device was the problem device since there's no
1362 * reliable way to determine device size from userland.
1367 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1369 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1370 "device is less than the minimum "
1373 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1377 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1378 "pool must be upgraded to add these vdevs"));
1379 (void) zfs_error(hdl
, EZFS_BADVERSION
, msg
);
1383 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1384 "root pool can not have multiple vdevs"
1385 " or separate logs"));
1386 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
);
1390 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1391 "cache device must be a disk or disk slice"));
1392 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1396 (void) zpool_standard_error(hdl
, errno
, msg
);
1404 zcmd_free_nvlists(&zc
);
1410 * Exports the pool from the system. The caller must ensure that there are no
1411 * mounted datasets in the pool.
1414 zpool_export_common(zpool_handle_t
*zhp
, boolean_t force
, boolean_t hardforce
)
1416 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
1419 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1420 "cannot export '%s'"), zhp
->zpool_name
);
1422 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1423 zc
.zc_cookie
= force
;
1424 zc
.zc_guid
= hardforce
;
1426 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_EXPORT
, &zc
) != 0) {
1429 zfs_error_aux(zhp
->zpool_hdl
, dgettext(TEXT_DOMAIN
,
1430 "use '-f' to override the following errors:\n"
1431 "'%s' has an active shared spare which could be"
1432 " used by other pools once '%s' is exported."),
1433 zhp
->zpool_name
, zhp
->zpool_name
);
1434 return (zfs_error(zhp
->zpool_hdl
, EZFS_ACTIVE_SPARE
,
1437 return (zpool_standard_error_fmt(zhp
->zpool_hdl
, errno
,
1446 zpool_export(zpool_handle_t
*zhp
, boolean_t force
)
1448 return (zpool_export_common(zhp
, force
, B_FALSE
));
1452 zpool_export_force(zpool_handle_t
*zhp
)
1454 return (zpool_export_common(zhp
, B_TRUE
, B_TRUE
));
1458 zpool_rewind_exclaim(libzfs_handle_t
*hdl
, const char *name
, boolean_t dryrun
,
1461 nvlist_t
*nv
= NULL
;
1467 if (!hdl
->libzfs_printerr
|| config
== NULL
)
1470 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nv
) != 0 ||
1471 nvlist_lookup_nvlist(nv
, ZPOOL_CONFIG_REWIND_INFO
, &nv
) != 0) {
1475 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_TIME
, &rewindto
) != 0)
1477 (void) nvlist_lookup_int64(nv
, ZPOOL_CONFIG_REWIND_TIME
, &loss
);
1479 if (localtime_r((time_t *)&rewindto
, &t
) != NULL
&&
1480 strftime(timestr
, 128, "%c", &t
) != 0) {
1482 (void) printf(dgettext(TEXT_DOMAIN
,
1483 "Would be able to return %s "
1484 "to its state as of %s.\n"),
1487 (void) printf(dgettext(TEXT_DOMAIN
,
1488 "Pool %s returned to its state as of %s.\n"),
1492 (void) printf(dgettext(TEXT_DOMAIN
,
1493 "%s approximately %lld "),
1494 dryrun
? "Would discard" : "Discarded",
1495 ((longlong_t
)loss
+ 30) / 60);
1496 (void) printf(dgettext(TEXT_DOMAIN
,
1497 "minutes of transactions.\n"));
1498 } else if (loss
> 0) {
1499 (void) printf(dgettext(TEXT_DOMAIN
,
1500 "%s approximately %lld "),
1501 dryrun
? "Would discard" : "Discarded",
1503 (void) printf(dgettext(TEXT_DOMAIN
,
1504 "seconds of transactions.\n"));
1510 zpool_explain_recover(libzfs_handle_t
*hdl
, const char *name
, int reason
,
1513 nvlist_t
*nv
= NULL
;
1515 uint64_t edata
= UINT64_MAX
;
1520 if (!hdl
->libzfs_printerr
)
1524 (void) printf(dgettext(TEXT_DOMAIN
, "action: "));
1526 (void) printf(dgettext(TEXT_DOMAIN
, "\t"));
1528 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1529 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nv
) != 0 ||
1530 nvlist_lookup_nvlist(nv
, ZPOOL_CONFIG_REWIND_INFO
, &nv
) != 0 ||
1531 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_TIME
, &rewindto
) != 0)
1534 (void) nvlist_lookup_int64(nv
, ZPOOL_CONFIG_REWIND_TIME
, &loss
);
1535 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_DATA_ERRORS
,
1538 (void) printf(dgettext(TEXT_DOMAIN
,
1539 "Recovery is possible, but will result in some data loss.\n"));
1541 if (localtime_r((time_t *)&rewindto
, &t
) != NULL
&&
1542 strftime(timestr
, 128, "%c", &t
) != 0) {
1543 (void) printf(dgettext(TEXT_DOMAIN
,
1544 "\tReturning the pool to its state as of %s\n"
1545 "\tshould correct the problem. "),
1548 (void) printf(dgettext(TEXT_DOMAIN
,
1549 "\tReverting the pool to an earlier state "
1550 "should correct the problem.\n\t"));
1554 (void) printf(dgettext(TEXT_DOMAIN
,
1555 "Approximately %lld minutes of data\n"
1556 "\tmust be discarded, irreversibly. "),
1557 ((longlong_t
)loss
+ 30) / 60);
1558 } else if (loss
> 0) {
1559 (void) printf(dgettext(TEXT_DOMAIN
,
1560 "Approximately %lld seconds of data\n"
1561 "\tmust be discarded, irreversibly. "),
1564 if (edata
!= 0 && edata
!= UINT64_MAX
) {
1566 (void) printf(dgettext(TEXT_DOMAIN
,
1567 "After rewind, at least\n"
1568 "\tone persistent user-data error will remain. "));
1570 (void) printf(dgettext(TEXT_DOMAIN
,
1571 "After rewind, several\n"
1572 "\tpersistent user-data errors will remain. "));
1575 (void) printf(dgettext(TEXT_DOMAIN
,
1576 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1577 reason
>= 0 ? "clear" : "import", name
);
1579 (void) printf(dgettext(TEXT_DOMAIN
,
1580 "A scrub of the pool\n"
1581 "\tis strongly recommended after recovery.\n"));
1585 (void) printf(dgettext(TEXT_DOMAIN
,
1586 "Destroy and re-create the pool from\n\ta backup source.\n"));
1590 * zpool_import() is a contracted interface. Should be kept the same
1593 * Applications should use zpool_import_props() to import a pool with
1594 * new properties value to be set.
1597 zpool_import(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1600 nvlist_t
*props
= NULL
;
1603 if (altroot
!= NULL
) {
1604 if (nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) != 0) {
1605 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1606 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1610 if (nvlist_add_string(props
,
1611 zpool_prop_to_name(ZPOOL_PROP_ALTROOT
), altroot
) != 0 ||
1612 nvlist_add_string(props
,
1613 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE
), "none") != 0) {
1615 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1616 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1621 ret
= zpool_import_props(hdl
, config
, newname
, props
,
1629 print_vdev_tree(libzfs_handle_t
*hdl
, const char *name
, nvlist_t
*nv
,
1635 uint64_t is_log
= 0;
1637 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_IS_LOG
,
1641 (void) printf("\t%*s%s%s\n", indent
, "", name
,
1642 is_log
? " [log]" : "");
1644 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
1645 &child
, &children
) != 0)
1648 for (c
= 0; c
< children
; c
++) {
1649 vname
= zpool_vdev_name(hdl
, NULL
, child
[c
], B_TRUE
);
1650 print_vdev_tree(hdl
, vname
, child
[c
], indent
+ 2);
1656 zpool_print_unsup_feat(nvlist_t
*config
)
1658 nvlist_t
*nvinfo
, *unsup_feat
;
1661 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) ==
1663 verify(nvlist_lookup_nvlist(nvinfo
, ZPOOL_CONFIG_UNSUP_FEAT
,
1666 for (nvp
= nvlist_next_nvpair(unsup_feat
, NULL
); nvp
!= NULL
;
1667 nvp
= nvlist_next_nvpair(unsup_feat
, nvp
)) {
1670 verify(nvpair_type(nvp
) == DATA_TYPE_STRING
);
1671 verify(nvpair_value_string(nvp
, &desc
) == 0);
1673 if (strlen(desc
) > 0)
1674 (void) printf("\t%s (%s)\n", nvpair_name(nvp
), desc
);
1676 (void) printf("\t%s\n", nvpair_name(nvp
));
1681 * Import the given pool using the known configuration and a list of
1682 * properties to be set. The configuration should have come from
1683 * zpool_find_import(). The 'newname' parameters control whether the pool
1684 * is imported with a different name.
1687 zpool_import_props(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1688 nvlist_t
*props
, int flags
)
1690 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
1691 zpool_rewind_policy_t policy
;
1692 nvlist_t
*nv
= NULL
;
1693 nvlist_t
*nvinfo
= NULL
;
1694 nvlist_t
*missing
= NULL
;
1701 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1704 (void) snprintf(errbuf
, sizeof (errbuf
), dgettext(TEXT_DOMAIN
,
1705 "cannot import pool '%s'"), origname
);
1707 if (newname
!= NULL
) {
1708 if (!zpool_name_valid(hdl
, B_FALSE
, newname
))
1709 return (zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1710 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1712 thename
= (char *)newname
;
1719 prop_flags_t flags
= { .create
= B_FALSE
, .import
= B_TRUE
};
1721 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_VERSION
,
1724 if ((props
= zpool_valid_proplist(hdl
, origname
,
1725 props
, version
, flags
, errbuf
)) == NULL
) {
1727 } else if (zcmd_write_src_nvlist(hdl
, &zc
, props
) != 0) {
1733 (void) strlcpy(zc
.zc_name
, thename
, sizeof (zc
.zc_name
));
1735 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1738 if (zcmd_write_conf_nvlist(hdl
, &zc
, config
) != 0) {
1742 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, zc
.zc_nvlist_conf_size
* 2) != 0) {
1747 zc
.zc_cookie
= flags
;
1748 while ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_IMPORT
, &zc
)) != 0 &&
1750 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
1751 zcmd_free_nvlists(&zc
);
1758 (void) zcmd_read_dst_nvlist(hdl
, &zc
, &nv
);
1759 zpool_get_rewind_policy(config
, &policy
);
1765 * Dry-run failed, but we print out what success
1766 * looks like if we found a best txg
1768 if (policy
.zrp_request
& ZPOOL_TRY_REWIND
) {
1769 zpool_rewind_exclaim(hdl
, newname
? origname
: thename
,
1775 if (newname
== NULL
)
1776 (void) snprintf(desc
, sizeof (desc
),
1777 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1780 (void) snprintf(desc
, sizeof (desc
),
1781 dgettext(TEXT_DOMAIN
, "cannot import '%s' as '%s'"),
1786 if (nv
!= NULL
&& nvlist_lookup_nvlist(nv
,
1787 ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) == 0 &&
1788 nvlist_exists(nvinfo
, ZPOOL_CONFIG_UNSUP_FEAT
)) {
1789 (void) printf(dgettext(TEXT_DOMAIN
, "This "
1790 "pool uses the following feature(s) not "
1791 "supported by this system:\n"));
1792 zpool_print_unsup_feat(nv
);
1793 if (nvlist_exists(nvinfo
,
1794 ZPOOL_CONFIG_CAN_RDONLY
)) {
1795 (void) printf(dgettext(TEXT_DOMAIN
,
1796 "All unsupported features are only "
1797 "required for writing to the pool."
1798 "\nThe pool can be imported using "
1799 "'-o readonly=on'.\n"));
1803 * Unsupported version.
1805 (void) zfs_error(hdl
, EZFS_BADVERSION
, desc
);
1809 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, desc
);
1813 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1814 "one or more devices is read only"));
1815 (void) zfs_error(hdl
, EZFS_BADDEV
, desc
);
1819 if (nv
&& nvlist_lookup_nvlist(nv
,
1820 ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) == 0 &&
1821 nvlist_lookup_nvlist(nvinfo
,
1822 ZPOOL_CONFIG_MISSING_DEVICES
, &missing
) == 0) {
1823 (void) printf(dgettext(TEXT_DOMAIN
,
1824 "The devices below are missing, use "
1825 "'-m' to import the pool anyway:\n"));
1826 print_vdev_tree(hdl
, NULL
, missing
, 2);
1827 (void) printf("\n");
1829 (void) zpool_standard_error(hdl
, error
, desc
);
1833 (void) zpool_standard_error(hdl
, error
, desc
);
1837 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1838 "one or more devices are already in use\n"));
1839 (void) zfs_error(hdl
, EZFS_BADDEV
, desc
);
1843 (void) zpool_standard_error(hdl
, error
, desc
);
1844 zpool_explain_recover(hdl
,
1845 newname
? origname
: thename
, -error
, nv
);
1852 zpool_handle_t
*zhp
;
1855 * This should never fail, but play it safe anyway.
1857 if (zpool_open_silent(hdl
, thename
, &zhp
) != 0)
1859 else if (zhp
!= NULL
)
1861 if (policy
.zrp_request
&
1862 (ZPOOL_DO_REWIND
| ZPOOL_TRY_REWIND
)) {
1863 zpool_rewind_exclaim(hdl
, newname
? origname
: thename
,
1864 ((policy
.zrp_request
& ZPOOL_TRY_REWIND
) != 0), nv
);
1870 zcmd_free_nvlists(&zc
);
1880 zpool_scan(zpool_handle_t
*zhp
, pool_scan_func_t func
)
1882 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
1884 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1886 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1887 zc
.zc_cookie
= func
;
1889 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_SCAN
, &zc
) == 0 ||
1890 (errno
== ENOENT
&& func
!= POOL_SCAN_NONE
))
1893 if (func
== POOL_SCAN_SCRUB
) {
1894 (void) snprintf(msg
, sizeof (msg
),
1895 dgettext(TEXT_DOMAIN
, "cannot scrub %s"), zc
.zc_name
);
1896 } else if (func
== POOL_SCAN_NONE
) {
1897 (void) snprintf(msg
, sizeof (msg
),
1898 dgettext(TEXT_DOMAIN
, "cannot cancel scrubbing %s"),
1901 assert(!"unexpected result");
1904 if (errno
== EBUSY
) {
1906 pool_scan_stat_t
*ps
= NULL
;
1909 verify(nvlist_lookup_nvlist(zhp
->zpool_config
,
1910 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
1911 (void) nvlist_lookup_uint64_array(nvroot
,
1912 ZPOOL_CONFIG_SCAN_STATS
, (uint64_t **)&ps
, &psc
);
1913 if (ps
&& ps
->pss_func
== POOL_SCAN_SCRUB
)
1914 return (zfs_error(hdl
, EZFS_SCRUBBING
, msg
));
1916 return (zfs_error(hdl
, EZFS_RESILVERING
, msg
));
1917 } else if (errno
== ENOENT
) {
1918 return (zfs_error(hdl
, EZFS_NO_SCRUB
, msg
));
1920 return (zpool_standard_error(hdl
, errno
, msg
));
1925 * Find a vdev that matches the search criteria specified. We use the
1926 * the nvpair name to determine how we should look for the device.
1927 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1928 * spare; but FALSE if its an INUSE spare.
1931 vdev_to_nvlist_iter(nvlist_t
*nv
, nvlist_t
*search
, boolean_t
*avail_spare
,
1932 boolean_t
*l2cache
, boolean_t
*log
)
1939 nvpair_t
*pair
= nvlist_next_nvpair(search
, NULL
);
1941 /* Nothing to look for */
1942 if (search
== NULL
|| pair
== NULL
)
1945 /* Obtain the key we will use to search */
1946 srchkey
= nvpair_name(pair
);
1948 switch (nvpair_type(pair
)) {
1949 case DATA_TYPE_UINT64
:
1950 if (strcmp(srchkey
, ZPOOL_CONFIG_GUID
) == 0) {
1951 uint64_t srchval
, theguid
;
1953 verify(nvpair_value_uint64(pair
, &srchval
) == 0);
1954 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
1956 if (theguid
== srchval
)
1961 case DATA_TYPE_STRING
: {
1962 char *srchval
, *val
;
1964 verify(nvpair_value_string(pair
, &srchval
) == 0);
1965 if (nvlist_lookup_string(nv
, srchkey
, &val
) != 0)
1969 * Search for the requested value. Special cases:
1971 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1972 * "-part1", or "p1". The suffix is hidden from the user,
1973 * but included in the string, so this matches around it.
1974 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
1975 * is used to check all possible expanded paths.
1976 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1978 * Otherwise, all other searches are simple string compares.
1980 if (strcmp(srchkey
, ZPOOL_CONFIG_PATH
) == 0) {
1981 uint64_t wholedisk
= 0;
1983 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
1985 if (zfs_strcmp_pathname(srchval
, val
, wholedisk
) == 0)
1988 } else if (strcmp(srchkey
, ZPOOL_CONFIG_TYPE
) == 0 && val
) {
1989 char *type
, *idx
, *end
, *p
;
1990 uint64_t id
, vdev_id
;
1993 * Determine our vdev type, keeping in mind
1994 * that the srchval is composed of a type and
1995 * vdev id pair (i.e. mirror-4).
1997 if ((type
= strdup(srchval
)) == NULL
)
2000 if ((p
= strrchr(type
, '-')) == NULL
) {
2008 * If the types don't match then keep looking.
2010 if (strncmp(val
, type
, strlen(val
)) != 0) {
2015 verify(strncmp(type
, VDEV_TYPE_RAIDZ
,
2016 strlen(VDEV_TYPE_RAIDZ
)) == 0 ||
2017 strncmp(type
, VDEV_TYPE_MIRROR
,
2018 strlen(VDEV_TYPE_MIRROR
)) == 0);
2019 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_ID
,
2023 vdev_id
= strtoull(idx
, &end
, 10);
2030 * Now verify that we have the correct vdev id.
2039 if (strcmp(srchval
, val
) == 0)
2048 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
2049 &child
, &children
) != 0)
2052 for (c
= 0; c
< children
; c
++) {
2053 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2054 avail_spare
, l2cache
, NULL
)) != NULL
) {
2056 * The 'is_log' value is only set for the toplevel
2057 * vdev, not the leaf vdevs. So we always lookup the
2058 * log device from the root of the vdev tree (where
2059 * 'log' is non-NULL).
2062 nvlist_lookup_uint64(child
[c
],
2063 ZPOOL_CONFIG_IS_LOG
, &is_log
) == 0 &&
2071 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_SPARES
,
2072 &child
, &children
) == 0) {
2073 for (c
= 0; c
< children
; c
++) {
2074 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2075 avail_spare
, l2cache
, NULL
)) != NULL
) {
2076 *avail_spare
= B_TRUE
;
2082 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_L2CACHE
,
2083 &child
, &children
) == 0) {
2084 for (c
= 0; c
< children
; c
++) {
2085 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2086 avail_spare
, l2cache
, NULL
)) != NULL
) {
2097 * Given a physical path (minus the "/devices" prefix), find the
2101 zpool_find_vdev_by_physpath(zpool_handle_t
*zhp
, const char *ppath
,
2102 boolean_t
*avail_spare
, boolean_t
*l2cache
, boolean_t
*log
)
2104 nvlist_t
*search
, *nvroot
, *ret
;
2106 verify(nvlist_alloc(&search
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2107 verify(nvlist_add_string(search
, ZPOOL_CONFIG_PHYS_PATH
, ppath
) == 0);
2109 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2112 *avail_spare
= B_FALSE
;
2116 ret
= vdev_to_nvlist_iter(nvroot
, search
, avail_spare
, l2cache
, log
);
2117 nvlist_free(search
);
2123 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2126 zpool_vdev_is_interior(const char *name
)
2128 if (strncmp(name
, VDEV_TYPE_RAIDZ
, strlen(VDEV_TYPE_RAIDZ
)) == 0 ||
2129 strncmp(name
, VDEV_TYPE_MIRROR
, strlen(VDEV_TYPE_MIRROR
)) == 0)
2135 zpool_find_vdev(zpool_handle_t
*zhp
, const char *path
, boolean_t
*avail_spare
,
2136 boolean_t
*l2cache
, boolean_t
*log
)
2139 nvlist_t
*nvroot
, *search
, *ret
;
2142 verify(nvlist_alloc(&search
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2144 guid
= strtoull(path
, &end
, 10);
2145 if (guid
!= 0 && *end
== '\0') {
2146 verify(nvlist_add_uint64(search
, ZPOOL_CONFIG_GUID
, guid
) == 0);
2147 } else if (zpool_vdev_is_interior(path
)) {
2148 verify(nvlist_add_string(search
, ZPOOL_CONFIG_TYPE
, path
) == 0);
2150 verify(nvlist_add_string(search
, ZPOOL_CONFIG_PATH
, path
) == 0);
2153 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2156 *avail_spare
= B_FALSE
;
2160 ret
= vdev_to_nvlist_iter(nvroot
, search
, avail_spare
, l2cache
, log
);
2161 nvlist_free(search
);
2167 vdev_online(nvlist_t
*nv
)
2171 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_OFFLINE
, &ival
) == 0 ||
2172 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_FAULTED
, &ival
) == 0 ||
2173 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_REMOVED
, &ival
) == 0)
2180 * Helper function for zpool_get_physpaths().
2183 vdev_get_one_physpath(nvlist_t
*config
, char *physpath
, size_t physpath_size
,
2184 size_t *bytes_written
)
2186 size_t bytes_left
, pos
, rsz
;
2190 if (nvlist_lookup_string(config
, ZPOOL_CONFIG_PHYS_PATH
,
2192 return (EZFS_NODEVICE
);
2194 pos
= *bytes_written
;
2195 bytes_left
= physpath_size
- pos
;
2196 format
= (pos
== 0) ? "%s" : " %s";
2198 rsz
= snprintf(physpath
+ pos
, bytes_left
, format
, tmppath
);
2199 *bytes_written
+= rsz
;
2201 if (rsz
>= bytes_left
) {
2202 /* if physpath was not copied properly, clear it */
2203 if (bytes_left
!= 0) {
2206 return (EZFS_NOSPC
);
2212 vdev_get_physpaths(nvlist_t
*nv
, char *physpath
, size_t phypath_size
,
2213 size_t *rsz
, boolean_t is_spare
)
2218 if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_TYPE
, &type
) != 0)
2219 return (EZFS_INVALCONFIG
);
2221 if (strcmp(type
, VDEV_TYPE_DISK
) == 0) {
2223 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2224 * For a spare vdev, we only want to boot from the active
2229 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_IS_SPARE
,
2232 return (EZFS_INVALCONFIG
);
2235 if (vdev_online(nv
)) {
2236 if ((ret
= vdev_get_one_physpath(nv
, physpath
,
2237 phypath_size
, rsz
)) != 0)
2240 } else if (strcmp(type
, VDEV_TYPE_MIRROR
) == 0 ||
2241 strcmp(type
, VDEV_TYPE_REPLACING
) == 0 ||
2242 (is_spare
= (strcmp(type
, VDEV_TYPE_SPARE
) == 0))) {
2247 if (nvlist_lookup_nvlist_array(nv
,
2248 ZPOOL_CONFIG_CHILDREN
, &child
, &count
) != 0)
2249 return (EZFS_INVALCONFIG
);
2251 for (i
= 0; i
< count
; i
++) {
2252 ret
= vdev_get_physpaths(child
[i
], physpath
,
2253 phypath_size
, rsz
, is_spare
);
2254 if (ret
== EZFS_NOSPC
)
2259 return (EZFS_POOL_INVALARG
);
2263 * Get phys_path for a root pool config.
2264 * Return 0 on success; non-zero on failure.
2267 zpool_get_config_physpath(nvlist_t
*config
, char *physpath
, size_t phypath_size
)
2270 nvlist_t
*vdev_root
;
2277 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
2279 return (EZFS_INVALCONFIG
);
2281 if (nvlist_lookup_string(vdev_root
, ZPOOL_CONFIG_TYPE
, &type
) != 0 ||
2282 nvlist_lookup_nvlist_array(vdev_root
, ZPOOL_CONFIG_CHILDREN
,
2283 &child
, &count
) != 0)
2284 return (EZFS_INVALCONFIG
);
2286 #if defined(__sun__) || defined(__sun)
2288 * root pool can not have EFI labeled disks and can only have
2289 * a single top-level vdev.
2291 if (strcmp(type
, VDEV_TYPE_ROOT
) != 0 || count
!= 1 ||
2292 pool_uses_efi(vdev_root
))
2293 return (EZFS_POOL_INVALARG
);
2296 (void) vdev_get_physpaths(child
[0], physpath
, phypath_size
, &rsz
,
2299 /* No online devices */
2301 return (EZFS_NODEVICE
);
2307 * Get phys_path for a root pool
2308 * Return 0 on success; non-zero on failure.
2311 zpool_get_physpath(zpool_handle_t
*zhp
, char *physpath
, size_t phypath_size
)
2313 return (zpool_get_config_physpath(zhp
->zpool_config
, physpath
,
2318 * If the device has being dynamically expanded then we need to relabel
2319 * the disk to use the new unallocated space.
2322 zpool_relabel_disk(libzfs_handle_t
*hdl
, const char *path
, const char *msg
)
2326 if ((fd
= open(path
, O_RDWR
|O_DIRECT
)) < 0) {
2327 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2328 "relabel '%s': unable to open device: %d"), path
, errno
);
2329 return (zfs_error(hdl
, EZFS_OPENFAILED
, msg
));
2333 * It's possible that we might encounter an error if the device
2334 * does not have any unallocated space left. If so, we simply
2335 * ignore that error and continue on.
2337 * Also, we don't call efi_rescan() - that would just return EBUSY.
2338 * The module will do it for us in vdev_disk_open().
2340 error
= efi_use_whole_disk(fd
);
2342 if (error
&& error
!= VT_ENOSPC
) {
2343 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2344 "relabel '%s': unable to read disk capacity"), path
);
2345 return (zfs_error(hdl
, EZFS_NOCAP
, msg
));
2351 * Bring the specified vdev online. The 'flags' parameter is a set of the
2352 * ZFS_ONLINE_* flags.
2355 zpool_vdev_online(zpool_handle_t
*zhp
, const char *path
, int flags
,
2356 vdev_state_t
*newstate
)
2358 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
2361 boolean_t avail_spare
, l2cache
, islog
;
2362 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2365 if (flags
& ZFS_ONLINE_EXPAND
) {
2366 (void) snprintf(msg
, sizeof (msg
),
2367 dgettext(TEXT_DOMAIN
, "cannot expand %s"), path
);
2369 (void) snprintf(msg
, sizeof (msg
),
2370 dgettext(TEXT_DOMAIN
, "cannot online %s"), path
);
2373 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2374 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2376 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2378 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2381 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2383 if (flags
& ZFS_ONLINE_EXPAND
||
2384 zpool_get_prop_int(zhp
, ZPOOL_PROP_AUTOEXPAND
, NULL
)) {
2385 uint64_t wholedisk
= 0;
2387 (void) nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_WHOLE_DISK
,
2391 * XXX - L2ARC 1.0 devices can't support expansion.
2394 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2395 "cannot expand cache devices"));
2396 return (zfs_error(hdl
, EZFS_VDEVNOTSUP
, msg
));
2400 const char *fullpath
= path
;
2401 char buf
[MAXPATHLEN
];
2403 if (path
[0] != '/') {
2404 error
= zfs_resolve_shortname(path
, buf
,
2407 return (zfs_error(hdl
, EZFS_NODEVICE
,
2413 error
= zpool_relabel_disk(hdl
, fullpath
, msg
);
2419 zc
.zc_cookie
= VDEV_STATE_ONLINE
;
2422 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) != 0) {
2423 if (errno
== EINVAL
) {
2424 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "was split "
2425 "from this pool into a new one. Use '%s' "
2426 "instead"), "zpool detach");
2427 return (zfs_error(hdl
, EZFS_POSTSPLIT_ONLINE
, msg
));
2429 return (zpool_standard_error(hdl
, errno
, msg
));
2432 *newstate
= zc
.zc_cookie
;
2437 * Take the specified vdev offline
2440 zpool_vdev_offline(zpool_handle_t
*zhp
, const char *path
, boolean_t istmp
)
2442 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
2445 boolean_t avail_spare
, l2cache
;
2446 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2448 (void) snprintf(msg
, sizeof (msg
),
2449 dgettext(TEXT_DOMAIN
, "cannot offline %s"), path
);
2451 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2452 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2454 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2456 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2459 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2461 zc
.zc_cookie
= VDEV_STATE_OFFLINE
;
2462 zc
.zc_obj
= istmp
? ZFS_OFFLINE_TEMPORARY
: 0;
2464 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2471 * There are no other replicas of this device.
2473 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
2477 * The log device has unplayed logs
2479 return (zfs_error(hdl
, EZFS_UNPLAYED_LOGS
, msg
));
2482 return (zpool_standard_error(hdl
, errno
, msg
));
2487 * Mark the given vdev faulted.
2490 zpool_vdev_fault(zpool_handle_t
*zhp
, uint64_t guid
, vdev_aux_t aux
)
2492 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
2494 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2496 (void) snprintf(msg
, sizeof (msg
),
2497 dgettext(TEXT_DOMAIN
, "cannot fault %llu"), (u_longlong_t
)guid
);
2499 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2501 zc
.zc_cookie
= VDEV_STATE_FAULTED
;
2504 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2511 * There are no other replicas of this device.
2513 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
2516 return (zpool_standard_error(hdl
, errno
, msg
));
2522 * Mark the given vdev degraded.
2525 zpool_vdev_degrade(zpool_handle_t
*zhp
, uint64_t guid
, vdev_aux_t aux
)
2527 zfs_cmd_t zc
= { "\0", "\0", "\0", "\0", 0 };
2529 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2531 (void) snprintf(msg
, sizeof (msg
),
2532 dgettext(TEXT_DOMAIN
, "cannot degrade %llu"), (u_longlong_t
)guid
);
2534 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2536 zc
.zc_cookie
= VDEV_STATE_DEGRADED
;
2539 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2542 return (zpool_standard_error(hdl
, errno
, msg
));
2546 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2550 is_replacing_spare(nvlist_t
*search
, nvlist_t
*tgt
, int which
)
2556 if (nvlist_lookup_nvlist_array(search
, ZPOOL_CONFIG_CHILDREN
, &child
,
2558 verify(nvlist_lookup_string(search
, ZPOOL_CONFIG_TYPE
,
2561 if (strcmp(type
, VDEV_TYPE_SPARE
) == 0 &&
2562 children
== 2 && child
[which
] == tgt
)
2565 for (c
= 0; c
< children
; c
++)
2566 if (is_replacing_spare(child
[c
], tgt
, which
))
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

#if defined(__sun__) || defined(__sun)
	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}
#endif

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

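/*
 * Illustrative note: 'nvroot' is expected to be a root vdev nvlist with
 * exactly one child describing the new disk, the same shape the zpool
 * command builds before calling this function.  A hand-rolled caller would
 * construct it roughly as follows (sketch only; error handling and the
 * device path are placeholders):
 *
 *	nvlist_t *disk, *root;
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb1");
 *	(void) nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL);
 *	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 */
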
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

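/*
 * Matching is done on the display names returned by zpool_vdev_name() so
 * that both the config-derived entries and the user-supplied entries are
 * compared in the same normalized form (for disk vdevs, the short device
 * name without any leading /dev path).
 */
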
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;

		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}

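/*
 * Usage note (illustrative): 'zpool split -n' exercises the dryrun flag to
 * preview the vdev layout that would be generated (the caller prints and
 * frees *newroot), while a real split creates the new pool and, when
 * flags.import is clear, leaves it exported for the administrator to import
 * later.
 */
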
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

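/*
 * The rewind nvlist gives this entry point a second job: besides clearing
 * error counts, 'zpool clear -F' passes a rewind request through here, and
 * the ZPOOL_TRY_REWIND case only reports (via zpool_rewind_exclaim()) what
 * a rewind would do without actually performing it.
 */
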
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

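/*
 * Reopen the pool.
 */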
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
 */
static char *
strip_partition(libzfs_handle_t *hdl, char *path)
{
	char *tmp = zfs_strdup(hdl, path);
	char *part = NULL, *d = NULL;

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d);
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++);
		if (*d == '\0')
			*part = '\0';
	}
	return (tmp);
}

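/*
 * Illustrative examples of the stripping rules above (assuming typical
 * Linux device names): "sdb3" and "hda1" lose their trailing digits,
 * "md126p2" becomes "md126", and "ata-SOMEDISK-part1" becomes
 * "ata-SOMEDISK".  A name with no recognized suffix is returned unchanged,
 * as a copy the caller must free.
 */
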
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid, *type;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			return strip_partition(hdl, path);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

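/*
 * The caller owns the nvlist returned through 'nverrlistp' and is expected
 * to free it with nvlist_free() once it has walked the per-error dataset
 * and object numbers; zpool_obj_to_path() further below turns those numbers
 * back into human-readable paths for 'zpool status -v'.
 */
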
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

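/*
 * zpool_set_history_str() flattens a command line ("subcommand arg1 arg2
 * ...") into a single space-separated string, silently stopping once the
 * result would exceed HIS_MAX_RECORD_LEN; the staged string is what later
 * appears in 'zpool history' output.
 */
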
/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

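/*
 * Record framing, for reference: each history record on the wire is a
 * 64-bit little-endian length followed by that many bytes of packed
 * nvlist, i.e.
 *
 *	+--------+--------------------------+--------+-------------- ...
 *	| reclen | packed nvlist (reclen B) | reclen | packed nvlist ...
 *	+--------+--------------------------+--------+-------------- ...
 *
 * which is why the length is reassembled a byte at a time above rather than
 * being read with a single, possibly misaligned and endian-dependent, load.
 */
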
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

/*
 * Retrieve the next event.  If there is a new event available 'nvp' will
 * contain a newly allocated nvlist and 'dropped' will be set to the number
 * of missed events since the last call to this function.  When 'nvp' is
 * set to NULL it indicates no new events are available.  In either case
 * the function returns 0 and it is up to the caller to free 'nvp'.  In
 * the case of a fatal error the function will return a non-zero value.
 * When the function is called in blocking mode it will not return until
 * a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, int block, int cleanup_fd)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = cleanup_fd;

	if (!block)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (block)
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}

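/*
 * Clear all events.
 */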
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

static int
zpool_label_disk_wait(char *path, int timeout)
{
	struct stat64 statbuf;
	int i;

	/*
	 * Wait timeout milliseconds for a newly created device to be available
	 * from the given path.  There is a small window when a /dev/ device
	 * will exist and the udev link will not, so we must wait for the
	 * symlink.  Depending on the udev rules this may take a few seconds.
	 */
	for (i = 0; i < timeout; i++) {
		usleep(1000);

		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0))
			return (0);
	}

	return (ENOENT);
}

static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}
#endif

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message dir-
		 * ecting the user to manually label the disk and give
		 * a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	/* Wait for the first expected partition to appear. */

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	rval = zpool_label_disk_wait(path, 3000);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}
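
/*
 * Resulting layout, for reference: partition 1 ("zfs") covers the bulk of
 * the disk starting at 'start_block', and partition 9 is a small reserved
 * area of EFI_MIN_RESV_SIZE sectors at the end of the disk, matching the
 * layout 'zpool create' produces for whole disks.
 */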