/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);
	return (0);
}
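
/*
 * Illustrative sketch (not part of the original source): the loop above
 * grows the destination nvlist buffer and retries whenever the kernel
 * reports ENOMEM.  A caller of the same zcmd_* helpers could fetch the
 * properties of an arbitrary pool like this; fetch_pool_props() is a
 * hypothetical helper name.
 */
#if 0
static int
fetch_pool_props(libzfs_handle_t *hdl, const char *name, nvlist_t **nvp)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);
	/* Retry with a larger buffer until the ioctl succeeds. */
	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno != ENOMEM || zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (zcmd_read_dst_nvlist(hdl, &zc, nvp) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	zcmd_free_nvlists(&zc);
	return (0);
}
#endif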
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
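
/*
 * Illustrative sketch (not from the original source): the two mappers above
 * translate raw state enums into the strings printed by zpool(8)-style
 * tools.  A hypothetical status line could be produced like this.
 */
#if 0
static void
print_pool_status(pool_state_t pstate, vdev_state_t vstate, vdev_aux_t aux)
{
	/* e.g. "state: ACTIVE (root vdev ONLINE)" */
	(void) printf("state: %s (root vdev %s)\n",
	    zpool_pool_state_to_name(pstate),
	    zpool_state_to_name(vstate, aux));
}
#endif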
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
			}
			break;

		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_ASHIFT:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;

		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;
	}
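
/*
 * Illustrative sketch (not from the original source): reading a couple of
 * pool properties through the public getters above.  libzfs_init(),
 * libzfs_fini() and the pool name "tank" are assumptions made for the
 * example only.
 */
#if 0
static void
show_pool_props(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp = zpool_open(hdl, "tank");
	char buf[ZPOOL_MAXNAMELEN];

	if (zhp != NULL) {
		if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
		    sizeof (buf), NULL) == 0)
			(void) printf("health: %s\n", buf);
		(void) printf("guid: %llu\n", (u_longlong_t)
		    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
}
#endif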
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
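
/*
 * Illustrative sketch (not from the original source): zpool_is_bootable()
 * reports whether the bootfs property is set to something other than the
 * "-" placeholder, so a caller could gate root-pool specific checks on it
 * as shown below (hypothetical helper).
 */
#if 0
static int
check_bootable(zpool_handle_t *zhp)
{
	if (!zpool_is_bootable(zhp))
		return (0);	/* not a root pool, nothing to verify */
	/* root-pool specific validation would go here */
	return (1);
}
#endif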
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvlist_t *retprops;
	nvpair_t *elem = NULL;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			zfeature_info_t *feature;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, &feature);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
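
/*
 * Illustrative sketch (not from the original source): callers such as
 * zpool_set_prop() below hand zpool_valid_proplist() an nvlist of
 * string-valued properties and get back a parsed, validated copy.  The
 * error buffer contents and the helper name are example assumptions.
 */
#if 0
static nvlist_t *
validate_one_prop(libzfs_handle_t *hdl, zpool_handle_t *zhp,
    const char *propname, const char *propval)
{
	nvlist_t *nvl, *real;
	prop_flags_t flags = { 0 };
	char errbuf[1024];
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf), "cannot set property");
	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(nvl, propname, propval) != 0)
		return (NULL);
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	real = zpool_valid_proplist(hdl, zpool_get_name(zhp), nvl, version,
	    flags, errbuf);
	nvlist_free(nvl);
	return (real);
}
#endif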
690 * Set zpool property : propname=propval.
693 zpool_set_prop(zpool_handle_t
*zhp
, const char *propname
, const char *propval
)
695 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
698 nvlist_t
*nvl
= NULL
;
701 prop_flags_t flags
= { 0 };
703 (void) snprintf(errbuf
, sizeof (errbuf
),
704 dgettext(TEXT_DOMAIN
, "cannot set property for '%s'"),
707 if (nvlist_alloc(&nvl
, NV_UNIQUE_NAME
, 0) != 0)
708 return (no_memory(zhp
->zpool_hdl
));
710 if (nvlist_add_string(nvl
, propname
, propval
) != 0) {
712 return (no_memory(zhp
->zpool_hdl
));
715 version
= zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
);
716 if ((realprops
= zpool_valid_proplist(zhp
->zpool_hdl
,
717 zhp
->zpool_name
, nvl
, version
, flags
, errbuf
)) == NULL
) {
726 * Execute the corresponding ioctl() to set this property.
728 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
730 if (zcmd_write_src_nvlist(zhp
->zpool_hdl
, &zc
, nvl
) != 0) {
735 ret
= zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_SET_PROPS
, &zc
);
737 zcmd_free_nvlists(&zc
);
741 (void) zpool_standard_error(zhp
->zpool_hdl
, errno
, errbuf
);
743 (void) zpool_props_refresh(zhp
);
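
/*
 * Illustrative sketch (not from the original source): setting a pool
 * property through the wrapper above; the property/value pair is an
 * example assumption.
 */
#if 0
static int
set_pool_comment(zpool_handle_t *zhp)
{
	/* equivalent to "zpool set comment='scratch pool' <pool>" */
	return (zpool_set_prop(zhp, "comment", "scratch pool"));
}
#endif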
749 zpool_expand_proplist(zpool_handle_t
*zhp
, zprop_list_t
**plp
)
751 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
753 char buf
[ZFS_MAXPROPLEN
];
754 nvlist_t
*features
= NULL
;
757 boolean_t firstexpand
= (NULL
== *plp
);
760 if (zprop_expand_list(hdl
, plp
, ZFS_TYPE_POOL
) != 0)
764 while (*last
!= NULL
)
765 last
= &(*last
)->pl_next
;
768 features
= zpool_get_features(zhp
);
770 if ((*plp
)->pl_all
&& firstexpand
) {
771 for (i
= 0; i
< SPA_FEATURES
; i
++) {
772 zprop_list_t
*entry
= zfs_alloc(hdl
,
773 sizeof (zprop_list_t
));
774 entry
->pl_prop
= ZPROP_INVAL
;
775 entry
->pl_user_prop
= zfs_asprintf(hdl
, "feature@%s",
776 spa_feature_table
[i
].fi_uname
);
777 entry
->pl_width
= strlen(entry
->pl_user_prop
);
778 entry
->pl_all
= B_TRUE
;
781 last
= &entry
->pl_next
;
785 /* add any unsupported features */
786 for (nvp
= nvlist_next_nvpair(features
, NULL
);
787 nvp
!= NULL
; nvp
= nvlist_next_nvpair(features
, nvp
)) {
792 if (zfeature_is_supported(nvpair_name(nvp
)))
795 propname
= zfs_asprintf(hdl
, "unsupported@%s",
799 * Before adding the property to the list make sure that no
800 * other pool already added the same property.
804 while (entry
!= NULL
) {
805 if (entry
->pl_user_prop
!= NULL
&&
806 strcmp(propname
, entry
->pl_user_prop
) == 0) {
810 entry
= entry
->pl_next
;
817 entry
= zfs_alloc(hdl
, sizeof (zprop_list_t
));
818 entry
->pl_prop
= ZPROP_INVAL
;
819 entry
->pl_user_prop
= propname
;
820 entry
->pl_width
= strlen(entry
->pl_user_prop
);
821 entry
->pl_all
= B_TRUE
;
824 last
= &entry
->pl_next
;
827 for (entry
= *plp
; entry
!= NULL
; entry
= entry
->pl_next
) {
832 if (entry
->pl_prop
!= ZPROP_INVAL
&&
833 zpool_get_prop(zhp
, entry
->pl_prop
, buf
, sizeof (buf
),
835 if (strlen(buf
) > entry
->pl_width
)
836 entry
->pl_width
= strlen(buf
);
844 * Get the state for the given feature on the given ZFS pool.
847 zpool_prop_get_feature(zpool_handle_t
*zhp
, const char *propname
, char *buf
,
851 boolean_t found
= B_FALSE
;
852 nvlist_t
*features
= zpool_get_features(zhp
);
854 const char *feature
= strchr(propname
, '@') + 1;
856 supported
= zpool_prop_feature(propname
);
857 ASSERT(supported
|| zpool_prop_unsupported(propname
));
	 * Convert from feature name to feature guid.  This conversion is
	 * unnecessary for unsupported@... properties because they already
868 ret
= zfeature_lookup_name(feature
, &fi
);
870 (void) strlcpy(buf
, "-", len
);
873 feature
= fi
->fi_guid
;
876 if (nvlist_lookup_uint64(features
, feature
, &refcount
) == 0)
881 (void) strlcpy(buf
, ZFS_FEATURE_DISABLED
, len
);
884 (void) strlcpy(buf
, ZFS_FEATURE_ENABLED
, len
);
886 (void) strlcpy(buf
, ZFS_FEATURE_ACTIVE
, len
);
891 (void) strcpy(buf
, ZFS_UNSUPPORTED_INACTIVE
);
893 (void) strcpy(buf
, ZFS_UNSUPPORTED_READONLY
);
896 (void) strlcpy(buf
, "-", len
);
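
/*
 * Illustrative sketch (not from the original source): querying a feature
 * state with the helper above.  The feature name, the buffer size and the
 * assumption that 0 indicates success are examples only; for supported
 * features the buffer receives "disabled", "enabled" or "active".
 */
#if 0
static void
show_feature_state(zpool_handle_t *zhp)
{
	char state[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
	    sizeof (state)) == 0)
		(void) printf("async_destroy: %s\n", state);
}
#endif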
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK			2048
#define	PARTITION_END_ALIGNMENT		2048
916 * Validate the given pool name, optionally putting an extended error message in
920 zpool_name_valid(libzfs_handle_t
*hdl
, boolean_t isopen
, const char *pool
)
926 ret
= pool_namecheck(pool
, &why
, &what
);
929 * The rules for reserved pool names were extended at a later point.
930 * But we need to support users with existing pools that may now be
931 * invalid. So we only check for this expanded set of names during a
932 * create (or import), and only in userland.
934 if (ret
== 0 && !isopen
&&
935 (strncmp(pool
, "mirror", 6) == 0 ||
936 strncmp(pool
, "raidz", 5) == 0 ||
937 strncmp(pool
, "spare", 5) == 0 ||
938 strcmp(pool
, "log") == 0)) {
941 dgettext(TEXT_DOMAIN
, "name is reserved"));
949 case NAME_ERR_TOOLONG
:
951 dgettext(TEXT_DOMAIN
, "name is too long"));
954 case NAME_ERR_INVALCHAR
:
956 dgettext(TEXT_DOMAIN
, "invalid character "
957 "'%c' in pool name"), what
);
960 case NAME_ERR_NOLETTER
:
961 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
962 "name must begin with a letter"));
965 case NAME_ERR_RESERVED
:
966 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
967 "name is reserved"));
970 case NAME_ERR_DISKLIKE
:
971 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
972 "pool name is reserved"));
975 case NAME_ERR_LEADING_SLASH
:
976 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
977 "leading slash in name"));
980 case NAME_ERR_EMPTY_COMPONENT
:
981 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
982 "empty component in name"));
985 case NAME_ERR_TRAILING_SLASH
:
986 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
987 "trailing slash in name"));
990 case NAME_ERR_MULTIPLE_AT
:
991 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
992 "multiple '@' delimiters in name"));
995 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
996 "permission set is missing '@'"));
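
/*
 * Illustrative sketch (not from the original source): zpool_name_valid()
 * is used below by zpool_open_canfail() and by pool creation; a create-time
 * caller passes isopen = B_FALSE so that the reserved-name checks apply.
 * The helper name is hypothetical.
 */
#if 0
static boolean_t
name_ok_for_create(libzfs_handle_t *hdl, const char *name)
{
	/* "mirror", "raidz*", "spare*" and "log" are rejected here */
	return (zpool_name_valid(hdl, B_FALSE, name));
}
#endif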
1007 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1011 zpool_open_canfail(libzfs_handle_t
*hdl
, const char *pool
)
1013 zpool_handle_t
*zhp
;
1017 * Make sure the pool name is valid.
1019 if (!zpool_name_valid(hdl
, B_TRUE
, pool
)) {
1020 (void) zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1021 dgettext(TEXT_DOMAIN
, "cannot open '%s'"),
1026 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
1029 zhp
->zpool_hdl
= hdl
;
1030 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
1032 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
1038 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "no such pool"));
1039 (void) zfs_error_fmt(hdl
, EZFS_NOENT
,
1040 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), pool
);
1049 * Like the above, but silent on error. Used when iterating over pools (because
1050 * the configuration cache may be out of date).
1053 zpool_open_silent(libzfs_handle_t
*hdl
, const char *pool
, zpool_handle_t
**ret
)
1055 zpool_handle_t
*zhp
;
1058 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
1061 zhp
->zpool_hdl
= hdl
;
1062 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
1064 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
1080 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1084 zpool_open(libzfs_handle_t
*hdl
, const char *pool
)
1086 zpool_handle_t
*zhp
;
1088 if ((zhp
= zpool_open_canfail(hdl
, pool
)) == NULL
)
1091 if (zhp
->zpool_state
== POOL_STATE_UNAVAIL
) {
1092 (void) zfs_error_fmt(hdl
, EZFS_POOLUNAVAIL
,
1093 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), zhp
->zpool_name
);
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
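
/*
 * Illustrative sketch (not from the original source): the basic handle
 * life cycle using the accessors above.  libzfs_init()/libzfs_fini() and
 * the helper name are assumptions for the example.
 */
#if 0
static void
report_pool_state(const char *name)
{
	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp = zpool_open_canfail(hdl, name);

	if (zhp != NULL) {
		(void) printf("%s: %s\n", zpool_get_name(zhp),
		    zpool_pool_state_to_name(zpool_get_state(zhp)));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
}
#endif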
1136 * Create the named pool, using the provided vdev list. It is assumed
1137 * that the consumer has already validated the contents of the nvlist, so we
1138 * don't have to worry about error semantics.
1141 zpool_create(libzfs_handle_t
*hdl
, const char *pool
, nvlist_t
*nvroot
,
1142 nvlist_t
*props
, nvlist_t
*fsprops
)
1144 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
1145 nvlist_t
*zc_fsprops
= NULL
;
1146 nvlist_t
*zc_props
= NULL
;
1151 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1152 "cannot create '%s'"), pool
);
1154 if (!zpool_name_valid(hdl
, B_FALSE
, pool
))
1155 return (zfs_error(hdl
, EZFS_INVALIDNAME
, msg
));
1157 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1161 prop_flags_t flags
= { .create
= B_TRUE
, .import
= B_FALSE
};
1163 if ((zc_props
= zpool_valid_proplist(hdl
, pool
, props
,
1164 SPA_VERSION_1
, flags
, msg
)) == NULL
) {
1173 zoned
= ((nvlist_lookup_string(fsprops
,
1174 zfs_prop_to_name(ZFS_PROP_ZONED
), &zonestr
) == 0) &&
1175 strcmp(zonestr
, "on") == 0);
1177 if ((zc_fsprops
= zfs_valid_proplist(hdl
,
1178 ZFS_TYPE_FILESYSTEM
, fsprops
, zoned
, NULL
, msg
)) == NULL
) {
1182 (nvlist_alloc(&zc_props
, NV_UNIQUE_NAME
, 0) != 0)) {
1185 if (nvlist_add_nvlist(zc_props
,
1186 ZPOOL_ROOTFS_PROPS
, zc_fsprops
) != 0) {
1191 if (zc_props
&& zcmd_write_src_nvlist(hdl
, &zc
, zc_props
) != 0)
1194 (void) strlcpy(zc
.zc_name
, pool
, sizeof (zc
.zc_name
));
1196 if ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_CREATE
, &zc
)) != 0) {
1198 zcmd_free_nvlists(&zc
);
1199 nvlist_free(zc_props
);
1200 nvlist_free(zc_fsprops
);
		/*
		 * This can happen if the user has specified the same
		 * device multiple times.  We can't reliably detect this
		 * until we try to add it and see we already have a
		 * label.  This can also happen if the device is
		 * part of an active md or lvm device.
		 */
1211 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1212 "one or more vdevs refer to the same device, or one of\n"
1213 "the devices is part of an active md or lvm device"));
1214 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1218 * This occurs when one of the devices is below
1219 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1220 * device was the problem device since there's no
1221 * reliable way to determine device size from userland.
1226 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1228 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1229 "one or more devices is less than the "
1230 "minimum size (%s)"), buf
);
1232 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1235 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1236 "one or more devices is out of space"));
1237 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1240 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1241 "cache device must be a disk or disk slice"));
1242 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1245 return (zpool_standard_error(hdl
, errno
, msg
));
1250 * If this is an alternate root pool, then we automatically set the
1251 * mountpoint of the root dataset to be '/'.
1253 if (nvlist_lookup_string(props
, zpool_prop_to_name(ZPOOL_PROP_ALTROOT
),
1257 verify((zhp
= zfs_open(hdl
, pool
, ZFS_TYPE_DATASET
)) != NULL
);
1258 verify(zfs_prop_set(zhp
, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT
),
1265 zcmd_free_nvlists(&zc
);
1266 nvlist_free(zc_props
);
1267 nvlist_free(zc_fsprops
);
1272 * Destroy the given pool. It is up to the caller to ensure that there are no
1273 * datasets left in the pool.
1276 zpool_destroy(zpool_handle_t
*zhp
, const char *log_str
)
1278 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
1279 zfs_handle_t
*zfp
= NULL
;
1280 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1283 if (zhp
->zpool_state
== POOL_STATE_ACTIVE
&&
1284 (zfp
= zfs_open(hdl
, zhp
->zpool_name
, ZFS_TYPE_FILESYSTEM
)) == NULL
)
1287 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1288 zc
.zc_history
= (uint64_t)(uintptr_t)log_str
;
1290 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_DESTROY
, &zc
) != 0) {
1291 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1292 "cannot destroy '%s'"), zhp
->zpool_name
);
1294 if (errno
== EROFS
) {
1295 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1296 "one or more devices is read only"));
1297 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1299 (void) zpool_standard_error(hdl
, errno
, msg
);
1308 remove_mountpoint(zfp
);
1316 * Add the given vdevs to the pool. The caller must have already performed the
1317 * necessary verification to ensure that the vdev specification is well-formed.
1320 zpool_add(zpool_handle_t
*zhp
, nvlist_t
*nvroot
)
1322 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
1324 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1326 nvlist_t
**spares
, **l2cache
;
1327 uint_t nspares
, nl2cache
;
1329 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1330 "cannot add to '%s'"), zhp
->zpool_name
);
1332 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1333 SPA_VERSION_SPARES
&&
1334 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_SPARES
,
1335 &spares
, &nspares
) == 0) {
1336 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1337 "upgraded to add hot spares"));
1338 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1341 #if defined(__sun__) || defined(__sun)
1342 if (zpool_is_bootable(zhp
) && nvlist_lookup_nvlist_array(nvroot
,
1343 ZPOOL_CONFIG_SPARES
, &spares
, &nspares
) == 0) {
1346 for (s
= 0; s
< nspares
; s
++) {
1349 if (nvlist_lookup_string(spares
[s
], ZPOOL_CONFIG_PATH
,
1350 &path
) == 0 && pool_uses_efi(spares
[s
])) {
1351 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1352 "device '%s' contains an EFI label and "
1353 "cannot be used on root pools."),
1354 zpool_vdev_name(hdl
, NULL
, spares
[s
],
1356 return (zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
));
1362 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1363 SPA_VERSION_L2CACHE
&&
1364 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_L2CACHE
,
1365 &l2cache
, &nl2cache
) == 0) {
1366 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1367 "upgraded to add cache devices"));
1368 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1371 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1373 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1375 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_ADD
, &zc
) != 0) {
1379 * This can happen if the user has specified the same
1380 * device multiple times. We can't reliably detect this
1381 * until we try to add it and see we already have a
1384 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1385 "one or more vdevs refer to the same device"));
1386 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
		/*
		 * This occurs when one of the devices is below
		 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
		 * device was the problem device since there's no
		 * reliable way to determine device size from userland.
		 */
1399 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1401 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1402 "device is less than the minimum "
1405 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1409 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1410 "pool must be upgraded to add these vdevs"));
1411 (void) zfs_error(hdl
, EZFS_BADVERSION
, msg
);
1415 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1416 "root pool can not have multiple vdevs"
1417 " or separate logs"));
1418 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
);
1422 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1423 "cache device must be a disk or disk slice"));
1424 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1428 (void) zpool_standard_error(hdl
, errno
, msg
);
1436 zcmd_free_nvlists(&zc
);
1442 * Exports the pool from the system. The caller must ensure that there are no
1443 * mounted datasets in the pool.
1446 zpool_export_common(zpool_handle_t
*zhp
, boolean_t force
, boolean_t hardforce
,
1447 const char *log_str
)
1449 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
1452 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1453 "cannot export '%s'"), zhp
->zpool_name
);
1455 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1456 zc
.zc_cookie
= force
;
1457 zc
.zc_guid
= hardforce
;
1458 zc
.zc_history
= (uint64_t)(uintptr_t)log_str
;
1460 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_EXPORT
, &zc
) != 0) {
1463 zfs_error_aux(zhp
->zpool_hdl
, dgettext(TEXT_DOMAIN
,
1464 "use '-f' to override the following errors:\n"
1465 "'%s' has an active shared spare which could be"
1466 " used by other pools once '%s' is exported."),
1467 zhp
->zpool_name
, zhp
->zpool_name
);
1468 return (zfs_error(zhp
->zpool_hdl
, EZFS_ACTIVE_SPARE
,
1471 return (zpool_standard_error_fmt(zhp
->zpool_hdl
, errno
,
1480 zpool_export(zpool_handle_t
*zhp
, boolean_t force
, const char *log_str
)
1482 return (zpool_export_common(zhp
, force
, B_FALSE
, log_str
));
1486 zpool_export_force(zpool_handle_t
*zhp
, const char *log_str
)
1488 return (zpool_export_common(zhp
, B_TRUE
, B_TRUE
, log_str
));
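
/*
 * Illustrative sketch (not from the original source): exporting a pool via
 * the wrappers above.  The history string is an example; passing B_TRUE
 * for 'force' corresponds to "zpool export -f".
 */
#if 0
static int
export_pool(zpool_handle_t *zhp, boolean_t force)
{
	const char *log_str = "export requested by example code";

	return (zpool_export(zhp, force, log_str));
}
#endif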
1492 zpool_rewind_exclaim(libzfs_handle_t
*hdl
, const char *name
, boolean_t dryrun
,
1495 nvlist_t
*nv
= NULL
;
1501 if (!hdl
->libzfs_printerr
|| config
== NULL
)
1504 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nv
) != 0 ||
1505 nvlist_lookup_nvlist(nv
, ZPOOL_CONFIG_REWIND_INFO
, &nv
) != 0) {
1509 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_TIME
, &rewindto
) != 0)
1511 (void) nvlist_lookup_int64(nv
, ZPOOL_CONFIG_REWIND_TIME
, &loss
);
1513 if (localtime_r((time_t *)&rewindto
, &t
) != NULL
&&
1514 strftime(timestr
, 128, "%c", &t
) != 0) {
1516 (void) printf(dgettext(TEXT_DOMAIN
,
1517 "Would be able to return %s "
1518 "to its state as of %s.\n"),
1521 (void) printf(dgettext(TEXT_DOMAIN
,
1522 "Pool %s returned to its state as of %s.\n"),
1526 (void) printf(dgettext(TEXT_DOMAIN
,
1527 "%s approximately %lld "),
1528 dryrun
? "Would discard" : "Discarded",
1529 ((longlong_t
)loss
+ 30) / 60);
1530 (void) printf(dgettext(TEXT_DOMAIN
,
1531 "minutes of transactions.\n"));
1532 } else if (loss
> 0) {
1533 (void) printf(dgettext(TEXT_DOMAIN
,
1534 "%s approximately %lld "),
1535 dryrun
? "Would discard" : "Discarded",
1537 (void) printf(dgettext(TEXT_DOMAIN
,
1538 "seconds of transactions.\n"));
1544 zpool_explain_recover(libzfs_handle_t
*hdl
, const char *name
, int reason
,
1547 nvlist_t
*nv
= NULL
;
1549 uint64_t edata
= UINT64_MAX
;
1554 if (!hdl
->libzfs_printerr
)
1558 (void) printf(dgettext(TEXT_DOMAIN
, "action: "));
1560 (void) printf(dgettext(TEXT_DOMAIN
, "\t"));
1562 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1563 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nv
) != 0 ||
1564 nvlist_lookup_nvlist(nv
, ZPOOL_CONFIG_REWIND_INFO
, &nv
) != 0 ||
1565 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_TIME
, &rewindto
) != 0)
1568 (void) nvlist_lookup_int64(nv
, ZPOOL_CONFIG_REWIND_TIME
, &loss
);
1569 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_DATA_ERRORS
,
1572 (void) printf(dgettext(TEXT_DOMAIN
,
1573 "Recovery is possible, but will result in some data loss.\n"));
1575 if (localtime_r((time_t *)&rewindto
, &t
) != NULL
&&
1576 strftime(timestr
, 128, "%c", &t
) != 0) {
1577 (void) printf(dgettext(TEXT_DOMAIN
,
1578 "\tReturning the pool to its state as of %s\n"
1579 "\tshould correct the problem. "),
1582 (void) printf(dgettext(TEXT_DOMAIN
,
1583 "\tReverting the pool to an earlier state "
1584 "should correct the problem.\n\t"));
1588 (void) printf(dgettext(TEXT_DOMAIN
,
1589 "Approximately %lld minutes of data\n"
1590 "\tmust be discarded, irreversibly. "),
1591 ((longlong_t
)loss
+ 30) / 60);
1592 } else if (loss
> 0) {
1593 (void) printf(dgettext(TEXT_DOMAIN
,
1594 "Approximately %lld seconds of data\n"
1595 "\tmust be discarded, irreversibly. "),
1598 if (edata
!= 0 && edata
!= UINT64_MAX
) {
1600 (void) printf(dgettext(TEXT_DOMAIN
,
1601 "After rewind, at least\n"
1602 "\tone persistent user-data error will remain. "));
1604 (void) printf(dgettext(TEXT_DOMAIN
,
1605 "After rewind, several\n"
1606 "\tpersistent user-data errors will remain. "));
1609 (void) printf(dgettext(TEXT_DOMAIN
,
1610 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1611 reason
>= 0 ? "clear" : "import", name
);
1613 (void) printf(dgettext(TEXT_DOMAIN
,
1614 "A scrub of the pool\n"
1615 "\tis strongly recommended after recovery.\n"));
1619 (void) printf(dgettext(TEXT_DOMAIN
,
1620 "Destroy and re-create the pool from\n\ta backup source.\n"));
1624 * zpool_import() is a contracted interface. Should be kept the same
1627 * Applications should use zpool_import_props() to import a pool with
1628 * new properties value to be set.
1631 zpool_import(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1634 nvlist_t
*props
= NULL
;
1637 if (altroot
!= NULL
) {
1638 if (nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) != 0) {
1639 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1640 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1644 if (nvlist_add_string(props
,
1645 zpool_prop_to_name(ZPOOL_PROP_ALTROOT
), altroot
) != 0 ||
1646 nvlist_add_string(props
,
1647 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE
), "none") != 0) {
1649 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1650 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1655 ret
= zpool_import_props(hdl
, config
, newname
, props
,
1663 print_vdev_tree(libzfs_handle_t
*hdl
, const char *name
, nvlist_t
*nv
,
1669 uint64_t is_log
= 0;
1671 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_IS_LOG
,
1675 (void) printf("\t%*s%s%s\n", indent
, "", name
,
1676 is_log
? " [log]" : "");
1678 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
1679 &child
, &children
) != 0)
1682 for (c
= 0; c
< children
; c
++) {
1683 vname
= zpool_vdev_name(hdl
, NULL
, child
[c
], B_TRUE
);
1684 print_vdev_tree(hdl
, vname
, child
[c
], indent
+ 2);
1690 zpool_print_unsup_feat(nvlist_t
*config
)
1692 nvlist_t
*nvinfo
, *unsup_feat
;
1695 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) ==
1697 verify(nvlist_lookup_nvlist(nvinfo
, ZPOOL_CONFIG_UNSUP_FEAT
,
1700 for (nvp
= nvlist_next_nvpair(unsup_feat
, NULL
); nvp
!= NULL
;
1701 nvp
= nvlist_next_nvpair(unsup_feat
, nvp
)) {
1704 verify(nvpair_type(nvp
) == DATA_TYPE_STRING
);
1705 verify(nvpair_value_string(nvp
, &desc
) == 0);
1707 if (strlen(desc
) > 0)
1708 (void) printf("\t%s (%s)\n", nvpair_name(nvp
), desc
);
1710 (void) printf("\t%s\n", nvpair_name(nvp
));
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
1721 zpool_import_props(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1722 nvlist_t
*props
, int flags
)
1724 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
1725 zpool_rewind_policy_t policy
;
1726 nvlist_t
*nv
= NULL
;
1727 nvlist_t
*nvinfo
= NULL
;
1728 nvlist_t
*missing
= NULL
;
1735 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1738 (void) snprintf(errbuf
, sizeof (errbuf
), dgettext(TEXT_DOMAIN
,
1739 "cannot import pool '%s'"), origname
);
1741 if (newname
!= NULL
) {
1742 if (!zpool_name_valid(hdl
, B_FALSE
, newname
))
1743 return (zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1744 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1746 thename
= (char *)newname
;
1753 prop_flags_t flags
= { .create
= B_FALSE
, .import
= B_TRUE
};
1755 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_VERSION
,
1758 if ((props
= zpool_valid_proplist(hdl
, origname
,
1759 props
, version
, flags
, errbuf
)) == NULL
) {
1761 } else if (zcmd_write_src_nvlist(hdl
, &zc
, props
) != 0) {
1767 (void) strlcpy(zc
.zc_name
, thename
, sizeof (zc
.zc_name
));
1769 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1772 if (zcmd_write_conf_nvlist(hdl
, &zc
, config
) != 0) {
1776 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, zc
.zc_nvlist_conf_size
* 2) != 0) {
1781 zc
.zc_cookie
= flags
;
1782 while ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_IMPORT
, &zc
)) != 0 &&
1784 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
1785 zcmd_free_nvlists(&zc
);
1792 (void) zcmd_read_dst_nvlist(hdl
, &zc
, &nv
);
1793 zpool_get_rewind_policy(config
, &policy
);
1799 * Dry-run failed, but we print out what success
1800 * looks like if we found a best txg
1802 if (policy
.zrp_request
& ZPOOL_TRY_REWIND
) {
1803 zpool_rewind_exclaim(hdl
, newname
? origname
: thename
,
1809 if (newname
== NULL
)
1810 (void) snprintf(desc
, sizeof (desc
),
1811 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1814 (void) snprintf(desc
, sizeof (desc
),
1815 dgettext(TEXT_DOMAIN
, "cannot import '%s' as '%s'"),
1820 if (nv
!= NULL
&& nvlist_lookup_nvlist(nv
,
1821 ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) == 0 &&
1822 nvlist_exists(nvinfo
, ZPOOL_CONFIG_UNSUP_FEAT
)) {
1823 (void) printf(dgettext(TEXT_DOMAIN
, "This "
1824 "pool uses the following feature(s) not "
1825 "supported by this system:\n"));
1826 zpool_print_unsup_feat(nv
);
1827 if (nvlist_exists(nvinfo
,
1828 ZPOOL_CONFIG_CAN_RDONLY
)) {
1829 (void) printf(dgettext(TEXT_DOMAIN
,
1830 "All unsupported features are only "
1831 "required for writing to the pool."
1832 "\nThe pool can be imported using "
1833 "'-o readonly=on'.\n"));
1837 * Unsupported version.
1839 (void) zfs_error(hdl
, EZFS_BADVERSION
, desc
);
1843 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, desc
);
1847 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1848 "one or more devices is read only"));
1849 (void) zfs_error(hdl
, EZFS_BADDEV
, desc
);
1853 if (nv
&& nvlist_lookup_nvlist(nv
,
1854 ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) == 0 &&
1855 nvlist_lookup_nvlist(nvinfo
,
1856 ZPOOL_CONFIG_MISSING_DEVICES
, &missing
) == 0) {
1857 (void) printf(dgettext(TEXT_DOMAIN
,
1858 "The devices below are missing, use "
1859 "'-m' to import the pool anyway:\n"));
1860 print_vdev_tree(hdl
, NULL
, missing
, 2);
1861 (void) printf("\n");
1863 (void) zpool_standard_error(hdl
, error
, desc
);
1867 (void) zpool_standard_error(hdl
, error
, desc
);
1871 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1872 "one or more devices are already in use\n"));
1873 (void) zfs_error(hdl
, EZFS_BADDEV
, desc
);
1877 (void) zpool_standard_error(hdl
, error
, desc
);
1878 zpool_explain_recover(hdl
,
1879 newname
? origname
: thename
, -error
, nv
);
1886 zpool_handle_t
*zhp
;
1889 * This should never fail, but play it safe anyway.
1891 if (zpool_open_silent(hdl
, thename
, &zhp
) != 0)
1893 else if (zhp
!= NULL
)
1895 if (policy
.zrp_request
&
1896 (ZPOOL_DO_REWIND
| ZPOOL_TRY_REWIND
)) {
1897 zpool_rewind_exclaim(hdl
, newname
? origname
: thename
,
1898 ((policy
.zrp_request
& ZPOOL_TRY_REWIND
) != 0), nv
);
1904 zcmd_free_nvlists(&zc
);
1914 zpool_scan(zpool_handle_t
*zhp
, pool_scan_func_t func
)
1916 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
1918 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1920 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1921 zc
.zc_cookie
= func
;
1923 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_SCAN
, &zc
) == 0 ||
1924 (errno
== ENOENT
&& func
!= POOL_SCAN_NONE
))
1927 if (func
== POOL_SCAN_SCRUB
) {
1928 (void) snprintf(msg
, sizeof (msg
),
1929 dgettext(TEXT_DOMAIN
, "cannot scrub %s"), zc
.zc_name
);
1930 } else if (func
== POOL_SCAN_NONE
) {
1931 (void) snprintf(msg
, sizeof (msg
),
1932 dgettext(TEXT_DOMAIN
, "cannot cancel scrubbing %s"),
1935 assert(!"unexpected result");
1938 if (errno
== EBUSY
) {
1940 pool_scan_stat_t
*ps
= NULL
;
1943 verify(nvlist_lookup_nvlist(zhp
->zpool_config
,
1944 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
1945 (void) nvlist_lookup_uint64_array(nvroot
,
1946 ZPOOL_CONFIG_SCAN_STATS
, (uint64_t **)&ps
, &psc
);
1947 if (ps
&& ps
->pss_func
== POOL_SCAN_SCRUB
)
1948 return (zfs_error(hdl
, EZFS_SCRUBBING
, msg
));
1950 return (zfs_error(hdl
, EZFS_RESILVERING
, msg
));
1951 } else if (errno
== ENOENT
) {
1952 return (zfs_error(hdl
, EZFS_NO_SCRUB
, msg
));
1954 return (zpool_standard_error(hdl
, errno
, msg
));
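
/*
 * Illustrative sketch (not from the original source): starting or
 * cancelling a scrub with zpool_scan() above, mirroring what
 * "zpool scrub [-s] <pool>" does.
 */
#if 0
static int
scrub_pool(zpool_handle_t *zhp, boolean_t stop)
{
	return (zpool_scan(zhp, stop ? POOL_SCAN_NONE : POOL_SCAN_SCRUB));
}
#endif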
/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
1965 vdev_to_nvlist_iter(nvlist_t
*nv
, nvlist_t
*search
, boolean_t
*avail_spare
,
1966 boolean_t
*l2cache
, boolean_t
*log
)
1973 nvpair_t
*pair
= nvlist_next_nvpair(search
, NULL
);
1975 /* Nothing to look for */
1976 if (search
== NULL
|| pair
== NULL
)
1979 /* Obtain the key we will use to search */
1980 srchkey
= nvpair_name(pair
);
1982 switch (nvpair_type(pair
)) {
1983 case DATA_TYPE_UINT64
:
1984 if (strcmp(srchkey
, ZPOOL_CONFIG_GUID
) == 0) {
1985 uint64_t srchval
, theguid
;
1987 verify(nvpair_value_uint64(pair
, &srchval
) == 0);
1988 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
1990 if (theguid
== srchval
)
1995 case DATA_TYPE_STRING
: {
1996 char *srchval
, *val
;
1998 verify(nvpair_value_string(pair
, &srchval
) == 0);
1999 if (nvlist_lookup_string(nv
, srchkey
, &val
) != 0)
2003 * Search for the requested value. Special cases:
2005 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2006 * "-part1", or "p1". The suffix is hidden from the user,
2007 * but included in the string, so this matches around it.
2008 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
2009 * is used to check all possible expanded paths.
2010 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2012 * Otherwise, all other searches are simple string compares.
2014 if (strcmp(srchkey
, ZPOOL_CONFIG_PATH
) == 0) {
2015 uint64_t wholedisk
= 0;
2017 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
2019 if (zfs_strcmp_pathname(srchval
, val
, wholedisk
) == 0)
2022 } else if (strcmp(srchkey
, ZPOOL_CONFIG_TYPE
) == 0 && val
) {
2023 char *type
, *idx
, *end
, *p
;
2024 uint64_t id
, vdev_id
;
2027 * Determine our vdev type, keeping in mind
2028 * that the srchval is composed of a type and
2029 * vdev id pair (i.e. mirror-4).
2031 if ((type
= strdup(srchval
)) == NULL
)
2034 if ((p
= strrchr(type
, '-')) == NULL
) {
2042 * If the types don't match then keep looking.
2044 if (strncmp(val
, type
, strlen(val
)) != 0) {
2049 verify(strncmp(type
, VDEV_TYPE_RAIDZ
,
2050 strlen(VDEV_TYPE_RAIDZ
)) == 0 ||
2051 strncmp(type
, VDEV_TYPE_MIRROR
,
2052 strlen(VDEV_TYPE_MIRROR
)) == 0);
2053 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_ID
,
2057 vdev_id
= strtoull(idx
, &end
, 10);
2064 * Now verify that we have the correct vdev id.
2073 if (strcmp(srchval
, val
) == 0)
2082 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
2083 &child
, &children
) != 0)
2086 for (c
= 0; c
< children
; c
++) {
2087 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2088 avail_spare
, l2cache
, NULL
)) != NULL
) {
2090 * The 'is_log' value is only set for the toplevel
2091 * vdev, not the leaf vdevs. So we always lookup the
2092 * log device from the root of the vdev tree (where
2093 * 'log' is non-NULL).
2096 nvlist_lookup_uint64(child
[c
],
2097 ZPOOL_CONFIG_IS_LOG
, &is_log
) == 0 &&
2105 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_SPARES
,
2106 &child
, &children
) == 0) {
2107 for (c
= 0; c
< children
; c
++) {
2108 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2109 avail_spare
, l2cache
, NULL
)) != NULL
) {
2110 *avail_spare
= B_TRUE
;
2116 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_L2CACHE
,
2117 &child
, &children
) == 0) {
2118 for (c
= 0; c
< children
; c
++) {
2119 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2120 avail_spare
, l2cache
, NULL
)) != NULL
) {
2131 * Given a physical path (minus the "/devices" prefix), find the
2135 zpool_find_vdev_by_physpath(zpool_handle_t
*zhp
, const char *ppath
,
2136 boolean_t
*avail_spare
, boolean_t
*l2cache
, boolean_t
*log
)
2138 nvlist_t
*search
, *nvroot
, *ret
;
2140 verify(nvlist_alloc(&search
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2141 verify(nvlist_add_string(search
, ZPOOL_CONFIG_PHYS_PATH
, ppath
) == 0);
2143 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2146 *avail_spare
= B_FALSE
;
2150 ret
= vdev_to_nvlist_iter(nvroot
, search
, avail_spare
, l2cache
, log
);
2151 nvlist_free(search
);
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
2160 zpool_vdev_is_interior(const char *name
)
2162 if (strncmp(name
, VDEV_TYPE_RAIDZ
, strlen(VDEV_TYPE_RAIDZ
)) == 0 ||
2163 strncmp(name
, VDEV_TYPE_MIRROR
, strlen(VDEV_TYPE_MIRROR
)) == 0)
2169 zpool_find_vdev(zpool_handle_t
*zhp
, const char *path
, boolean_t
*avail_spare
,
2170 boolean_t
*l2cache
, boolean_t
*log
)
2173 nvlist_t
*nvroot
, *search
, *ret
;
2176 verify(nvlist_alloc(&search
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2178 guid
= strtoull(path
, &end
, 10);
2179 if (guid
!= 0 && *end
== '\0') {
2180 verify(nvlist_add_uint64(search
, ZPOOL_CONFIG_GUID
, guid
) == 0);
2181 } else if (zpool_vdev_is_interior(path
)) {
2182 verify(nvlist_add_string(search
, ZPOOL_CONFIG_TYPE
, path
) == 0);
2184 verify(nvlist_add_string(search
, ZPOOL_CONFIG_PATH
, path
) == 0);
2187 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2190 *avail_spare
= B_FALSE
;
2194 ret
= vdev_to_nvlist_iter(nvroot
, search
, avail_spare
, l2cache
, log
);
2195 nvlist_free(search
);
2201 vdev_online(nvlist_t
*nv
)
2205 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_OFFLINE
, &ival
) == 0 ||
2206 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_FAULTED
, &ival
) == 0 ||
2207 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_REMOVED
, &ival
) == 0)
2214 * Helper function for zpool_get_physpaths().
2217 vdev_get_one_physpath(nvlist_t
*config
, char *physpath
, size_t physpath_size
,
2218 size_t *bytes_written
)
2220 size_t bytes_left
, pos
, rsz
;
2224 if (nvlist_lookup_string(config
, ZPOOL_CONFIG_PHYS_PATH
,
2226 return (EZFS_NODEVICE
);
2228 pos
= *bytes_written
;
2229 bytes_left
= physpath_size
- pos
;
2230 format
= (pos
== 0) ? "%s" : " %s";
2232 rsz
= snprintf(physpath
+ pos
, bytes_left
, format
, tmppath
);
2233 *bytes_written
+= rsz
;
2235 if (rsz
>= bytes_left
) {
2236 /* if physpath was not copied properly, clear it */
2237 if (bytes_left
!= 0) {
2240 return (EZFS_NOSPC
);
2246 vdev_get_physpaths(nvlist_t
*nv
, char *physpath
, size_t phypath_size
,
2247 size_t *rsz
, boolean_t is_spare
)
2252 if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_TYPE
, &type
) != 0)
2253 return (EZFS_INVALCONFIG
);
2255 if (strcmp(type
, VDEV_TYPE_DISK
) == 0) {
2257 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2258 * For a spare vdev, we only want to boot from the active
2263 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_IS_SPARE
,
2266 return (EZFS_INVALCONFIG
);
2269 if (vdev_online(nv
)) {
2270 if ((ret
= vdev_get_one_physpath(nv
, physpath
,
2271 phypath_size
, rsz
)) != 0)
2274 } else if (strcmp(type
, VDEV_TYPE_MIRROR
) == 0 ||
2275 strcmp(type
, VDEV_TYPE_REPLACING
) == 0 ||
2276 (is_spare
= (strcmp(type
, VDEV_TYPE_SPARE
) == 0))) {
2281 if (nvlist_lookup_nvlist_array(nv
,
2282 ZPOOL_CONFIG_CHILDREN
, &child
, &count
) != 0)
2283 return (EZFS_INVALCONFIG
);
2285 for (i
= 0; i
< count
; i
++) {
2286 ret
= vdev_get_physpaths(child
[i
], physpath
,
2287 phypath_size
, rsz
, is_spare
);
2288 if (ret
== EZFS_NOSPC
)
2293 return (EZFS_POOL_INVALARG
);
2297 * Get phys_path for a root pool config.
2298 * Return 0 on success; non-zero on failure.
2301 zpool_get_config_physpath(nvlist_t
*config
, char *physpath
, size_t phypath_size
)
2304 nvlist_t
*vdev_root
;
2311 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
2313 return (EZFS_INVALCONFIG
);
2315 if (nvlist_lookup_string(vdev_root
, ZPOOL_CONFIG_TYPE
, &type
) != 0 ||
2316 nvlist_lookup_nvlist_array(vdev_root
, ZPOOL_CONFIG_CHILDREN
,
2317 &child
, &count
) != 0)
2318 return (EZFS_INVALCONFIG
);
2320 #if defined(__sun__) || defined(__sun)
2322 * root pool can not have EFI labeled disks and can only have
2323 * a single top-level vdev.
2325 if (strcmp(type
, VDEV_TYPE_ROOT
) != 0 || count
!= 1 ||
2326 pool_uses_efi(vdev_root
))
2327 return (EZFS_POOL_INVALARG
);
2330 (void) vdev_get_physpaths(child
[0], physpath
, phypath_size
, &rsz
,
2333 /* No online devices */
2335 return (EZFS_NODEVICE
);
2341 * Get phys_path for a root pool
2342 * Return 0 on success; non-zero on failure.
2345 zpool_get_physpath(zpool_handle_t
*zhp
, char *physpath
, size_t phypath_size
)
2347 return (zpool_get_config_physpath(zhp
->zpool_config
, physpath
,
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
2356 zpool_relabel_disk(libzfs_handle_t
*hdl
, const char *path
, const char *msg
)
2360 if ((fd
= open(path
, O_RDWR
|O_DIRECT
)) < 0) {
2361 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2362 "relabel '%s': unable to open device: %d"), path
, errno
);
2363 return (zfs_error(hdl
, EZFS_OPENFAILED
, msg
));
2367 * It's possible that we might encounter an error if the device
2368 * does not have any unallocated space left. If so, we simply
2369 * ignore that error and continue on.
2371 * Also, we don't call efi_rescan() - that would just return EBUSY.
2372 * The module will do it for us in vdev_disk_open().
2374 error
= efi_use_whole_disk(fd
);
2376 if (error
&& error
!= VT_ENOSPC
) {
2377 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2378 "relabel '%s': unable to read disk capacity"), path
);
2379 return (zfs_error(hdl
, EZFS_NOCAP
, msg
));
2385 * Bring the specified vdev online. The 'flags' parameter is a set of the
2386 * ZFS_ONLINE_* flags.
2389 zpool_vdev_online(zpool_handle_t
*zhp
, const char *path
, int flags
,
2390 vdev_state_t
*newstate
)
2392 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2395 boolean_t avail_spare
, l2cache
, islog
;
2396 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2399 if (flags
& ZFS_ONLINE_EXPAND
) {
2400 (void) snprintf(msg
, sizeof (msg
),
2401 dgettext(TEXT_DOMAIN
, "cannot expand %s"), path
);
2403 (void) snprintf(msg
, sizeof (msg
),
2404 dgettext(TEXT_DOMAIN
, "cannot online %s"), path
);
2407 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2408 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2410 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2412 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2415 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2417 if (flags
& ZFS_ONLINE_EXPAND
||
2418 zpool_get_prop_int(zhp
, ZPOOL_PROP_AUTOEXPAND
, NULL
)) {
2419 uint64_t wholedisk
= 0;
2421 (void) nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_WHOLE_DISK
,
2425 * XXX - L2ARC 1.0 devices can't support expansion.
2428 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2429 "cannot expand cache devices"));
2430 return (zfs_error(hdl
, EZFS_VDEVNOTSUP
, msg
));
2434 const char *fullpath
= path
;
2435 char buf
[MAXPATHLEN
];
2437 if (path
[0] != '/') {
2438 error
= zfs_resolve_shortname(path
, buf
,
2441 return (zfs_error(hdl
, EZFS_NODEVICE
,
2447 error
= zpool_relabel_disk(hdl
, fullpath
, msg
);
2453 zc
.zc_cookie
= VDEV_STATE_ONLINE
;
2456 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) != 0) {
2457 if (errno
== EINVAL
) {
2458 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "was split "
2459 "from this pool into a new one. Use '%s' "
2460 "instead"), "zpool detach");
2461 return (zfs_error(hdl
, EZFS_POSTSPLIT_ONLINE
, msg
));
2463 return (zpool_standard_error(hdl
, errno
, msg
));
2466 *newstate
= zc
.zc_cookie
;
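
/*
 * Illustrative sketch (not from the original source): toggling a leaf vdev
 * with the state helpers above and below.  The device path handling is an
 * example; 'newstate' receives the resulting vdev_state_t.
 */
#if 0
static int
cycle_vdev(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;

	if (zpool_vdev_offline(zhp, path, B_TRUE) != 0)	/* temporary */
		return (-1);
	return (zpool_vdev_online(zhp, path, 0, &newstate));
}
#endif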
2471 * Take the specified vdev offline
2474 zpool_vdev_offline(zpool_handle_t
*zhp
, const char *path
, boolean_t istmp
)
2476 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2479 boolean_t avail_spare
, l2cache
;
2480 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2482 (void) snprintf(msg
, sizeof (msg
),
2483 dgettext(TEXT_DOMAIN
, "cannot offline %s"), path
);
2485 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2486 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2488 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2490 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2493 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2495 zc
.zc_cookie
= VDEV_STATE_OFFLINE
;
2496 zc
.zc_obj
= istmp
? ZFS_OFFLINE_TEMPORARY
: 0;
2498 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2505 * There are no other replicas of this device.
2507 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
2511 * The log device has unplayed logs
2513 return (zfs_error(hdl
, EZFS_UNPLAYED_LOGS
, msg
));
2516 return (zpool_standard_error(hdl
, errno
, msg
));
2521 * Mark the given vdev faulted.
2524 zpool_vdev_fault(zpool_handle_t
*zhp
, uint64_t guid
, vdev_aux_t aux
)
2526 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2528 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2530 (void) snprintf(msg
, sizeof (msg
),
2531 dgettext(TEXT_DOMAIN
, "cannot fault %llu"), (u_longlong_t
)guid
);
2533 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2535 zc
.zc_cookie
= VDEV_STATE_FAULTED
;
2538 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2545 * There are no other replicas of this device.
2547 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
2550 return (zpool_standard_error(hdl
, errno
, msg
));
2556 * Mark the given vdev degraded.
2559 zpool_vdev_degrade(zpool_handle_t
*zhp
, uint64_t guid
, vdev_aux_t aux
)
2561 zfs_cmd_t zc
= {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2563 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2565 (void) snprintf(msg
, sizeof (msg
),
2566 dgettext(TEXT_DOMAIN
, "cannot degrade %llu"), (u_longlong_t
)guid
);
2568 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2570 zc
.zc_cookie
= VDEV_STATE_DEGRADED
;
2573 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2576 return (zpool_standard_error(hdl
, errno
, msg
));
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

#if defined(__sun__) || defined(__sun)
	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}
#endif

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
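/*
 * Usage sketch (illustrative): replacing old_disk with new_disk.  The
 * nvroot describing the replacement is normally built by the zpool
 * command; here it is assumed to be a root vdev with a single
 * VDEV_TYPE_DISK child describing "/dev/sdd".
 *
 *	// replacing != 0 corresponds to 'zpool replace';
 *	// replacing == 0 corresponds to 'zpool attach'.
 *	if (zpool_vdev_attach(zhp, "/dev/sdb", "/dev/sdd", nvroot, 1) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */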
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0, vcount = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
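/*
 * Usage sketch (illustrative): splitting "tank" into a new pool "tank2",
 * letting the library pick the last disk of each mirror.  Pool names are
 * placeholders; with an all-zero splitflags_t the new pool is split and
 * then exported, and newroot is freed by the caller as described above.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */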
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
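/*
 * Usage sketch (illustrative): removing a cache device.  As the comment
 * above notes, this call is limited to hot spares, cache devices, and
 * (pool version permitting) log devices; the device name is a placeholder.
 *
 *	if (zpool_vdev_remove(zhp, "/dev/sde") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */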
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
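/*
 * Usage sketch (illustrative): clearing error counts for a single device
 * with no rewind requested.  This sketch assumes that passing an empty
 * nvlist leaves zpool_get_rewind_policy() at its default (no rewind); the
 * zpool command normally builds an explicit rewind-policy nvlist instead.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	if (zpool_clear(zhp, "/dev/sdb", policy) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 *	fnvlist_free(policy);
 */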
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
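/*
 * Sketch (illustrative): both zpool_reguid() and zpool_reopen() take only
 * the pool handle; the corresponding ioctl does all the work in-kernel.
 *
 *	if (zpool_reguid(zhp) == 0 && zpool_reopen(zhp) == 0)
 *		(void) printf("pool reguided and reopened\n");
 */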
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}
/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
 */
static char *
strip_partition(libzfs_handle_t *hdl, char *path)
{
	char *tmp = zfs_strdup(hdl, path);
	char *part = NULL, *d = NULL;

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d);
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++);
		if (*d == '\0')
			*part = '\0';
	}
	return (tmp);
}
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid, *type;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			return strip_partition(hdl, path);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
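/*
 * Usage sketch (illustrative): walking the uniquified error log.  Each
 * entry is an nvlist holding ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT, which
 * can be turned into a human-readable path with zpool_obj_to_path() below.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			// ZPOOL_ERR_DATASET / ZPOOL_ERR_OBJECT are uint64s
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */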
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	fnvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
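/*
 * Usage sketch (illustrative): dumping the stored command history.  Each
 * record is an nvlist; ZPOOL_HIST_CMD is present for records logged from
 * the command line (other record types carry different members).
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++)
 *			if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		nvlist_free(nvhis);
 *	}
 */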
/*
 * Retrieve the next event.  If there is a new event available 'nvp' will
 * contain a newly allocated nvlist and 'dropped' will be set to the number
 * of missed events since the last call to this function.  When 'nvp' is
 * set to NULL it indicates no new events are available.  In either case
 * the function returns 0 and it is up to the caller to free 'nvp'.  In
 * the case of a fatal error the function will return a non-zero value.
 * When the function is called in blocking mode it will not return until
 * a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, int block, int cleanup_fd)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = cleanup_fd;

	if (!block)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (block)
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
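/*
 * Usage sketch (illustrative): draining pending events without blocking,
 * in the style of 'zpool events'.  cleanup_fd is assumed to be an open
 * descriptor on the ZFS control device, which the kernel uses to track
 * this consumer.
 *
 *	nvlist_t *nv;
 *	int dropped;
 *
 *	for (;;) {
 *		if (zpool_events_next(g_zfs, &nv, &dropped, 0,
 *		    cleanup_fd) != 0)
 *			break;
 *		if (nv == NULL)
 *			break;		// no more events queued
 *		if (dropped > 0)
 *			(void) printf("dropped %d events\n", dropped);
 *		nvlist_free(nv);
 *	}
 */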
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}
/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
static int
zpool_label_disk_wait(char *path, int timeout)
{
	struct stat64 statbuf;
	int i;

	/*
	 * Wait timeout milliseconds for a newly created device to be available
	 * from the given path.  There is a small window when a /dev/ device
	 * will exist and the udev link will not, so we must wait for the
	 * symlink.  Depending on the udev rules this may take a few seconds.
	 */
	for (i = 0; i < timeout; i++) {
		usleep(1000);

		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0))
			return (0);
	}

	return (ENOENT);
}
static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}
#endif

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	/* Wait for the first expected partition to appear. */

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	rval = zpool_label_disk_wait(path, 3000);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));