/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;

		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
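
/*
 * Illustrative usage sketch (not a library entry point): reading a couple
 * of pool properties through zpool_get_prop(). The handle is assumed to
 * come from zpool_open(); the buffer sizes are arbitrary examples.
 */
static void
example_print_pool_props(zpool_handle_t *zhp)
{
	char health[32];
	char cap[32];

	/* Human-readable form (literal == B_FALSE), e.g. "ONLINE". */
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health, sizeof (health),
	    NULL, B_FALSE) == 0)
		(void) printf("health: %s\n", health);

	/* Literal form (literal == B_TRUE), e.g. "42" rather than "42%". */
	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, cap, sizeof (cap),
	    NULL, B_TRUE) == 0)
		(void) printf("capacity: %s\n", cap);
}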
/*
 * Check that the bootfs name is a valid dataset name and that it lives in
 * the same pool it is being set on.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
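
/*
 * Illustrative sketch of how bootfs_name_valid() treats its inputs: for a
 * pool named "rpool", a bootfs of "rpool/ROOT/default" passes, while
 * "tank/ROOT/default" fails because the leading component names a
 * different pool. The dataset names are made-up examples.
 */
static void
example_bootfs_name_checks(void)
{
	boolean_t ok = bootfs_name_valid("rpool", (char *)"rpool/ROOT/default");
	boolean_t bad = bootfs_name_valid("rpool", (char *)"tank/ROOT/default");

	(void) ok;
	(void) bad;
}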
#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
467 struct stat64 statbuf
;
471 if (nvlist_alloc(&retprops
, NV_UNIQUE_NAME
, 0) != 0) {
472 (void) no_memory(hdl
);
477 while ((elem
= nvlist_next_nvpair(props
, elem
)) != NULL
) {
478 const char *propname
= nvpair_name(elem
);
480 prop
= zpool_name_to_prop(propname
);
481 if (prop
== ZPROP_INVAL
&& zpool_prop_feature(propname
)) {
483 char *fname
= strchr(propname
, '@') + 1;
485 err
= zfeature_lookup_name(fname
, NULL
);
487 ASSERT3U(err
, ==, ENOENT
);
488 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
489 "invalid feature '%s'"), fname
);
490 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
494 if (nvpair_type(elem
) != DATA_TYPE_STRING
) {
495 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
496 "'%s' must be a string"), propname
);
497 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
501 (void) nvpair_value_string(elem
, &strval
);
502 if (strcmp(strval
, ZFS_FEATURE_ENABLED
) != 0) {
503 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
504 "property '%s' can only be set to "
505 "'enabled'"), propname
);
506 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
510 if (nvlist_add_uint64(retprops
, propname
, 0) != 0) {
511 (void) no_memory(hdl
);
518 * Make sure this property is valid and applies to this type.
520 if (prop
== ZPROP_INVAL
) {
521 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
522 "invalid property '%s'"), propname
);
523 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
527 if (zpool_prop_readonly(prop
)) {
528 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
529 "is readonly"), propname
);
530 (void) zfs_error(hdl
, EZFS_PROPREADONLY
, errbuf
);
534 if (zprop_parse_value(hdl
, elem
, prop
, ZFS_TYPE_POOL
, retprops
,
535 &strval
, &intval
, errbuf
) != 0)
539 * Perform additional checking for specific properties.
544 case ZPOOL_PROP_VERSION
:
545 if (intval
< version
||
546 !SPA_VERSION_IS_SUPPORTED(intval
)) {
547 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
548 "property '%s' number %d is invalid."),
550 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
555 case ZPOOL_PROP_ASHIFT
:
557 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
558 "property '%s' can only be set at "
559 "creation time"), propname
);
560 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
564 if (intval
!= 0 && (intval
< 9 || intval
> 13)) {
565 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
566 "property '%s' number %d is invalid."),
568 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
573 case ZPOOL_PROP_BOOTFS
:
574 if (flags
.create
|| flags
.import
) {
575 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
576 "property '%s' cannot be set at creation "
577 "or import time"), propname
);
578 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
582 if (version
< SPA_VERSION_BOOTFS
) {
583 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
584 "pool must be upgraded to support "
585 "'%s' property"), propname
);
586 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
591 * bootfs property value has to be a dataset name and
592 * the dataset has to be in the same pool as it sets to.
594 if (strval
[0] != '\0' && !bootfs_name_valid(poolname
,
596 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
597 "is an invalid name"), strval
);
598 (void) zfs_error(hdl
, EZFS_INVALIDNAME
, errbuf
);
602 if ((zhp
= zpool_open_canfail(hdl
, poolname
)) == NULL
) {
603 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
604 "could not open pool '%s'"), poolname
);
605 (void) zfs_error(hdl
, EZFS_OPENFAILED
, errbuf
);
608 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
609 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
611 #if defined(__sun__) || defined(__sun)
613 * bootfs property cannot be set on a disk which has
616 if (pool_uses_efi(nvroot
)) {
617 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
618 "property '%s' not supported on "
619 "EFI labeled devices"), propname
);
620 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, errbuf
);
628 case ZPOOL_PROP_ALTROOT
:
629 if (!flags
.create
&& !flags
.import
) {
630 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
631 "property '%s' can only be set during pool "
632 "creation or import"), propname
);
633 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
637 if (strval
[0] != '/') {
638 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
639 "bad alternate root '%s'"), strval
);
640 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
645 case ZPOOL_PROP_CACHEFILE
:
646 if (strval
[0] == '\0')
649 if (strcmp(strval
, "none") == 0)
652 if (strval
[0] != '/') {
653 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
654 "property '%s' must be empty, an "
655 "absolute path, or 'none'"), propname
);
656 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
660 slash
= strrchr(strval
, '/');
662 if (slash
[1] == '\0' || strcmp(slash
, "/.") == 0 ||
663 strcmp(slash
, "/..") == 0) {
664 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
665 "'%s' is not a valid file"), strval
);
666 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
672 if (strval
[0] != '\0' &&
673 (stat64(strval
, &statbuf
) != 0 ||
674 !S_ISDIR(statbuf
.st_mode
))) {
675 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
676 "'%s' is not a valid directory"),
678 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
685 case ZPOOL_PROP_COMMENT
:
686 for (check
= strval
; *check
!= '\0'; check
++) {
687 if (!isprint(*check
)) {
689 dgettext(TEXT_DOMAIN
,
690 "comment may only have printable "
692 (void) zfs_error(hdl
, EZFS_BADPROP
,
697 if (strlen(strval
) > ZPROP_MAX_COMMENT
) {
698 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
699 "comment must not exceed %d characters"),
701 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
705 case ZPOOL_PROP_READONLY
:
707 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
708 "property '%s' can only be set at "
709 "import time"), propname
);
710 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
714 case ZPOOL_PROP_TNAME
:
716 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
717 "property '%s' can only be set at "
718 "creation time"), propname
);
719 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
728 nvlist_free(retprops
);
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
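
/*
 * Illustrative sketch: setting a pool property by name. Values are passed
 * as strings and are validated/parsed by zpool_valid_proplist() before the
 * ioctl is issued, so numeric and index properties use their textual form.
 * The property value shown is an arbitrary example.
 */
static int
example_set_pool_comment(zpool_handle_t *zhp)
{
	/* Equivalent to: zpool set comment="scratch pool" <pool> */
	return (zpool_set_prop(zhp, "comment", "scratch pool"));
}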
792 zpool_expand_proplist(zpool_handle_t
*zhp
, zprop_list_t
**plp
)
794 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
796 char buf
[ZFS_MAXPROPLEN
];
797 nvlist_t
*features
= NULL
;
800 boolean_t firstexpand
= (NULL
== *plp
);
803 if (zprop_expand_list(hdl
, plp
, ZFS_TYPE_POOL
) != 0)
807 while (*last
!= NULL
)
808 last
= &(*last
)->pl_next
;
811 features
= zpool_get_features(zhp
);
813 if ((*plp
)->pl_all
&& firstexpand
) {
814 for (i
= 0; i
< SPA_FEATURES
; i
++) {
815 zprop_list_t
*entry
= zfs_alloc(hdl
,
816 sizeof (zprop_list_t
));
817 entry
->pl_prop
= ZPROP_INVAL
;
818 entry
->pl_user_prop
= zfs_asprintf(hdl
, "feature@%s",
819 spa_feature_table
[i
].fi_uname
);
820 entry
->pl_width
= strlen(entry
->pl_user_prop
);
821 entry
->pl_all
= B_TRUE
;
824 last
= &entry
->pl_next
;
828 /* add any unsupported features */
829 for (nvp
= nvlist_next_nvpair(features
, NULL
);
830 nvp
!= NULL
; nvp
= nvlist_next_nvpair(features
, nvp
)) {
835 if (zfeature_is_supported(nvpair_name(nvp
)))
838 propname
= zfs_asprintf(hdl
, "unsupported@%s",
842 * Before adding the property to the list make sure that no
843 * other pool already added the same property.
847 while (entry
!= NULL
) {
848 if (entry
->pl_user_prop
!= NULL
&&
849 strcmp(propname
, entry
->pl_user_prop
) == 0) {
853 entry
= entry
->pl_next
;
860 entry
= zfs_alloc(hdl
, sizeof (zprop_list_t
));
861 entry
->pl_prop
= ZPROP_INVAL
;
862 entry
->pl_user_prop
= propname
;
863 entry
->pl_width
= strlen(entry
->pl_user_prop
);
864 entry
->pl_all
= B_TRUE
;
867 last
= &entry
->pl_next
;
870 for (entry
= *plp
; entry
!= NULL
; entry
= entry
->pl_next
) {
875 if (entry
->pl_prop
!= ZPROP_INVAL
&&
876 zpool_get_prop(zhp
, entry
->pl_prop
, buf
, sizeof (buf
),
877 NULL
, B_FALSE
) == 0) {
878 if (strlen(buf
) > entry
->pl_width
)
879 entry
->pl_width
= strlen(buf
);
887 * Get the state for the given feature on the given ZFS pool.
890 zpool_prop_get_feature(zpool_handle_t
*zhp
, const char *propname
, char *buf
,
894 boolean_t found
= B_FALSE
;
895 nvlist_t
*features
= zpool_get_features(zhp
);
897 const char *feature
= strchr(propname
, '@') + 1;
899 supported
= zpool_prop_feature(propname
);
900 ASSERT(supported
|| zpool_prop_unsupported(propname
));
903 * Convert from feature name to feature guid. This conversion is
904 * unecessary for unsupported@... properties because they already
911 ret
= zfeature_lookup_name(feature
, &fid
);
913 (void) strlcpy(buf
, "-", len
);
916 feature
= spa_feature_table
[fid
].fi_guid
;
919 if (nvlist_lookup_uint64(features
, feature
, &refcount
) == 0)
924 (void) strlcpy(buf
, ZFS_FEATURE_DISABLED
, len
);
927 (void) strlcpy(buf
, ZFS_FEATURE_ENABLED
, len
);
929 (void) strlcpy(buf
, ZFS_FEATURE_ACTIVE
, len
);
934 (void) strcpy(buf
, ZFS_UNSUPPORTED_INACTIVE
);
936 (void) strcpy(buf
, ZFS_UNSUPPORTED_READONLY
);
939 (void) strlcpy(buf
, "-", len
);
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
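
/*
 * Worked example of the alignment above, assuming 512-byte sectors:
 * NEW_START_BLOCK * 512 B = 2048 * 512 B = 1,048,576 B = 1 MiB, so a
 * partition that starts (and ends) on a 2048-block boundary is 1m aligned,
 * and any larger power-of-two block size preserves that byte alignment.
 */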
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
969 ret
= pool_namecheck(pool
, &why
, &what
);
972 * The rules for reserved pool names were extended at a later point.
973 * But we need to support users with existing pools that may now be
974 * invalid. So we only check for this expanded set of names during a
975 * create (or import), and only in userland.
977 if (ret
== 0 && !isopen
&&
978 (strncmp(pool
, "mirror", 6) == 0 ||
979 strncmp(pool
, "raidz", 5) == 0 ||
980 strncmp(pool
, "spare", 5) == 0 ||
981 strcmp(pool
, "log") == 0)) {
984 dgettext(TEXT_DOMAIN
, "name is reserved"));
992 case NAME_ERR_TOOLONG
:
994 dgettext(TEXT_DOMAIN
, "name is too long"));
997 case NAME_ERR_INVALCHAR
:
999 dgettext(TEXT_DOMAIN
, "invalid character "
1000 "'%c' in pool name"), what
);
1003 case NAME_ERR_NOLETTER
:
1004 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1005 "name must begin with a letter"));
1008 case NAME_ERR_RESERVED
:
1009 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1010 "name is reserved"));
1013 case NAME_ERR_DISKLIKE
:
1014 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1015 "pool name is reserved"));
1018 case NAME_ERR_LEADING_SLASH
:
1019 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1020 "leading slash in name"));
1023 case NAME_ERR_EMPTY_COMPONENT
:
1024 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1025 "empty component in name"));
1028 case NAME_ERR_TRAILING_SLASH
:
1029 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1030 "trailing slash in name"));
1033 case NAME_ERR_MULTIPLE_AT
:
1034 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1035 "multiple '@' delimiters in name"));
1037 case NAME_ERR_NO_AT
:
1038 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1039 "permission set is missing '@'"));
1050 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1054 zpool_open_canfail(libzfs_handle_t
*hdl
, const char *pool
)
1056 zpool_handle_t
*zhp
;
1060 * Make sure the pool name is valid.
1062 if (!zpool_name_valid(hdl
, B_TRUE
, pool
)) {
1063 (void) zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1064 dgettext(TEXT_DOMAIN
, "cannot open '%s'"),
1069 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
1072 zhp
->zpool_hdl
= hdl
;
1073 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
1075 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
1081 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "no such pool"));
1082 (void) zfs_error_fmt(hdl
, EZFS_NOENT
,
1083 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), pool
);
1092 * Like the above, but silent on error. Used when iterating over pools (because
1093 * the configuration cache may be out of date).
1096 zpool_open_silent(libzfs_handle_t
*hdl
, const char *pool
, zpool_handle_t
**ret
)
1098 zpool_handle_t
*zhp
;
1101 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
1104 zhp
->zpool_hdl
= hdl
;
1105 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
1107 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
1123 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1127 zpool_open(libzfs_handle_t
*hdl
, const char *pool
)
1129 zpool_handle_t
*zhp
;
1131 if ((zhp
= zpool_open_canfail(hdl
, pool
)) == NULL
)
1134 if (zhp
->zpool_state
== POOL_STATE_UNAVAIL
) {
1135 (void) zfs_error_fmt(hdl
, EZFS_POOLUNAVAIL
,
1136 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), zhp
->zpool_name
);
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
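
/*
 * Illustrative sketch of the handle lifecycle: open a pool by name, query
 * it, and close the handle. zpool_open() refuses faulted pools, while
 * zpool_open_canfail() tolerates them. "tank" is an example pool name.
 */
static void
example_pool_handle_lifecycle(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	(void) printf("pool %s state %d\n", zpool_get_name(zhp),
	    zpool_get_state(zhp));

	zpool_close(zhp);
}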
1176 * Create the named pool, using the provided vdev list. It is assumed
1177 * that the consumer has already validated the contents of the nvlist, so we
1178 * don't have to worry about error semantics.
1181 zpool_create(libzfs_handle_t
*hdl
, const char *pool
, nvlist_t
*nvroot
,
1182 nvlist_t
*props
, nvlist_t
*fsprops
)
1184 zfs_cmd_t zc
= {"\0"};
1185 nvlist_t
*zc_fsprops
= NULL
;
1186 nvlist_t
*zc_props
= NULL
;
1190 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1191 "cannot create '%s'"), pool
);
1193 if (!zpool_name_valid(hdl
, B_FALSE
, pool
))
1194 return (zfs_error(hdl
, EZFS_INVALIDNAME
, msg
));
1196 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1200 prop_flags_t flags
= { .create
= B_TRUE
, .import
= B_FALSE
};
1202 if ((zc_props
= zpool_valid_proplist(hdl
, pool
, props
,
1203 SPA_VERSION_1
, flags
, msg
)) == NULL
) {
1212 zoned
= ((nvlist_lookup_string(fsprops
,
1213 zfs_prop_to_name(ZFS_PROP_ZONED
), &zonestr
) == 0) &&
1214 strcmp(zonestr
, "on") == 0);
1216 if ((zc_fsprops
= zfs_valid_proplist(hdl
, ZFS_TYPE_FILESYSTEM
,
1217 fsprops
, zoned
, NULL
, NULL
, msg
)) == NULL
) {
1221 (nvlist_alloc(&zc_props
, NV_UNIQUE_NAME
, 0) != 0)) {
1224 if (nvlist_add_nvlist(zc_props
,
1225 ZPOOL_ROOTFS_PROPS
, zc_fsprops
) != 0) {
1230 if (zc_props
&& zcmd_write_src_nvlist(hdl
, &zc
, zc_props
) != 0)
1233 (void) strlcpy(zc
.zc_name
, pool
, sizeof (zc
.zc_name
));
1235 if ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_CREATE
, &zc
)) != 0) {
1237 zcmd_free_nvlists(&zc
);
1238 nvlist_free(zc_props
);
1239 nvlist_free(zc_fsprops
);
1244 * This can happen if the user has specified the same
1245 * device multiple times. We can't reliably detect this
1246 * until we try to add it and see we already have a
1247 * label. This can also happen under if the device is
1248 * part of an active md or lvm device.
1250 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1251 "one or more vdevs refer to the same device, or "
1252 "one of\nthe devices is part of an active md or "
1254 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1258 * This happens if the record size is smaller or larger
1259 * than the allowed size range, or not a power of 2.
1261 * NOTE: although zfs_valid_proplist is called earlier,
1262 * this case may have slipped through since the
1263 * pool does not exist yet and it is therefore
1264 * impossible to read properties e.g. max blocksize
1267 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1268 "record size invalid"));
1269 return (zfs_error(hdl
, EZFS_BADPROP
, msg
));
1273 * This occurs when one of the devices is below
1274 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1275 * device was the problem device since there's no
1276 * reliable way to determine device size from userland.
1281 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1283 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1284 "one or more devices is less than the "
1285 "minimum size (%s)"), buf
);
1287 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1290 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1291 "one or more devices is out of space"));
1292 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1295 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1296 "cache device must be a disk or disk slice"));
1297 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
1300 return (zpool_standard_error(hdl
, errno
, msg
));
1305 zcmd_free_nvlists(&zc
);
1306 nvlist_free(zc_props
);
1307 nvlist_free(zc_fsprops
);
1312 * Destroy the given pool. It is up to the caller to ensure that there are no
1313 * datasets left in the pool.
1316 zpool_destroy(zpool_handle_t
*zhp
, const char *log_str
)
1318 zfs_cmd_t zc
= {"\0"};
1319 zfs_handle_t
*zfp
= NULL
;
1320 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1323 if (zhp
->zpool_state
== POOL_STATE_ACTIVE
&&
1324 (zfp
= zfs_open(hdl
, zhp
->zpool_name
, ZFS_TYPE_FILESYSTEM
)) == NULL
)
1327 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1328 zc
.zc_history
= (uint64_t)(uintptr_t)log_str
;
1330 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_DESTROY
, &zc
) != 0) {
1331 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1332 "cannot destroy '%s'"), zhp
->zpool_name
);
1334 if (errno
== EROFS
) {
1335 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1336 "one or more devices is read only"));
1337 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1339 (void) zpool_standard_error(hdl
, errno
, msg
);
1348 remove_mountpoint(zfp
);
1356 * Add the given vdevs to the pool. The caller must have already performed the
1357 * necessary verification to ensure that the vdev specification is well-formed.
1360 zpool_add(zpool_handle_t
*zhp
, nvlist_t
*nvroot
)
1362 zfs_cmd_t zc
= {"\0"};
1364 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1366 nvlist_t
**spares
, **l2cache
;
1367 uint_t nspares
, nl2cache
;
1369 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1370 "cannot add to '%s'"), zhp
->zpool_name
);
1372 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1373 SPA_VERSION_SPARES
&&
1374 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_SPARES
,
1375 &spares
, &nspares
) == 0) {
1376 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1377 "upgraded to add hot spares"));
1378 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1381 #if defined(__sun__) || defined(__sun)
1382 if (zpool_is_bootable(zhp
) && nvlist_lookup_nvlist_array(nvroot
,
1383 ZPOOL_CONFIG_SPARES
, &spares
, &nspares
) == 0) {
1386 for (s
= 0; s
< nspares
; s
++) {
1389 if (nvlist_lookup_string(spares
[s
], ZPOOL_CONFIG_PATH
,
1390 &path
) == 0 && pool_uses_efi(spares
[s
])) {
1391 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1392 "device '%s' contains an EFI label and "
1393 "cannot be used on root pools."),
1394 zpool_vdev_name(hdl
, NULL
, spares
[s
], 0));
1395 return (zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
));
1401 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1402 SPA_VERSION_L2CACHE
&&
1403 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_L2CACHE
,
1404 &l2cache
, &nl2cache
) == 0) {
1405 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1406 "upgraded to add cache devices"));
1407 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1410 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1412 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1414 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_ADD
, &zc
) != 0) {
1418 * This can happen if the user has specified the same
1419 * device multiple times. We can't reliably detect this
1420 * until we try to add it and see we already have a
1423 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1424 "one or more vdevs refer to the same device"));
1425 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1430 * This occurrs when one of the devices is below
1431 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1432 * device was the problem device since there's no
1433 * reliable way to determine device size from userland.
1438 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1440 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1441 "device is less than the minimum "
1444 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1448 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1449 "pool must be upgraded to add these vdevs"));
1450 (void) zfs_error(hdl
, EZFS_BADVERSION
, msg
);
1454 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1455 "cache device must be a disk or disk slice"));
1456 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1460 (void) zpool_standard_error(hdl
, errno
, msg
);
1468 zcmd_free_nvlists(&zc
);
1474 * Exports the pool from the system. The caller must ensure that there are no
1475 * mounted datasets in the pool.
1478 zpool_export_common(zpool_handle_t
*zhp
, boolean_t force
, boolean_t hardforce
,
1479 const char *log_str
)
1481 zfs_cmd_t zc
= {"\0"};
1484 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1485 "cannot export '%s'"), zhp
->zpool_name
);
1487 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1488 zc
.zc_cookie
= force
;
1489 zc
.zc_guid
= hardforce
;
1490 zc
.zc_history
= (uint64_t)(uintptr_t)log_str
;
1492 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_EXPORT
, &zc
) != 0) {
1495 zfs_error_aux(zhp
->zpool_hdl
, dgettext(TEXT_DOMAIN
,
1496 "use '-f' to override the following errors:\n"
1497 "'%s' has an active shared spare which could be"
1498 " used by other pools once '%s' is exported."),
1499 zhp
->zpool_name
, zhp
->zpool_name
);
1500 return (zfs_error(zhp
->zpool_hdl
, EZFS_ACTIVE_SPARE
,
1503 return (zpool_standard_error_fmt(zhp
->zpool_hdl
, errno
,
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
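
/*
 * Illustrative sketch: the two wrappers above differ only in the
 * force/hardforce flags they hand to zpool_export_common(), which land in
 * zc_cookie and zc_guid respectively. The history strings are examples.
 */
static int
example_export_pool(zpool_handle_t *zhp, boolean_t force)
{
	if (force)
		return (zpool_export_force(zhp, "zpool export -F (example)"));
	return (zpool_export(zhp, B_FALSE, "zpool export (example)"));
}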
1524 zpool_rewind_exclaim(libzfs_handle_t
*hdl
, const char *name
, boolean_t dryrun
,
1527 nvlist_t
*nv
= NULL
;
1533 if (!hdl
->libzfs_printerr
|| config
== NULL
)
1536 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nv
) != 0 ||
1537 nvlist_lookup_nvlist(nv
, ZPOOL_CONFIG_REWIND_INFO
, &nv
) != 0) {
1541 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_TIME
, &rewindto
) != 0)
1543 (void) nvlist_lookup_int64(nv
, ZPOOL_CONFIG_REWIND_TIME
, &loss
);
1545 if (localtime_r((time_t *)&rewindto
, &t
) != NULL
&&
1546 strftime(timestr
, 128, "%c", &t
) != 0) {
1548 (void) printf(dgettext(TEXT_DOMAIN
,
1549 "Would be able to return %s "
1550 "to its state as of %s.\n"),
1553 (void) printf(dgettext(TEXT_DOMAIN
,
1554 "Pool %s returned to its state as of %s.\n"),
1558 (void) printf(dgettext(TEXT_DOMAIN
,
1559 "%s approximately %lld "),
1560 dryrun
? "Would discard" : "Discarded",
1561 ((longlong_t
)loss
+ 30) / 60);
1562 (void) printf(dgettext(TEXT_DOMAIN
,
1563 "minutes of transactions.\n"));
1564 } else if (loss
> 0) {
1565 (void) printf(dgettext(TEXT_DOMAIN
,
1566 "%s approximately %lld "),
1567 dryrun
? "Would discard" : "Discarded",
1569 (void) printf(dgettext(TEXT_DOMAIN
,
1570 "seconds of transactions.\n"));
1576 zpool_explain_recover(libzfs_handle_t
*hdl
, const char *name
, int reason
,
1579 nvlist_t
*nv
= NULL
;
1581 uint64_t edata
= UINT64_MAX
;
1586 if (!hdl
->libzfs_printerr
)
1590 (void) printf(dgettext(TEXT_DOMAIN
, "action: "));
1592 (void) printf(dgettext(TEXT_DOMAIN
, "\t"));
1594 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1595 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nv
) != 0 ||
1596 nvlist_lookup_nvlist(nv
, ZPOOL_CONFIG_REWIND_INFO
, &nv
) != 0 ||
1597 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_TIME
, &rewindto
) != 0)
1600 (void) nvlist_lookup_int64(nv
, ZPOOL_CONFIG_REWIND_TIME
, &loss
);
1601 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_LOAD_DATA_ERRORS
,
1604 (void) printf(dgettext(TEXT_DOMAIN
,
1605 "Recovery is possible, but will result in some data loss.\n"));
1607 if (localtime_r((time_t *)&rewindto
, &t
) != NULL
&&
1608 strftime(timestr
, 128, "%c", &t
) != 0) {
1609 (void) printf(dgettext(TEXT_DOMAIN
,
1610 "\tReturning the pool to its state as of %s\n"
1611 "\tshould correct the problem. "),
1614 (void) printf(dgettext(TEXT_DOMAIN
,
1615 "\tReverting the pool to an earlier state "
1616 "should correct the problem.\n\t"));
1620 (void) printf(dgettext(TEXT_DOMAIN
,
1621 "Approximately %lld minutes of data\n"
1622 "\tmust be discarded, irreversibly. "),
1623 ((longlong_t
)loss
+ 30) / 60);
1624 } else if (loss
> 0) {
1625 (void) printf(dgettext(TEXT_DOMAIN
,
1626 "Approximately %lld seconds of data\n"
1627 "\tmust be discarded, irreversibly. "),
1630 if (edata
!= 0 && edata
!= UINT64_MAX
) {
1632 (void) printf(dgettext(TEXT_DOMAIN
,
1633 "After rewind, at least\n"
1634 "\tone persistent user-data error will remain. "));
1636 (void) printf(dgettext(TEXT_DOMAIN
,
1637 "After rewind, several\n"
1638 "\tpersistent user-data errors will remain. "));
1641 (void) printf(dgettext(TEXT_DOMAIN
,
1642 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1643 reason
>= 0 ? "clear" : "import", name
);
1645 (void) printf(dgettext(TEXT_DOMAIN
,
1646 "A scrub of the pool\n"
1647 "\tis strongly recommended after recovery.\n"));
1651 (void) printf(dgettext(TEXT_DOMAIN
,
1652 "Destroy and re-create the pool from\n\ta backup source.\n"));
1656 * zpool_import() is a contracted interface. Should be kept the same
1659 * Applications should use zpool_import_props() to import a pool with
1660 * new properties value to be set.
1663 zpool_import(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1666 nvlist_t
*props
= NULL
;
1669 if (altroot
!= NULL
) {
1670 if (nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) != 0) {
1671 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1672 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1676 if (nvlist_add_string(props
,
1677 zpool_prop_to_name(ZPOOL_PROP_ALTROOT
), altroot
) != 0 ||
1678 nvlist_add_string(props
,
1679 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE
), "none") != 0) {
1681 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1682 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1687 ret
= zpool_import_props(hdl
, config
, newname
, props
,
1694 print_vdev_tree(libzfs_handle_t
*hdl
, const char *name
, nvlist_t
*nv
,
1700 uint64_t is_log
= 0;
1702 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_IS_LOG
,
1706 (void) printf("\t%*s%s%s\n", indent
, "", name
,
1707 is_log
? " [log]" : "");
1709 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
1710 &child
, &children
) != 0)
1713 for (c
= 0; c
< children
; c
++) {
1714 vname
= zpool_vdev_name(hdl
, NULL
, child
[c
], VDEV_NAME_TYPE_ID
);
1715 print_vdev_tree(hdl
, vname
, child
[c
], indent
+ 2);
1721 zpool_print_unsup_feat(nvlist_t
*config
)
1723 nvlist_t
*nvinfo
, *unsup_feat
;
1726 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) ==
1728 verify(nvlist_lookup_nvlist(nvinfo
, ZPOOL_CONFIG_UNSUP_FEAT
,
1731 for (nvp
= nvlist_next_nvpair(unsup_feat
, NULL
); nvp
!= NULL
;
1732 nvp
= nvlist_next_nvpair(unsup_feat
, nvp
)) {
1735 verify(nvpair_type(nvp
) == DATA_TYPE_STRING
);
1736 verify(nvpair_value_string(nvp
, &desc
) == 0);
1738 if (strlen(desc
) > 0)
1739 (void) printf("\t%s (%s)\n", nvpair_name(nvp
), desc
);
1741 (void) printf("\t%s\n", nvpair_name(nvp
));
1746 * Import the given pool using the known configuration and a list of
1747 * properties to be set. The configuration should have come from
1748 * zpool_find_import(). The 'newname' parameters control whether the pool
1749 * is imported with a different name.
1752 zpool_import_props(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1753 nvlist_t
*props
, int flags
)
1755 zfs_cmd_t zc
= {"\0"};
1756 zpool_rewind_policy_t policy
;
1757 nvlist_t
*nv
= NULL
;
1758 nvlist_t
*nvinfo
= NULL
;
1759 nvlist_t
*missing
= NULL
;
1766 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1769 (void) snprintf(errbuf
, sizeof (errbuf
), dgettext(TEXT_DOMAIN
,
1770 "cannot import pool '%s'"), origname
);
1772 if (newname
!= NULL
) {
1773 if (!zpool_name_valid(hdl
, B_FALSE
, newname
))
1774 return (zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1775 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1777 thename
= (char *)newname
;
1782 if (props
!= NULL
) {
1784 prop_flags_t flags
= { .create
= B_FALSE
, .import
= B_TRUE
};
1786 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_VERSION
,
1789 if ((props
= zpool_valid_proplist(hdl
, origname
,
1790 props
, version
, flags
, errbuf
)) == NULL
)
1792 if (zcmd_write_src_nvlist(hdl
, &zc
, props
) != 0) {
1799 (void) strlcpy(zc
.zc_name
, thename
, sizeof (zc
.zc_name
));
1801 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1804 if (zcmd_write_conf_nvlist(hdl
, &zc
, config
) != 0) {
1805 zcmd_free_nvlists(&zc
);
1808 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, zc
.zc_nvlist_conf_size
* 2) != 0) {
1809 zcmd_free_nvlists(&zc
);
1813 zc
.zc_cookie
= flags
;
1814 while ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_IMPORT
, &zc
)) != 0 &&
1816 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
1817 zcmd_free_nvlists(&zc
);
1824 (void) zcmd_read_dst_nvlist(hdl
, &zc
, &nv
);
1826 zcmd_free_nvlists(&zc
);
1828 zpool_get_rewind_policy(config
, &policy
);
1834 * Dry-run failed, but we print out what success
1835 * looks like if we found a best txg
1837 if (policy
.zrp_request
& ZPOOL_TRY_REWIND
) {
1838 zpool_rewind_exclaim(hdl
, newname
? origname
: thename
,
1844 if (newname
== NULL
)
1845 (void) snprintf(desc
, sizeof (desc
),
1846 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1849 (void) snprintf(desc
, sizeof (desc
),
1850 dgettext(TEXT_DOMAIN
, "cannot import '%s' as '%s'"),
1855 if (nv
!= NULL
&& nvlist_lookup_nvlist(nv
,
1856 ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) == 0 &&
1857 nvlist_exists(nvinfo
, ZPOOL_CONFIG_UNSUP_FEAT
)) {
1858 (void) printf(dgettext(TEXT_DOMAIN
, "This "
1859 "pool uses the following feature(s) not "
1860 "supported by this system:\n"));
1861 zpool_print_unsup_feat(nv
);
1862 if (nvlist_exists(nvinfo
,
1863 ZPOOL_CONFIG_CAN_RDONLY
)) {
1864 (void) printf(dgettext(TEXT_DOMAIN
,
1865 "All unsupported features are only "
1866 "required for writing to the pool."
1867 "\nThe pool can be imported using "
1868 "'-o readonly=on'.\n"));
1872 * Unsupported version.
1874 (void) zfs_error(hdl
, EZFS_BADVERSION
, desc
);
1878 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, desc
);
1882 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1883 "one or more devices is read only"));
1884 (void) zfs_error(hdl
, EZFS_BADDEV
, desc
);
1888 if (nv
&& nvlist_lookup_nvlist(nv
,
1889 ZPOOL_CONFIG_LOAD_INFO
, &nvinfo
) == 0 &&
1890 nvlist_lookup_nvlist(nvinfo
,
1891 ZPOOL_CONFIG_MISSING_DEVICES
, &missing
) == 0) {
1892 (void) printf(dgettext(TEXT_DOMAIN
,
1893 "The devices below are missing, use "
1894 "'-m' to import the pool anyway:\n"));
1895 print_vdev_tree(hdl
, NULL
, missing
, 2);
1896 (void) printf("\n");
1898 (void) zpool_standard_error(hdl
, error
, desc
);
1902 (void) zpool_standard_error(hdl
, error
, desc
);
1906 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1907 "one or more devices are already in use\n"));
1908 (void) zfs_error(hdl
, EZFS_BADDEV
, desc
);
1912 (void) zpool_standard_error(hdl
, error
, desc
);
1913 zpool_explain_recover(hdl
,
1914 newname
? origname
: thename
, -error
, nv
);
1921 zpool_handle_t
*zhp
;
1924 * This should never fail, but play it safe anyway.
1926 if (zpool_open_silent(hdl
, thename
, &zhp
) != 0)
1928 else if (zhp
!= NULL
)
1930 if (policy
.zrp_request
&
1931 (ZPOOL_DO_REWIND
| ZPOOL_TRY_REWIND
)) {
1932 zpool_rewind_exclaim(hdl
, newname
? origname
: thename
,
1933 ((policy
.zrp_request
& ZPOOL_TRY_REWIND
) != 0), nv
);
1946 zpool_scan(zpool_handle_t
*zhp
, pool_scan_func_t func
)
1948 zfs_cmd_t zc
= {"\0"};
1950 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1952 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1953 zc
.zc_cookie
= func
;
1955 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_SCAN
, &zc
) == 0 ||
1956 (errno
== ENOENT
&& func
!= POOL_SCAN_NONE
))
1959 if (func
== POOL_SCAN_SCRUB
) {
1960 (void) snprintf(msg
, sizeof (msg
),
1961 dgettext(TEXT_DOMAIN
, "cannot scrub %s"), zc
.zc_name
);
1962 } else if (func
== POOL_SCAN_NONE
) {
1963 (void) snprintf(msg
, sizeof (msg
),
1964 dgettext(TEXT_DOMAIN
, "cannot cancel scrubbing %s"),
1967 assert(!"unexpected result");
1970 if (errno
== EBUSY
) {
1972 pool_scan_stat_t
*ps
= NULL
;
1975 verify(nvlist_lookup_nvlist(zhp
->zpool_config
,
1976 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
1977 (void) nvlist_lookup_uint64_array(nvroot
,
1978 ZPOOL_CONFIG_SCAN_STATS
, (uint64_t **)&ps
, &psc
);
1979 if (ps
&& ps
->pss_func
== POOL_SCAN_SCRUB
)
1980 return (zfs_error(hdl
, EZFS_SCRUBBING
, msg
));
1982 return (zfs_error(hdl
, EZFS_RESILVERING
, msg
));
1983 } else if (errno
== ENOENT
) {
1984 return (zfs_error(hdl
, EZFS_NO_SCRUB
, msg
));
1986 return (zpool_standard_error(hdl
, errno
, msg
));
1991 * Find a vdev that matches the search criteria specified. We use the
1992 * the nvpair name to determine how we should look for the device.
1993 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1994 * spare; but FALSE if its an INUSE spare.
1997 vdev_to_nvlist_iter(nvlist_t
*nv
, nvlist_t
*search
, boolean_t
*avail_spare
,
1998 boolean_t
*l2cache
, boolean_t
*log
)
2005 nvpair_t
*pair
= nvlist_next_nvpair(search
, NULL
);
2007 /* Nothing to look for */
2008 if (search
== NULL
|| pair
== NULL
)
2011 /* Obtain the key we will use to search */
2012 srchkey
= nvpair_name(pair
);
2014 switch (nvpair_type(pair
)) {
2015 case DATA_TYPE_UINT64
:
2016 if (strcmp(srchkey
, ZPOOL_CONFIG_GUID
) == 0) {
2017 uint64_t srchval
, theguid
;
2019 verify(nvpair_value_uint64(pair
, &srchval
) == 0);
2020 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
2022 if (theguid
== srchval
)
2027 case DATA_TYPE_STRING
: {
2028 char *srchval
, *val
;
2030 verify(nvpair_value_string(pair
, &srchval
) == 0);
2031 if (nvlist_lookup_string(nv
, srchkey
, &val
) != 0)
2035 * Search for the requested value. Special cases:
2037 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2038 * "-part1", or "p1". The suffix is hidden from the user,
2039 * but included in the string, so this matches around it.
2040 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
2041 * is used to check all possible expanded paths.
2042 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2044 * Otherwise, all other searches are simple string compares.
2046 if (strcmp(srchkey
, ZPOOL_CONFIG_PATH
) == 0) {
2047 uint64_t wholedisk
= 0;
2049 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
2051 if (zfs_strcmp_pathname(srchval
, val
, wholedisk
) == 0)
2054 } else if (strcmp(srchkey
, ZPOOL_CONFIG_TYPE
) == 0 && val
) {
2055 char *type
, *idx
, *end
, *p
;
2056 uint64_t id
, vdev_id
;
2059 * Determine our vdev type, keeping in mind
2060 * that the srchval is composed of a type and
2061 * vdev id pair (i.e. mirror-4).
2063 if ((type
= strdup(srchval
)) == NULL
)
2066 if ((p
= strrchr(type
, '-')) == NULL
) {
2074 * If the types don't match then keep looking.
2076 if (strncmp(val
, type
, strlen(val
)) != 0) {
2081 verify(strncmp(type
, VDEV_TYPE_RAIDZ
,
2082 strlen(VDEV_TYPE_RAIDZ
)) == 0 ||
2083 strncmp(type
, VDEV_TYPE_MIRROR
,
2084 strlen(VDEV_TYPE_MIRROR
)) == 0);
2085 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_ID
,
2089 vdev_id
= strtoull(idx
, &end
, 10);
2096 * Now verify that we have the correct vdev id.
2105 if (strcmp(srchval
, val
) == 0)
2114 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
2115 &child
, &children
) != 0)
2118 for (c
= 0; c
< children
; c
++) {
2119 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2120 avail_spare
, l2cache
, NULL
)) != NULL
) {
2122 * The 'is_log' value is only set for the toplevel
2123 * vdev, not the leaf vdevs. So we always lookup the
2124 * log device from the root of the vdev tree (where
2125 * 'log' is non-NULL).
2128 nvlist_lookup_uint64(child
[c
],
2129 ZPOOL_CONFIG_IS_LOG
, &is_log
) == 0 &&
2137 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_SPARES
,
2138 &child
, &children
) == 0) {
2139 for (c
= 0; c
< children
; c
++) {
2140 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2141 avail_spare
, l2cache
, NULL
)) != NULL
) {
2142 *avail_spare
= B_TRUE
;
2148 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_L2CACHE
,
2149 &child
, &children
) == 0) {
2150 for (c
= 0; c
< children
; c
++) {
2151 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
,
2152 avail_spare
, l2cache
, NULL
)) != NULL
) {
2163 * Given a physical path (minus the "/devices" prefix), find the
2167 zpool_find_vdev_by_physpath(zpool_handle_t
*zhp
, const char *ppath
,
2168 boolean_t
*avail_spare
, boolean_t
*l2cache
, boolean_t
*log
)
2170 nvlist_t
*search
, *nvroot
, *ret
;
2172 verify(nvlist_alloc(&search
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2173 verify(nvlist_add_string(search
, ZPOOL_CONFIG_PHYS_PATH
, ppath
) == 0);
2175 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2178 *avail_spare
= B_FALSE
;
2182 ret
= vdev_to_nvlist_iter(nvroot
, search
, avail_spare
, l2cache
, log
);
2183 nvlist_free(search
);
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
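
/*
 * Illustrative sketch: zpool_vdev_is_interior() matches top-level vdev
 * names of the form "<type>-<id>" by prefix, e.g. "mirror-0" or "raidz-2",
 * while a leaf device path such as "/dev/sda1" is not interior.
 */
static void
example_interior_vdev_names(void)
{
	boolean_t a = zpool_vdev_is_interior("mirror-0");	/* B_TRUE */
	boolean_t b = zpool_vdev_is_interior("raidz-2");	/* B_TRUE */
	boolean_t c = zpool_vdev_is_interior("/dev/sda1");	/* B_FALSE */

	(void) a;
	(void) b;
	(void) c;
}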
2201 zpool_find_vdev(zpool_handle_t
*zhp
, const char *path
, boolean_t
*avail_spare
,
2202 boolean_t
*l2cache
, boolean_t
*log
)
2205 nvlist_t
*nvroot
, *search
, *ret
;
2208 verify(nvlist_alloc(&search
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2210 guid
= strtoull(path
, &end
, 0);
2211 if (guid
!= 0 && *end
== '\0') {
2212 verify(nvlist_add_uint64(search
, ZPOOL_CONFIG_GUID
, guid
) == 0);
2213 } else if (zpool_vdev_is_interior(path
)) {
2214 verify(nvlist_add_string(search
, ZPOOL_CONFIG_TYPE
, path
) == 0);
2216 verify(nvlist_add_string(search
, ZPOOL_CONFIG_PATH
, path
) == 0);
2219 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2222 *avail_spare
= B_FALSE
;
2226 ret
= vdev_to_nvlist_iter(nvroot
, search
, avail_spare
, l2cache
, log
);
2227 nvlist_free(search
);
static boolean_t
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
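
/*
 * Illustrative sketch of the append behaviour above: the first path is
 * written with "%s" and later ones with " %s", so repeated calls build a
 * single space-separated list in the caller's buffer. The strings below
 * are made-up placeholders, not real phys_path values.
 */
static void
example_physpath_append(void)
{
	char buf[128];
	size_t pos = 0;

	pos += snprintf(buf + pos, sizeof (buf) - pos, "%s", "ata-1");
	pos += snprintf(buf + pos, sizeof (buf) - pos, " %s", "ata-2");
	/* buf now holds "ata-1 ata-2" */
	(void) pos;
}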
2278 vdev_get_physpaths(nvlist_t
*nv
, char *physpath
, size_t phypath_size
,
2279 size_t *rsz
, boolean_t is_spare
)
2284 if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_TYPE
, &type
) != 0)
2285 return (EZFS_INVALCONFIG
);
2287 if (strcmp(type
, VDEV_TYPE_DISK
) == 0) {
2289 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2290 * For a spare vdev, we only want to boot from the active
2295 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_IS_SPARE
,
2298 return (EZFS_INVALCONFIG
);
2301 if (vdev_online(nv
)) {
2302 if ((ret
= vdev_get_one_physpath(nv
, physpath
,
2303 phypath_size
, rsz
)) != 0)
2306 } else if (strcmp(type
, VDEV_TYPE_MIRROR
) == 0 ||
2307 strcmp(type
, VDEV_TYPE_REPLACING
) == 0 ||
2308 (is_spare
= (strcmp(type
, VDEV_TYPE_SPARE
) == 0))) {
2313 if (nvlist_lookup_nvlist_array(nv
,
2314 ZPOOL_CONFIG_CHILDREN
, &child
, &count
) != 0)
2315 return (EZFS_INVALCONFIG
);
2317 for (i
= 0; i
< count
; i
++) {
2318 ret
= vdev_get_physpaths(child
[i
], physpath
,
2319 phypath_size
, rsz
, is_spare
);
2320 if (ret
== EZFS_NOSPC
)
2325 return (EZFS_POOL_INVALARG
);
2329 * Get phys_path for a root pool config.
2330 * Return 0 on success; non-zero on failure.
2333 zpool_get_config_physpath(nvlist_t
*config
, char *physpath
, size_t phypath_size
)
2336 nvlist_t
*vdev_root
;
2343 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
2345 return (EZFS_INVALCONFIG
);
2347 if (nvlist_lookup_string(vdev_root
, ZPOOL_CONFIG_TYPE
, &type
) != 0 ||
2348 nvlist_lookup_nvlist_array(vdev_root
, ZPOOL_CONFIG_CHILDREN
,
2349 &child
, &count
) != 0)
2350 return (EZFS_INVALCONFIG
);
2352 #if defined(__sun__) || defined(__sun)
2354 * root pool can not have EFI labeled disks and can only have
2355 * a single top-level vdev.
2357 if (strcmp(type
, VDEV_TYPE_ROOT
) != 0 || count
!= 1 ||
2358 pool_uses_efi(vdev_root
))
2359 return (EZFS_POOL_INVALARG
);
2362 (void) vdev_get_physpaths(child
[0], physpath
, phypath_size
, &rsz
,
2365 /* No online devices */
2367 return (EZFS_NODEVICE
);
2373 * Get phys_path for a root pool
2374 * Return 0 on success; non-zero on failure.
2377 zpool_get_physpath(zpool_handle_t
*zhp
, char *physpath
, size_t phypath_size
)
2379 return (zpool_get_config_physpath(zhp
->zpool_config
, physpath
,
2384 * If the device has being dynamically expanded then we need to relabel
2385 * the disk to use the new unallocated space.
2388 zpool_relabel_disk(libzfs_handle_t
*hdl
, const char *path
, const char *msg
)
2392 if ((fd
= open(path
, O_RDWR
|O_DIRECT
)) < 0) {
2393 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2394 "relabel '%s': unable to open device: %d"), path
, errno
);
2395 return (zfs_error(hdl
, EZFS_OPENFAILED
, msg
));
2399 * It's possible that we might encounter an error if the device
2400 * does not have any unallocated space left. If so, we simply
2401 * ignore that error and continue on.
2403 * Also, we don't call efi_rescan() - that would just return EBUSY.
2404 * The module will do it for us in vdev_disk_open().
2406 error
= efi_use_whole_disk(fd
);
2408 if (error
&& error
!= VT_ENOSPC
) {
2409 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2410 "relabel '%s': unable to read disk capacity"), path
);
2411 return (zfs_error(hdl
, EZFS_NOCAP
, msg
));
2417 * Bring the specified vdev online. The 'flags' parameter is a set of the
2418 * ZFS_ONLINE_* flags.
2421 zpool_vdev_online(zpool_handle_t
*zhp
, const char *path
, int flags
,
2422 vdev_state_t
*newstate
)
2424 zfs_cmd_t zc
= {"\0"};
2427 boolean_t avail_spare
, l2cache
, islog
;
2428 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2431 if (flags
& ZFS_ONLINE_EXPAND
) {
2432 (void) snprintf(msg
, sizeof (msg
),
2433 dgettext(TEXT_DOMAIN
, "cannot expand %s"), path
);
2435 (void) snprintf(msg
, sizeof (msg
),
2436 dgettext(TEXT_DOMAIN
, "cannot online %s"), path
);
2439 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2440 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2442 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2444 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2447 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2449 if (flags
& ZFS_ONLINE_EXPAND
||
2450 zpool_get_prop_int(zhp
, ZPOOL_PROP_AUTOEXPAND
, NULL
)) {
2451 uint64_t wholedisk
= 0;
2453 (void) nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_WHOLE_DISK
,
2457 * XXX - L2ARC 1.0 devices can't support expansion.
2460 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2461 "cannot expand cache devices"));
2462 return (zfs_error(hdl
, EZFS_VDEVNOTSUP
, msg
));
2466 const char *fullpath
= path
;
2467 char buf
[MAXPATHLEN
];
2469 if (path
[0] != '/') {
2470 error
= zfs_resolve_shortname(path
, buf
,
2473 return (zfs_error(hdl
, EZFS_NODEVICE
,
2479 error
= zpool_relabel_disk(hdl
, fullpath
, msg
);
2485 zc
.zc_cookie
= VDEV_STATE_ONLINE
;
2488 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) != 0) {
2489 if (errno
== EINVAL
) {
2490 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "was split "
2491 "from this pool into a new one. Use '%s' "
2492 "instead"), "zpool detach");
2493 return (zfs_error(hdl
, EZFS_POSTSPLIT_ONLINE
, msg
));
2495 return (zpool_standard_error(hdl
, errno
, msg
));
2498 *newstate
= zc
.zc_cookie
;
2503 * Take the specified vdev offline
2506 zpool_vdev_offline(zpool_handle_t
*zhp
, const char *path
, boolean_t istmp
)
2508 zfs_cmd_t zc
= {"\0"};
2511 boolean_t avail_spare
, l2cache
;
2512 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2514 (void) snprintf(msg
, sizeof (msg
),
2515 dgettext(TEXT_DOMAIN
, "cannot offline %s"), path
);
2517 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2518 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2520 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2522 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2525 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2527 zc
.zc_cookie
= VDEV_STATE_OFFLINE
;
2528 zc
.zc_obj
= istmp
? ZFS_OFFLINE_TEMPORARY
: 0;
2530 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2537 * There are no other replicas of this device.
2539 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
2543 * The log device has unplayed logs
2545 return (zfs_error(hdl
, EZFS_UNPLAYED_LOGS
, msg
));
2548 return (zpool_standard_error(hdl
, errno
, msg
));
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
2588 * Mark the given vdev degraded.
2591 zpool_vdev_degrade(zpool_handle_t
*zhp
, uint64_t guid
, vdev_aux_t aux
)
2593 zfs_cmd_t zc
= {"\0"};
2595 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2597 (void) snprintf(msg
, sizeof (msg
),
2598 dgettext(TEXT_DOMAIN
, "cannot degrade %llu"), (u_longlong_t
)guid
);
2600 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2602 zc
.zc_cookie
= VDEV_STATE_DEGRADED
;
2605 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2608 return (zpool_standard_error(hdl
, errno
, msg
));
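
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * resolve a device path to its vdev GUID and mark it degraded, the way a
 * fault-management agent might.  VDEV_AUX_ERR_EXCEEDED is used here as a
 * representative aux reason.
 */
#if 0
static int
example_degrade_by_path(zpool_handle_t *zhp, const char *path)
{
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	uint64_t guid;

	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (-1);

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);

	return (zpool_vdev_degrade(zhp, guid, VDEV_AUX_ERR_EXCEEDED));
}
#endif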

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

#if defined(__sun__) || defined(__sun)
	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}
#endif

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
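
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * replace old_disk with the single-disk vdev described by nvroot.  In the
 * zpool command, nvroot is built by make_root_vdev(); here it is assumed to
 * be supplied by the caller, already labeled and ready to use.
 */
#if 0
static int
example_replace(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk, nvlist_t *nvroot)
{
	/* a non-zero last argument means "replace" rather than "attach" */
	return (zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, 1));
}
#endif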

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
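
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * a dry-run split of a mirrored pool, which only computes the nvlist
 * describing the would-be new pool.  Pool names are placeholders.
 */
#if 0
static int
example_split_dryrun(libzfs_handle_t *g_zfs)
{
	zpool_handle_t *zhp;
	nvlist_t *newroot = NULL;
	splitflags_t flags = { 0 };
	char newname[] = "tank-split";
	int err;

	flags.dryrun = 1;

	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
		return (-1);

	err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);

	nvlist_free(newroot);
	zpool_close(zhp);
	return (err);
}
#endif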

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
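
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * remove a cache (L2ARC) device from a pool, one of the vdev classes this
 * call supports.  The device name is a placeholder.
 */
#if 0
static int
example_remove_cache_device(libzfs_handle_t *g_zfs)
{
	zpool_handle_t *zhp;
	int err;

	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
		return (-1);

	err = zpool_vdev_remove(zhp, "/dev/sdc");

	zpool_close(zhp);
	return (err);
}
#endif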

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
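
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * clear error counts for a whole pool without requesting a rewind, using
 * the same policy nvlist keys the zpool command passes for a plain
 * 'zpool clear tank'.
 */
#if 0
static int
example_clear_pool(zpool_handle_t *zhp)
{
	nvlist_t *policy;
	int err;

	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
	    ZPOOL_NO_REWIND) != 0)
		return (-1);

	err = zpool_clear(zhp, NULL, policy);

	nvlist_free(policy);
	return (err);
}
#endif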

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

#if defined(__sun__) || defined(__sun)
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0"};

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
#endif /* sun */

/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
 */
static char *
strip_partition(libzfs_handle_t *hdl, char *path)
{
	char *tmp = zfs_strdup(hdl, path);
	char *part = NULL, *d = NULL;

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
	    tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d);
	} else if (strncmp("xvd", tmp, 3) == 0) {
		for (d = &tmp[3]; isalpha(*d); part = ++d);
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++);
		if (*d == '\0')
			*part = '\0';
	}

	return (tmp);
}

#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
		/*
		 * Live VDEV path updates to a kernel VDEV during a
		 * zpool_vdev_name lookup are not supported on Linux.
		 */
		char *devid;
		vdev_stat_t *vs;
		uint_t vsc;

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}
#endif /* sun */

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (strip_partition(hdl, path));
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
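
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * print the display name of every top-level vdev in a pool, using the same
 * helper 'zpool status' relies on.
 */
#if 0
static void
example_print_toplevel_vdevs(libzfs_handle_t *g_zfs, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		char *name = zpool_vdev_name(g_zfs, zhp, child[c],
		    VDEV_NAME_TYPE_ID);

		(void) printf("%s\n", name);
		free(name);
	}
}
#endif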

static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				if (dst == NULL)
					return (-1);
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
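
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * dump the persistent error log as raw <dataset, object> pairs;
 * zpool_obj_to_path() below can turn these into user-visible paths.
 */
#if 0
static int
example_dump_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL, *nv;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return (-1);

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, &obj) == 0);
		(void) printf("<0x%llx>:<0x%llx>\n",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
	}
	nvlist_free(nverrlist);
	return (0);
}
#endif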

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
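
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * record an administrative command in the pool history ring buffer, the way
 * the zfs/zpool utilities do before issuing their ioctls.  The 4096-byte
 * buffer size is an arbitrary value chosen for this sketch.
 */
#if 0
static void
example_log_command(libzfs_handle_t *g_zfs, int argc, char **argv)
{
	char history_str[4096];

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
	(void) zpool_log_history(g_zfs, history_str);
}
#endif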

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char *buf;
	int buflen = 128 * 1024;
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	do {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}

		/* CONSTCOND */
	} while (1);

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
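
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * fetch the pool history and print the command string of each record, the
 * uninternationalized core of 'zpool history'.
 */
#if 0
static int
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return (-1);
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);

	for (i = 0; i < numrecords; i++) {
		char *cmd;

		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
	return (0);
}
#endif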

/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
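
/*
 * Illustrative usage sketch (not part of libzfs, guarded out of the build):
 * drain any queued zevents without blocking, as a monitoring daemon might on
 * startup.  ZFS_DEV ("/dev/zfs") provides the per-consumer event cursor.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *g_zfs)
{
	int zevent_fd, dropped;
	nvlist_t *event;

	if ((zevent_fd = open(ZFS_DEV, O_RDWR)) < 0)
		return;

	for (;;) {
		if (zpool_events_next(g_zfs, &event, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0 || event == NULL)
			break;
		nvlist_free(event);
	}
	(void) close(zevent_fd);
}
#endif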

/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Generate a unique partition name for the ZFS member.  Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members.  The partition names are
 * of the form <pool>-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}
#endif

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));