/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
typedef struct prop_flags {
    int create:1;   /* Validate property on creation */
    int import:1;   /* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
    zfs_cmd_t zc = {"\0"};
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
        return (-1);

    while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
        if (errno == ENOMEM) {
            if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                zcmd_free_nvlists(&zc);
                return (-1);
            }
        } else {
            zcmd_free_nvlists(&zc);
            return (-1);
        }
    }

    if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
        zcmd_free_nvlists(&zc);
        return (-1);
    }

    zcmd_free_nvlists(&zc);

    return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
    nvlist_t *old_props;

    old_props = zhp->zpool_props;

    if (zpool_get_all_props(zhp) != 0)
        return (-1);

    nvlist_free(old_props);
    return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
    nvlist_t *nv, *nvl;
    uint64_t ival;
    char *value;
    zprop_source_t source;

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
        source = ival;
        verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
    } else {
        source = ZPROP_SRC_DEFAULT;
        if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
            value = "-";
    }

    if (src)
        *src = source;
    return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
    nvlist_t *nv, *nvl;
    uint64_t value;
    zprop_source_t source;

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
        /*
         * zpool_get_all_props() has most likely failed because
         * the pool is faulted, but if all we need is the top level
         * vdev's guid then get it from the zhp config nvlist.
         */
        if ((prop == ZPOOL_PROP_GUID) &&
            (nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
            (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
            == 0)) {
            return (value);
        }
        return (zpool_prop_default_numeric(prop));
    }

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
        source = value;
        verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
    } else {
        source = ZPROP_SRC_DEFAULT;
        value = zpool_prop_default_numeric(prop);
    }

    if (src)
        *src = source;
    return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
    switch (state) {
    case VDEV_STATE_CLOSED:
    case VDEV_STATE_OFFLINE:
        return (gettext("OFFLINE"));
    case VDEV_STATE_REMOVED:
        return (gettext("REMOVED"));
    case VDEV_STATE_CANT_OPEN:
        if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
            return (gettext("FAULTED"));
        else if (aux == VDEV_AUX_SPLIT_POOL)
            return (gettext("SPLIT"));
        else
            return (gettext("UNAVAIL"));
    case VDEV_STATE_FAULTED:
        return (gettext("FAULTED"));
    case VDEV_STATE_DEGRADED:
        return (gettext("DEGRADED"));
    case VDEV_STATE_HEALTHY:
        return (gettext("ONLINE"));
    default:
        break;
    }

    return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
    switch (state) {
    case POOL_STATE_ACTIVE:
        return (gettext("ACTIVE"));
    case POOL_STATE_EXPORTED:
        return (gettext("EXPORTED"));
    case POOL_STATE_DESTROYED:
        return (gettext("DESTROYED"));
    case POOL_STATE_SPARE:
        return (gettext("SPARE"));
    case POOL_STATE_L2CACHE:
        return (gettext("L2CACHE"));
    case POOL_STATE_UNINITIALIZED:
        return (gettext("UNINITIALIZED"));
    case POOL_STATE_UNAVAIL:
        return (gettext("UNAVAIL"));
    case POOL_STATE_POTENTIALLY_ACTIVE:
        return (gettext("POTENTIALLY_ACTIVE"));
    default:
        break;
    }

    return (gettext("UNKNOWN"));
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
    uint64_t intval;
    const char *strval;
    zprop_source_t src = ZPROP_SRC_NONE;
    nvlist_t *nvroot;
    vdev_stat_t *vs;
    uint_t vsc;

    if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
        switch (prop) {
        case ZPOOL_PROP_NAME:
            (void) strlcpy(buf, zpool_get_name(zhp), len);
            break;

        case ZPOOL_PROP_HEALTH:
            (void) strlcpy(buf, "FAULTED", len);
            break;

        case ZPOOL_PROP_GUID:
            intval = zpool_get_prop_int(zhp, prop, &src);
            (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
            break;

        case ZPOOL_PROP_ALTROOT:
        case ZPOOL_PROP_CACHEFILE:
        case ZPOOL_PROP_COMMENT:
            if (zhp->zpool_props != NULL ||
                zpool_get_all_props(zhp) == 0) {
                (void) strlcpy(buf,
                    zpool_get_prop_string(zhp, prop, &src),
                    len);
                break;
            }
            /* FALLTHROUGH */
        default:
            (void) strlcpy(buf, "-", len);
            break;
        }

        if (srctype != NULL)
            *srctype = src;
        return (0);
    }

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
        prop != ZPOOL_PROP_NAME)
        return (-1);

    switch (zpool_prop_get_type(prop)) {
    case PROP_TYPE_STRING:
        (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
            len);
        break;

    case PROP_TYPE_NUMBER:
        intval = zpool_get_prop_int(zhp, prop, &src);

        switch (prop) {
        case ZPOOL_PROP_SIZE:
        case ZPOOL_PROP_ALLOCATED:
        case ZPOOL_PROP_FREE:
        case ZPOOL_PROP_FREEING:
        case ZPOOL_PROP_LEAKED:
        case ZPOOL_PROP_ASHIFT:
            if (literal)
                (void) snprintf(buf, len, "%llu",
                    (u_longlong_t)intval);
            else
                (void) zfs_nicenum(intval, buf, len);
            break;

        case ZPOOL_PROP_EXPANDSZ:
            if (intval == 0) {
                (void) strlcpy(buf, "-", len);
            } else if (literal) {
                (void) snprintf(buf, len, "%llu",
                    (u_longlong_t)intval);
            } else {
                (void) zfs_nicenum(intval, buf, len);
            }
            break;

        case ZPOOL_PROP_CAPACITY:
            if (literal) {
                (void) snprintf(buf, len, "%llu",
                    (u_longlong_t)intval);
            } else {
                (void) snprintf(buf, len, "%llu%%",
                    (u_longlong_t)intval);
            }
            break;

        case ZPOOL_PROP_FRAGMENTATION:
            if (intval == UINT64_MAX) {
                (void) strlcpy(buf, "-", len);
            } else if (literal) {
                (void) snprintf(buf, len, "%llu",
                    (u_longlong_t)intval);
            } else {
                (void) snprintf(buf, len, "%llu%%",
                    (u_longlong_t)intval);
            }
            break;

        case ZPOOL_PROP_DEDUPRATIO:
            if (literal)
                (void) snprintf(buf, len, "%llu.%02llu",
                    (u_longlong_t)(intval / 100),
                    (u_longlong_t)(intval % 100));
            else
                (void) snprintf(buf, len, "%llu.%02llux",
                    (u_longlong_t)(intval / 100),
                    (u_longlong_t)(intval % 100));
            break;

        case ZPOOL_PROP_HEALTH:
            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
            verify(nvlist_lookup_uint64_array(nvroot,
                ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
                == 0);

            (void) strlcpy(buf, zpool_state_to_name(intval,
                vs->vs_aux), len);
            break;

        case ZPOOL_PROP_VERSION:
            if (intval >= SPA_VERSION_FEATURES) {
                (void) snprintf(buf, len, "-");
                break;
            }
            /* FALLTHROUGH */
        default:
            (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
        }
        break;

    case PROP_TYPE_INDEX:
        intval = zpool_get_prop_int(zhp, prop, &src);
        if (zpool_prop_index_to_string(prop, intval, &strval)
            != 0)
            return (-1);
        (void) strlcpy(buf, strval, len);
        break;

    default:
        abort();
    }

    if (srctype)
        *srctype = src;

    return (0);
}
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
    int len = strlen(pool);

    if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
        return (B_FALSE);

    if (strncmp(pool, bootfs, len) == 0 &&
        (bootfs[len] == '/' || bootfs[len] == '\0'))
        return (B_TRUE);

    return (B_FALSE);
}
#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
    if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return (read_efi_label(config, NULL) >= 0);

    for (c = 0; c < children; c++) {
        if (pool_uses_efi(child[c]))
            return (B_TRUE);
    }
    return (B_FALSE);
}
#endif
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
    char bootfs[ZFS_MAX_DATASET_NAME_LEN];

    return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
        sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
        sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
    nvpair_t *elem;
    nvlist_t *retprops;
    zpool_prop_t prop;
    char *strval;
    uint64_t intval;
    char *slash, *check;
    struct stat64 statbuf;
    zpool_handle_t *zhp;

    if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
        (void) no_memory(hdl);
        return (NULL);
    }

    elem = NULL;
    while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
        const char *propname = nvpair_name(elem);

        prop = zpool_name_to_prop(propname);
        if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
            int err;
            char *fname = strchr(propname, '@') + 1;

            err = zfeature_lookup_name(fname, NULL);
            if (err != 0) {
                ASSERT3U(err, ==, ENOENT);
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "invalid feature '%s'"), fname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (nvpair_type(elem) != DATA_TYPE_STRING) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' must be a string"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            (void) nvpair_value_string(elem, &strval);
            if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set to "
                    "'enabled'"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (nvlist_add_uint64(retprops, propname, 0) != 0) {
                (void) no_memory(hdl);
                goto error;
            }
            continue;
        }
        /*
         * Make sure this property is valid and applies to this type.
         */
        if (prop == ZPROP_INVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "invalid property '%s'"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
            goto error;
        }

        if (zpool_prop_readonly(prop)) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                "is readonly"), propname);
            (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
            goto error;
        }

        if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
            &strval, &intval, errbuf) != 0)
            goto error;

        /*
         * Perform additional checking for specific properties.
         */
        switch (prop) {
        case ZPOOL_PROP_VERSION:
            if (intval < version ||
                !SPA_VERSION_IS_SUPPORTED(intval)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' number %d is invalid."),
                    propname, intval);
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                goto error;
            }
            break;

        case ZPOOL_PROP_ASHIFT:
            if (!flags.create) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set at "
                    "creation time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (intval != 0 && (intval < 9 || intval > 13)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' number %d is invalid."),
                    propname, intval);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            break;
        case ZPOOL_PROP_BOOTFS:
            if (flags.create || flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' cannot be set at creation "
                    "or import time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (version < SPA_VERSION_BOOTFS) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "pool must be upgraded to support "
                    "'%s' property"), propname);
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                goto error;
            }

            /*
             * The bootfs property value has to be a dataset name and
             * the dataset has to be in the same pool that it is
             * being set on.
             */
            if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                strval)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                    "is an invalid name"), strval);
                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
                goto error;
            }

            if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "could not open pool '%s'"), poolname);
                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
                goto error;
            }
            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
            /*
             * bootfs property cannot be set on a disk which has
             * been EFI labeled.
             */
            if (pool_uses_efi(nvroot)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' not supported on "
                    "EFI labeled devices"), propname);
                (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
                zpool_close(zhp);
                goto error;
            }
#endif
            zpool_close(zhp);
            break;
        case ZPOOL_PROP_ALTROOT:
            if (!flags.create && !flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set during pool "
                    "creation or import"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "bad alternate root '%s'"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }
            break;
        case ZPOOL_PROP_CACHEFILE:
            if (strval[0] == '\0')
                break;

            if (strcmp(strval, "none") == 0)
                break;

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' must be empty, an "
                    "absolute path, or 'none'"), propname);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }

            slash = strrchr(strval, '/');

            if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                strcmp(slash, "/..") == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid file"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }

            *slash = '\0';

            if (strval[0] != '\0' &&
                (stat64(strval, &statbuf) != 0 ||
                !S_ISDIR(statbuf.st_mode))) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid directory"),
                    strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }

            *slash = '/';
            break;
        case ZPOOL_PROP_COMMENT:
            for (check = strval; *check != '\0'; check++) {
                if (!isprint(*check)) {
                    zfs_error_aux(hdl,
                        dgettext(TEXT_DOMAIN,
                        "comment may only have printable "
                        "characters"));
                    (void) zfs_error(hdl, EZFS_BADPROP,
                        errbuf);
                    goto error;
                }
            }
            if (strlen(strval) > ZPROP_MAX_COMMENT) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "comment must not exceed %d characters"),
                    ZPROP_MAX_COMMENT);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            break;
        case ZPOOL_PROP_READONLY:
            if (!flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set at "
                    "import time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            break;

        case ZPOOL_PROP_TNAME:
            if (!flags.create) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set at "
                    "creation time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            break;
        }
    }

    return (retprops);
error:
    nvlist_free(retprops);
    return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
    zfs_cmd_t zc = {"\0"};
    int ret = -1;
    char errbuf[1024];
    nvlist_t *nvl = NULL;
    nvlist_t *realprops;
    uint64_t version;
    prop_flags_t flags = { 0 };

    (void) snprintf(errbuf, sizeof (errbuf),
        dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
        zhp->zpool_name);

    if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
        return (no_memory(zhp->zpool_hdl));

    if (nvlist_add_string(nvl, propname, propval) != 0) {
        nvlist_free(nvl);
        return (no_memory(zhp->zpool_hdl));
    }

    version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
    if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
        zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
        nvlist_free(nvl);
        return (-1);
    }

    nvlist_free(nvl);
    nvl = realprops;

    /*
     * Execute the corresponding ioctl() to set this property.
     */
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
        nvlist_free(nvl);
        return (-1);
    }

    ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

    zcmd_free_nvlists(&zc);
    nvlist_free(nvl);

    if (ret)
        (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
    else
        (void) zpool_props_refresh(zhp);

    return (ret);
}
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    zprop_list_t *entry;
    char buf[ZFS_MAXPROPLEN];
    nvlist_t *features = NULL;
    nvpair_t *nvp;
    zprop_list_t **last;
    boolean_t firstexpand = (NULL == *plp);
    int i;

    if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
        return (-1);

    last = plp;
    while (*last != NULL)
        last = &(*last)->pl_next;

    features = zpool_get_features(zhp);

    if ((*plp)->pl_all && firstexpand) {
        for (i = 0; i < SPA_FEATURES; i++) {
            zprop_list_t *entry = zfs_alloc(hdl,
                sizeof (zprop_list_t));
            entry->pl_prop = ZPROP_INVAL;
            entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
                spa_feature_table[i].fi_uname);
            entry->pl_width = strlen(entry->pl_user_prop);
            entry->pl_all = B_TRUE;

            *last = entry;
            last = &entry->pl_next;
        }
    }

    /* add any unsupported features */
    for (nvp = nvlist_next_nvpair(features, NULL);
        nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
        char *propname;
        boolean_t found;

        if (zfeature_is_supported(nvpair_name(nvp)))
            continue;

        propname = zfs_asprintf(hdl, "unsupported@%s",
            nvpair_name(nvp));

        /*
         * Before adding the property to the list make sure that no
         * other pool already added the same property.
         */
        found = B_FALSE;
        entry = *plp;
        while (entry != NULL) {
            if (entry->pl_user_prop != NULL &&
                strcmp(propname, entry->pl_user_prop) == 0) {
                found = B_TRUE;
                break;
            }
            entry = entry->pl_next;
        }

        if (found) {
            free(propname);
            continue;
        }

        entry = zfs_alloc(hdl, sizeof (zprop_list_t));
        entry->pl_prop = ZPROP_INVAL;
        entry->pl_user_prop = propname;
        entry->pl_width = strlen(entry->pl_user_prop);
        entry->pl_all = B_TRUE;

        *last = entry;
        last = &entry->pl_next;
    }

    for (entry = *plp; entry != NULL; entry = entry->pl_next) {
        if (entry->pl_prop != ZPROP_INVAL &&
            zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
            NULL, B_FALSE) == 0) {
            if (strlen(buf) > entry->pl_width)
                entry->pl_width = strlen(buf);
        }
    }

    return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
    uint64_t refcount;
    boolean_t found = B_FALSE;
    nvlist_t *features = zpool_get_features(zhp);
    boolean_t supported;
    const char *feature = strchr(propname, '@') + 1;

    supported = zpool_prop_feature(propname);
    ASSERT(supported || zpool_prop_unsupported(propname));

    /*
     * Convert from feature name to feature guid. This conversion is
     * unnecessary for unsupported@... properties because they already
     * use guids.
     */
    if (supported) {
        int ret;
        spa_feature_t fid;

        ret = zfeature_lookup_name(feature, &fid);
        if (ret != 0) {
            (void) strlcpy(buf, "-", len);
            return (ENOTSUP);
        }
        feature = spa_feature_table[fid].fi_guid;
    }

    if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
        found = B_TRUE;

    if (supported) {
        if (!found) {
            (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
        } else {
            if (refcount == 0)
                (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
            else
                (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
        }
    } else {
        if (found) {
            if (refcount == 0)
                (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
            else
                (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
        } else {
            (void) strlcpy(buf, "-", len);
            return (ENOTSUP);
        }
    }

    return (0);
}
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define NEW_START_BLOCK         2048
#define PARTITION_END_ALIGNMENT 2048
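/*
 * Illustrative arithmetic (not in the original source): with 512-byte
 * sectors, block 2048 corresponds to an offset of 2048 * 512 B = 1 MiB,
 * so both the partition start above and the end alignment fall on 1 MiB
 * boundaries.
 */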
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
    namecheck_err_t why;
    char what;
    int ret;

    ret = pool_namecheck(pool, &why, &what);

    /*
     * The rules for reserved pool names were extended at a later point.
     * But we need to support users with existing pools that may now be
     * invalid.  So we only check for this expanded set of names during a
     * create (or import), and only in userland.
     */
    if (ret == 0 && !isopen &&
        (strncmp(pool, "mirror", 6) == 0 ||
        strncmp(pool, "raidz", 5) == 0 ||
        strncmp(pool, "spare", 5) == 0 ||
        strcmp(pool, "log") == 0)) {
        if (hdl != NULL)
            zfs_error_aux(hdl,
                dgettext(TEXT_DOMAIN, "name is reserved"));
        return (B_FALSE);
    }

    if (ret != 0) {
        if (hdl != NULL) {
            switch (why) {
            case NAME_ERR_TOOLONG:
                zfs_error_aux(hdl,
                    dgettext(TEXT_DOMAIN, "name is too long"));
                break;

            case NAME_ERR_INVALCHAR:
                zfs_error_aux(hdl,
                    dgettext(TEXT_DOMAIN, "invalid character "
                    "'%c' in pool name"), what);
                break;

            case NAME_ERR_NOLETTER:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "name must begin with a letter"));
                break;

            case NAME_ERR_RESERVED:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "name is reserved"));
                break;

            case NAME_ERR_DISKLIKE:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "pool name is reserved"));
                break;

            case NAME_ERR_LEADING_SLASH:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "leading slash in name"));
                break;

            case NAME_ERR_EMPTY_COMPONENT:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "empty component in name"));
                break;

            case NAME_ERR_TRAILING_SLASH:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "trailing slash in name"));
                break;

            case NAME_ERR_MULTIPLE_AT:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "multiple '@' delimiters in name"));
                break;

            case NAME_ERR_NO_AT:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "permission set is missing '@'"));
                break;
            }
        }
        return (B_FALSE);
    }

    return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
    zpool_handle_t *zhp;
    boolean_t missing;

    /*
     * Make sure the pool name is valid.
     */
    if (!zpool_name_valid(hdl, B_TRUE, pool)) {
        (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
            pool);
        return (NULL);
    }

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
        return (NULL);

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zpool_close(zhp);
        return (NULL);
    }

    if (missing) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
        (void) zfs_error_fmt(hdl, EZFS_NOENT,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
        zpool_close(zhp);
        return (NULL);
    }

    return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
    zpool_handle_t *zhp;
    boolean_t missing;

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
        return (-1);

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zpool_close(zhp);
        return (-1);
    }

    if (missing) {
        zpool_close(zhp);
        *ret = NULL;
        return (0);
    }

    *ret = zhp;
    return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
    zpool_handle_t *zhp;

    if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
        return (NULL);

    if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
        (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
        zpool_close(zhp);
        return (NULL);
    }

    return (zhp);
}
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
    nvlist_free(zhp->zpool_config);
    nvlist_free(zhp->zpool_old_config);
    nvlist_free(zhp->zpool_props);
    free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
    return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
    return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
    zfs_cmd_t zc = {"\0"};
    nvlist_t *zc_fsprops = NULL;
    nvlist_t *zc_props = NULL;
    char msg[1024];
    int ret = -1;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot create '%s'"), pool);

    if (!zpool_name_valid(hdl, B_FALSE, pool))
        return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
        return (-1);

    if (props) {
        prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

        if ((zc_props = zpool_valid_proplist(hdl, pool, props,
            SPA_VERSION_1, flags, msg)) == NULL) {
            goto create_failed;
        }
    }

    if (fsprops) {
        uint64_t zoned;
        char *zonestr;

        zoned = ((nvlist_lookup_string(fsprops,
            zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
            strcmp(zonestr, "on") == 0);

        if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
            fsprops, zoned, NULL, NULL, msg)) == NULL) {
            goto create_failed;
        }
        if (!zc_props &&
            (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
            goto create_failed;
        }
        if (nvlist_add_nvlist(zc_props,
            ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
            goto create_failed;
        }
    }

    if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
        goto create_failed;

    (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

    if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);

        switch (errno) {
        case EBUSY:
            /*
             * This can happen if the user has specified the same
             * device multiple times.  We can't reliably detect this
             * until we try to add it and see we already have a
             * label.  This can also happen if the device is
             * part of an active md or lvm device.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device, or "
                "one of\nthe devices is part of an active md or "
                "lvm device"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case ERANGE:
            /*
             * This happens if the record size is smaller or larger
             * than the allowed size range, or not a power of 2.
             *
             * NOTE: although zfs_valid_proplist is called earlier,
             * this case may have slipped through since the
             * pool does not exist yet and it is therefore
             * impossible to read properties e.g. max blocksize
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "record size invalid"));
            return (zfs_error(hdl, EZFS_BADPROP, msg));

        case EOVERFLOW:
            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            {
                char buf[64];

                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "one or more devices is less than the "
                    "minimum size (%s)"), buf);
            }
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case ENOSPC:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is out of space"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case ENOTBLK:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        default:
            return (zpool_standard_error(hdl, errno, msg));
        }
    }

create_failed:
    zcmd_free_nvlists(&zc);
    nvlist_free(zc_props);
    nvlist_free(zc_fsprops);
    return (ret);
}
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
    zfs_cmd_t zc = {"\0"};
    zfs_handle_t *zfp = NULL;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char msg[1024];

    if (zhp->zpool_state == POOL_STATE_ACTIVE &&
        (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
        return (-1);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_history = (uint64_t)(uintptr_t)log_str;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot destroy '%s'"), zhp->zpool_name);

        if (errno == EROFS) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
        } else {
            (void) zpool_standard_error(hdl, errno, msg);
        }

        if (zfp)
            zfs_close(zfp);
        return (-1);
    }

    if (zfp) {
        remove_mountpoint(zfp);
        zfs_close(zfp);
    }

    return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
    zfs_cmd_t zc = {"\0"};
    int ret;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char msg[1024];
    nvlist_t **spares, **l2cache;
    uint_t nspares, nl2cache;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot add to '%s'"), zhp->zpool_name);

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_SPARES &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add hot spares"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));
    }

#if defined(__sun__) || defined(__sun)
    if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
        ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
        uint64_t s;

        for (s = 0; s < nspares; s++) {
            char *path;

            if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
                &path) == 0 && pool_uses_efi(spares[s])) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "device '%s' contains an EFI label and "
                    "cannot be used on root pools."),
                    zpool_vdev_name(hdl, NULL, spares[s], 0));
                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
            }
        }
    }
#endif

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_L2CACHE &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
        &l2cache, &nl2cache) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add cache devices"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));
    }

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
        return (-1);
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
        switch (errno) {
        case EBUSY:
            /*
             * This can happen if the user has specified the same
             * device multiple times.  We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;

        case EOVERFLOW:
            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            {
                char buf[64];

                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "device is less than the minimum "
                    "size (%s)"), buf);
            }
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;

        case ENOTSUP:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool must be upgraded to add these vdevs"));
            (void) zfs_error(hdl, EZFS_BADVERSION, msg);
            break;

        case ENOTBLK:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;

        default:
            (void) zpool_standard_error(hdl, errno, msg);
        }

        ret = -1;
    } else {
        ret = 0;
    }

    zcmd_free_nvlists(&zc);

    return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot export '%s'"), zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = force;
    zc.zc_guid = hardforce;
    zc.zc_history = (uint64_t)(uintptr_t)log_str;

    if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
        switch (errno) {
        case EXDEV:
            zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                "use '-f' to override the following errors:\n"
                "'%s' has an active shared spare which could be"
                " used by other pools once '%s' is exported."),
                zhp->zpool_name, zhp->zpool_name);
            return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
                msg));
        default:
            return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
                msg));
        }
    }

    return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
    return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
    return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
    nvlist_t *nv = NULL;
    int64_t loss = -1;
    uint64_t rewindto;
    struct tm t;
    char timestr[128];

    if (!hdl->libzfs_printerr || config == NULL)
        return;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
        nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
        return;
    }

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
        return;
    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, "%c", &t) != 0) {
        if (dryrun) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "Would be able to return %s "
                "to its state as of %s.\n"),
                name, timestr);
        } else {
            (void) printf(dgettext(TEXT_DOMAIN,
                "Pool %s returned to its state as of %s.\n"),
                name, timestr);
        }
        if (loss > 120) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded",
                ((longlong_t)loss + 30) / 60);
            (void) printf(dgettext(TEXT_DOMAIN,
                "minutes of transactions.\n"));
        } else if (loss > 0) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded",
                (longlong_t)loss);
            (void) printf(dgettext(TEXT_DOMAIN,
                "seconds of transactions.\n"));
        }
    }
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
    nvlist_t *nv = NULL;
    int64_t loss = -1;
    uint64_t edata = UINT64_MAX;
    uint64_t rewindto;
    struct tm t;
    char timestr[128];

    if (!hdl->libzfs_printerr)
        return;

    if (reason >= 0)
        (void) printf(dgettext(TEXT_DOMAIN, "action: "));
    else
        (void) printf(dgettext(TEXT_DOMAIN, "\t"));

    /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
        nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
        goto no_info;

    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
        &edata);

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery is possible, but will result in some data loss.\n"));

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, "%c", &t) != 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReturning the pool to its state as of %s\n"
            "\tshould correct the problem.  "),
            timestr);
    } else {
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReverting the pool to an earlier state "
            "should correct the problem.\n\t"));
    }

    if (loss > 120) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld minutes of data\n"
            "\tmust be discarded, irreversibly.  "),
            ((longlong_t)loss + 30) / 60);
    } else if (loss > 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld seconds of data\n"
            "\tmust be discarded, irreversibly.  "),
            (longlong_t)loss);
    }

    if (edata != 0 && edata != UINT64_MAX) {
        if (edata == 1) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, at least\n"
                "\tone persistent user-data error will remain.  "));
        } else {
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, several\n"
                "\tpersistent user-data errors will remain.  "));
        }
    }

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
        reason >= 0 ? "clear" : "import", name);

    (void) printf(dgettext(TEXT_DOMAIN,
        "A scrub of the pool\n"
        "\tis strongly recommended after recovery.\n"));
    return;

no_info:
    (void) printf(dgettext(TEXT_DOMAIN,
        "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
    nvlist_t *props = NULL;
    int ret;

    if (altroot != NULL) {
        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        }

        if (nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
            nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
            nvlist_free(props);
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        }
    }

    ret = zpool_import_props(hdl, config, newname, props,
        ZFS_IMPORT_NORMAL);
    nvlist_free(props);
    return (ret);
}
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
    nvlist_t **child;
    uint_t c, children;
    char *vname;
    uint64_t is_log = 0;

    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
        &is_log);

    if (name != NULL)
        (void) printf("\t%*s%s%s\n", indent, "", name,
            is_log ? " [log]" : "");

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return;

    for (c = 0; c < children; c++) {
        vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
        print_vdev_tree(hdl, vname, child[c], indent + 2);
        free(vname);
    }
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
    nvlist_t *nvinfo, *unsup_feat;
    nvpair_t *nvp;

    verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
        0);
    verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
        &unsup_feat) == 0);

    for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
        nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
        char *desc;

        verify(nvpair_type(nvp) == DATA_TYPE_STRING);
        verify(nvpair_value_string(nvp, &desc) == 0);

        if (strlen(desc) > 0)
            (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
        else
            (void) printf("\t%s\n", nvpair_name(nvp));
    }
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
    zfs_cmd_t zc = {"\0"};
    zpool_rewind_policy_t policy;
    nvlist_t *nv = NULL;
    nvlist_t *nvinfo = NULL;
    nvlist_t *missing = NULL;
    char *thename;
    char *origname;
    int ret;
    int error = 0;
    char errbuf[1024];

    verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
        &origname) == 0);

    (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
        "cannot import pool '%s'"), origname);

    if (newname != NULL) {
        if (!zpool_name_valid(hdl, B_FALSE, newname))
            return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        thename = (char *)newname;
    } else {
        thename = origname;
    }

    if (props != NULL) {
        uint64_t version;
        prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
            &version) == 0);

        if ((props = zpool_valid_proplist(hdl, origname,
            props, version, flags, errbuf)) == NULL)
            return (-1);
        if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
            nvlist_free(props);
            return (-1);
        }
        nvlist_free(props);
    }

    (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
        &zc.zc_guid) == 0);

    if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
        zcmd_free_nvlists(&zc);
        return (-1);
    }
    if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
        zcmd_free_nvlists(&zc);
        return (-1);
    }

    zc.zc_cookie = flags;
    while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
        errno == ENOMEM) {
        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
            zcmd_free_nvlists(&zc);
            return (-1);
        }
    }
    if (ret != 0)
        error = errno;

    (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

    zcmd_free_nvlists(&zc);

    zpool_get_rewind_policy(config, &policy);

    if (error) {
        char desc[1024];

        /*
         * Dry-run failed, but we print out what success
         * looks like if we found a best txg
         */
        if (policy.zrp_request & ZPOOL_TRY_REWIND) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,
                B_TRUE, nv);
            nvlist_free(nv);
            return (-1);
        }

        if (newname == NULL)
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                thename);
        else
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                origname, thename);

        switch (error) {
        case ENOTSUP:
            if (nv != NULL && nvlist_lookup_nvlist(nv,
                ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
                (void) printf(dgettext(TEXT_DOMAIN, "This "
                    "pool uses the following feature(s) not "
                    "supported by this system:\n"));
                zpool_print_unsup_feat(nv);
                if (nvlist_exists(nvinfo,
                    ZPOOL_CONFIG_CAN_RDONLY)) {
                    (void) printf(dgettext(TEXT_DOMAIN,
                        "All unsupported features are only "
                        "required for writing to the pool."
                        "\nThe pool can be imported using "
                        "'-o readonly=on'.\n"));
                }
            }
            /*
             * Unsupported version.
             */
            (void) zfs_error(hdl, EZFS_BADVERSION, desc);
            break;

        case EINVAL:
            (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
            break;

        case EROFS:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, desc);
            break;

        case ENXIO:
            if (nv && nvlist_lookup_nvlist(nv,
                ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                nvlist_lookup_nvlist(nvinfo,
                ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "The devices below are missing, use "
                    "'-m' to import the pool anyway:\n"));
                print_vdev_tree(hdl, NULL, missing, 2);
                (void) printf("\n");
            }
            (void) zpool_standard_error(hdl, error, desc);
            break;

        case EEXIST:
            (void) zpool_standard_error(hdl, error, desc);
            break;

        case EBUSY:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices are already in use\n"));
            (void) zfs_error(hdl, EZFS_BADDEV, desc);
            break;

        case ENAMETOOLONG:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "new name of at least one dataset is longer than "
                "the maximum allowable length"));
            (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
            break;

        default:
            (void) zpool_standard_error(hdl, error, desc);
            zpool_explain_recover(hdl,
                newname ? origname : thename, -error, nv);
            break;
        }

        nvlist_free(nv);
        ret = -1;
    } else {
        zpool_handle_t *zhp;

        /*
         * This should never fail, but play it safe anyway.
         */
        if (zpool_open_silent(hdl, thename, &zhp) != 0)
            ret = -1;
        else if (zhp != NULL)
            zpool_close(zhp);
        if (policy.zrp_request &
            (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,
                ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
        }
        nvlist_free(nv);
        return (0);
    }

    return (ret);
}
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = func;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
        (errno == ENOENT && func != POOL_SCAN_NONE))
        return (0);

    if (func == POOL_SCAN_SCRUB) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
    } else if (func == POOL_SCAN_NONE) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
            zc.zc_name);
    } else {
        assert(!"unexpected result");
    }

    if (errno == EBUSY) {
        nvlist_t *nvroot;
        pool_scan_stat_t *ps = NULL;
        uint_t psc;

        verify(nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
        if (ps && ps->pss_func == POOL_SCAN_SCRUB)
            return (zfs_error(hdl, EZFS_SCRUBBING, msg));
        else
            return (zfs_error(hdl, EZFS_RESILVERING, msg));
    } else if (errno == ENOENT) {
        return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
    } else {
        return (zpool_standard_error(hdl, errno, msg));
    }
}
/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
    uint_t c, children;
    nvlist_t **child;
    nvlist_t *ret;
    uint64_t is_log;
    char *srchkey;
    nvpair_t *pair = nvlist_next_nvpair(search, NULL);

    /* Nothing to look for */
    if (search == NULL || pair == NULL)
        return (NULL);

    /* Obtain the key we will use to search */
    srchkey = nvpair_name(pair);

    switch (nvpair_type(pair)) {
    case DATA_TYPE_UINT64:
        if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
            uint64_t srchval, theguid;

            verify(nvpair_value_uint64(pair, &srchval) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
                &theguid) == 0);
            if (theguid == srchval)
                return (nv);
        }
        break;

    case DATA_TYPE_STRING: {
        char *srchval, *val;

        verify(nvpair_value_string(pair, &srchval) == 0);
        if (nvlist_lookup_string(nv, srchkey, &val) != 0)
            break;

        /*
         * Search for the requested value. Special cases:
         *
         * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
         *   "-part1", or "p1".  The suffix is hidden from the user,
         *   but included in the string, so this matches around it.
         * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
         *   is used to check all possible expanded paths.
         * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
         *
         * Otherwise, all other searches are simple string compares.
         */
        if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
            uint64_t wholedisk = 0;

            (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                &wholedisk);
            if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
                return (nv);

        } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
            char *type, *idx, *end, *p;
            uint64_t id, vdev_id;

            /*
             * Determine our vdev type, keeping in mind
             * that the srchval is composed of a type and
             * vdev id pair (i.e. mirror-4).
             */
            if ((type = strdup(srchval)) == NULL)
                return (NULL);

            if ((p = strrchr(type, '-')) == NULL) {
                free(type);
                break;
            }
            idx = p + 1;
            *p = '\0';

            /*
             * If the types don't match then keep looking.
             */
            if (strncmp(val, type, strlen(val)) != 0) {
                free(type);
                break;
            }

            verify(strncmp(type, VDEV_TYPE_RAIDZ,
                strlen(VDEV_TYPE_RAIDZ)) == 0 ||
                strncmp(type, VDEV_TYPE_MIRROR,
                strlen(VDEV_TYPE_MIRROR)) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
                &id) == 0);

            errno = 0;
            vdev_id = strtoull(idx, &end, 10);

            free(type);
            if (errno != 0)
                return (NULL);

            /*
             * Now verify that we have the correct vdev id.
             */
            if (vdev_id == id)
                return (nv);
        }

        if (strcmp(srchval, val) == 0)
            return (nv);
        break;
    }

    default:
        break;
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return (NULL);

    for (c = 0; c < children; c++) {
        if ((ret = vdev_to_nvlist_iter(child[c], search,
            avail_spare, l2cache, NULL)) != NULL) {
            /*
             * The 'is_log' value is only set for the toplevel
             * vdev, not the leaf vdevs.  So we always lookup the
             * log device from the root of the vdev tree (where
             * 'log' is non-NULL).
             */
            if (log != NULL &&
                nvlist_lookup_uint64(child[c],
                ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
                is_log) {
                *log = B_TRUE;
            }
            return (ret);
        }
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
                *avail_spare = B_TRUE;
                return (ret);
            }
        }
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
                *l2cache = B_TRUE;
                return (ret);
            }
        }
    }

    return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
    nvlist_t *search, *nvroot, *ret;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
    verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
        &nvroot) == 0);

    *avail_spare = B_FALSE;
    *l2cache = B_FALSE;
    if (log != NULL)
        *log = B_FALSE;
    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);

    return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
static boolean_t
zpool_vdev_is_interior(const char *name)
{
    if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
        strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
        return (B_TRUE);
    return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
    char *end;
    nvlist_t *nvroot, *search, *ret;
    uint64_t guid;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

    guid = strtoull(path, &end, 0);
    if (guid != 0 && *end == '\0') {
        verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
    } else if (zpool_vdev_is_interior(path)) {
        verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
    } else {
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
    }

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
        &nvroot) == 0);

    *avail_spare = B_FALSE;
    *l2cache = B_FALSE;
    if (log != NULL)
        *log = B_FALSE;
    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);

    return (ret);
}
static boolean_t
vdev_online(nvlist_t *nv)
{
    uint64_t ival;

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
        return (B_FALSE);

    return (B_TRUE);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
    size_t bytes_left, pos, rsz;
    char *tmppath;
    const char *format;

    if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
        &tmppath) != 0)
        return (EZFS_NODEVICE);

    pos = *bytes_written;
    bytes_left = physpath_size - pos;
    format = (pos == 0) ? "%s" : " %s";

    rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
    *bytes_written += rsz;

    if (rsz >= bytes_left) {
        /* if physpath was not copied properly, clear it */
        if (bytes_left != 0) {
            physpath[pos] = 0;
        }
        return (EZFS_NOSPC);
    }
    return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
    char *type;
    int ret;

    if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
        return (EZFS_INVALCONFIG);

    if (strcmp(type, VDEV_TYPE_DISK) == 0) {
        /*
         * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
         * For a spare vdev, we only want to boot from the active
         * spare device.
         */
        if (is_spare) {
            uint64_t spare = 0;
            (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
                &spare);
            if (!spare)
                return (EZFS_INVALCONFIG);
        }

        if (vdev_online(nv)) {
            if ((ret = vdev_get_one_physpath(nv, physpath,
                phypath_size, rsz)) != 0)
                return (ret);
        }
    } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
        strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
        (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
        nvlist_t **child;
        uint_t count;
        int i;

        if (nvlist_lookup_nvlist_array(nv,
            ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
            return (EZFS_INVALCONFIG);

        for (i = 0; i < count; i++) {
            ret = vdev_get_physpaths(child[i], physpath,
                phypath_size, rsz, is_spare);
            if (ret == EZFS_NOSPC)
                return (ret);
        }
    }

    return (EZFS_POOL_INVALARG);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
    size_t rsz;
    nvlist_t *vdev_root;
    nvlist_t **child;
    uint_t count;
    char *type;

    rsz = 0;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
        &vdev_root) != 0)
        return (EZFS_INVALCONFIG);

    if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
        nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
        &child, &count) != 0)
        return (EZFS_INVALCONFIG);

#if defined(__sun__) || defined(__sun)
    /*
     * root pool can not have EFI labeled disks and can only have
     * a single top-level vdev.
     */
    if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
        pool_uses_efi(vdev_root))
        return (EZFS_POOL_INVALARG);
#endif

    (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
        B_FALSE);

    /* No online devices */
    if (rsz == 0)
        return (EZFS_NODEVICE);

    return (0);
}
/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
    return (zpool_get_config_physpath(zhp->zpool_config, physpath,
        phypath_size));
}
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
    int fd, error;

    if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to open device: %d"), path, errno);
        return (zfs_error(hdl, EZFS_OPENFAILED, msg));
    }

    /*
     * It's possible that we might encounter an error if the device
     * does not have any unallocated space left. If so, we simply
     * ignore that error and continue on.
     *
     * Also, we don't call efi_rescan() - that would just return EBUSY.
     * The module will do it for us in vdev_disk_open().
     */
    error = efi_use_whole_disk(fd);
    (void) close(fd);
    if (error && error != VT_ENOSPC) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to read disk capacity"), path);
        return (zfs_error(hdl, EZFS_NOCAP, msg));
    }
    return (0);
}
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache, islog;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    int error;

    if (flags & ZFS_ONLINE_EXPAND) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
    } else {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot online %s"), path);
    }

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        &islog)) == NULL)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (flags & ZFS_ONLINE_EXPAND ||
        zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
        uint64_t wholedisk = 0;

        (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
            &wholedisk);

        /*
         * XXX - L2ARC 1.0 devices can't support expansion.
         */
        if (l2cache) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cannot expand cache devices"));
            return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
        }

        if (wholedisk) {
            const char *fullpath = path;
            char buf[MAXPATHLEN];

            if (path[0] != '/') {
                error = zfs_resolve_shortname(path, buf,
                    sizeof (buf));
                if (error != 0)
                    return (zfs_error(hdl, EZFS_NODEVICE,
                        msg));

                fullpath = buf;
            }

            error = zpool_relabel_disk(hdl, fullpath, msg);
            if (error != 0)
                return (error);
        }
    }

    zc.zc_cookie = VDEV_STATE_ONLINE;
    zc.zc_obj = flags;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
        if (errno == EINVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
                "from this pool into a new one.  Use '%s' "
                "instead"), "zpool detach");
            return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
        }
        return (zpool_standard_error(hdl, errno, msg));
    }

    *newstate = zc.zc_cookie;
    return (0);
}
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        NULL)) == NULL)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    zc.zc_cookie = VDEV_STATE_OFFLINE;
    zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
        return (0);

    switch (errno) {
    case EBUSY:
        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

    case EEXIST:
        /*
         * The log device has unplayed logs
         */
        return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

    default:
        return (zpool_standard_error(hdl, errno, msg));
    }
}
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
    zfs_cmd_t zc = {"\0"};
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_guid = guid;
    zc.zc_cookie = VDEV_STATE_FAULTED;
    zc.zc_obj = aux;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
        return (0);

    switch (errno) {
    case EBUSY:
        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

    default:
        return (zpool_standard_error(hdl, errno, msg));
    }
}
2593 * Mark the given vdev degraded.
2596 zpool_vdev_degrade(zpool_handle_t
*zhp
, uint64_t guid
, vdev_aux_t aux
)
2598 zfs_cmd_t zc
= {"\0"};
2600 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2602 (void) snprintf(msg
, sizeof (msg
),
2603 dgettext(TEXT_DOMAIN
, "cannot degrade %llu"), (u_longlong_t
)guid
);
2605 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2607 zc
.zc_cookie
= VDEV_STATE_DEGRADED
;
2610 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
2613 return (zpool_standard_error(hdl
, errno
, msg
));
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

#if defined(__sun__) || defined(__sun)
	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}
#endif

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
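/*
 * Illustrative sketch (kept under #if 0, not built): the 'nvroot' argument
 * is a root vdev nvlist whose single child describes the new disk.  The
 * real zpool(8) command builds this through its vdev-parsing code, which
 * also fills in fields such as whole_disk and ashift; the minimal shape is
 * roughly the following, with a hypothetical helper name.
 */
#if 0
static nvlist_t *
example_attach_nvroot(const char *newpath)
{
	nvlist_t *disk = fnvlist_alloc();
	nvlist_t *root = fnvlist_alloc();

	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, newpath);

	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);

	fnvlist_free(disk);
	return (root);	/* pass as 'nvroot'; replacing = 0 for a plain attach */
}
#endif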
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;

		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			found++;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
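/*
 * Illustrative caller sketch (kept under #if 0, not built): splitting a
 * mirrored pool into a new pool, letting the library pick the last disk
 * of each mirror.  'newroot' starts out NULL, so on success the caller
 * owns and must free the generated nvlist.  The pool name is hypothetical.
 */
#if 0
static int
example_split(zpool_handle_t *zhp)
{
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;
	int err;

	err = zpool_vdev_split(zhp, "tanksplit", &newroot, NULL, flags);
	if (err == 0 && newroot != NULL)
		nvlist_free(newroot);
	return (err);
}
#endif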
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
#if defined(__sun__) || defined(__sun)
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0"};

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
#endif /* sun */
/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
 *
 * caller must free the returned string
 */
char *
zfs_strip_partition(libzfs_handle_t *hdl, char *path)
{
	char *tmp = zfs_strdup(hdl, path);
	char *part = NULL, *d = NULL;

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
	    tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d);
	} else if (strncmp("xvd", tmp, 3) == 0) {
		for (d = &tmp[3]; isalpha(*d); part = ++d);
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++);
		if (*d == '\0')
			*part = '\0';
	}

	return (tmp);
}
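/*
 * Examples of the three suffix forms handled above (derived from the cases
 * in zfs_strip_partition() itself), plus an illustrative caller sketch kept
 * under #if 0 so it is not built:
 *
 *   "sda1"   -> "sda"     (scsi/ide/virtio style)
 *   "xvda1"  -> "xvda"    (xen style)
 *   "md0p1"  -> "md0"     ("pX" preceded by a digit)
 *   "...-part1" strips the trailing "-part1"
 */
#if 0
static void
example_strip(libzfs_handle_t *hdl)
{
	char name[] = "sda1";
	char *stripped = zfs_strip_partition(hdl, name);

	/* stripped now holds "sda"; the caller must free it */
	free(stripped);
}
#endif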
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
		/*
		 * Live VDEV path updates to a kernel VDEV during a
		 * zpool_vdev_name lookup are not supported on Linux.
		 */
		char *devid;
		vdev_stat_t *vs;
		uint_t vsc;

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}
#endif /* sun */

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(hdl, path));
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	uint64_t i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				if (dst == NULL)
					return (-1);
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
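/*
 * Illustrative consumer sketch (kept under #if 0, not built): walking the
 * error list built above.  Every entry is added under the same nvpair name
 * ("ejk"), so a caller iterates nvpairs rather than looking names up, then
 * resolves each <dataset, object> pair with zpool_obj_to_path().
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, &obj) == 0);

		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif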
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	fnvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
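/*
 * Illustrative sketch (kept under #if 0, not built): the inverse of the
 * unpack loop above, framing one record the way it is consumed here -- an
 * 8-byte little-endian length prefix followed by the packed nvlist.  The
 * helper name and calling convention are hypothetical.
 */
#if 0
static int
example_frame_record(nvlist_t *nv, char **bufp, size_t *sizep)
{
	char *packed = NULL;
	size_t packed_size = 0;
	uint64_t reclen;
	char *buf;
	int i;

	if (nvlist_pack(nv, &packed, &packed_size, NV_ENCODE_NATIVE, 0) != 0)
		return (-1);

	reclen = packed_size;
	buf = malloc(sizeof (reclen) + packed_size);
	if (buf == NULL) {
		free(packed);
		return (-1);
	}

	/* little-endian length prefix, mirroring the unpack loop above */
	for (i = 0; i < sizeof (reclen); i++)
		((uchar_t *)buf)[i] = (reclen >> (8 * i)) & 0xff;
	memcpy(buf + sizeof (reclen), packed, packed_size);

	free(packed);
	*bufp = buf;
	*sizep = sizeof (reclen) + packed_size;
	return (0);
}
#endif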
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char *buf;
	int buflen = 128 * 1024;
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	do {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}
	} while (1);

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
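/*
 * Illustrative consumer sketch (kept under #if 0, not built): draining the
 * currently queued events without blocking.  It assumes zevent_fd is a
 * private descriptor on ZFS_DEV, the approach used by 'zpool events'.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl)
{
	int zevent_fd = open(ZFS_DEV, O_RDWR);
	nvlist_t *nv;
	int dropped;

	if (zevent_fd < 0)
		return;

	for (;;) {
		if (zpool_events_next(hdl, &nv, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0)
			break;
		if (nv == NULL)		/* queue drained */
			break;
		if (dropped > 0)
			(void) printf("missed %d events\n", dropped);
		nvlist_free(nv);
	}
	(void) close(zevent_fd);
}
#endif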
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}
/*
 * Generate a unique partition name for the ZFS member.  Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members.  The partition names are
 * of the form zfs-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}
#endif

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and provide a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal that the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}