/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);

	return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;

		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
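
/*
 * Illustrative sketch (not part of the original source): how a consumer
 * might read a property through zpool_get_prop(). The handle 'zhp' is
 * assumed to have been opened with zpool_open() below.
 *
 *	char health[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", health);
 */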
/*
 * Check that the bootfs name has the same pool name as the pool it is
 * set on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool it is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
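
/*
 * Illustrative sketch (not part of the original source): property values
 * arrive as strings and are validated and parsed by zpool_valid_proplist()
 * above before the ioctl is issued. Assumes an open handle 'zhp'.
 *
 *	if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *		(void) fprintf(stderr, "failed to set autoexpand\n");
 */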
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
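
/*
 * For reference, the arithmetic behind the values above: with 512B
 * sectors, 2048 blocks * 512B = 1048576B (1m), so both the starting
 * block and the partition end land on 1m boundaries.
 */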
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
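
/*
 * Illustrative sketch (not part of the original source): the two export
 * entry points differ only in the flags they pass to zpool_export_common().
 * A plain export forwards the caller's 'force' with hardforce off, while
 * zpool_export_force() sets both. The history strings are hypothetical.
 *
 *	(void) zpool_export(zhp, B_FALSE, "zpool export tank");
 *	(void) zpool_export_force(zhp, "zpool export -F tank");
 */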
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
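
/*
 * Illustrative sketch (not part of the original source): starting and
 * cancelling a scrub through zpool_scan(). Assumes an open handle 'zhp'.
 *
 *	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *		(void) fprintf(stderr, "scrub could not be started\n");
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);	(cancels an active scan)
 */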
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
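
/*
 * Illustrative sketch (not part of the original source): zpool_find_vdev()
 * accepts a guid string, an interior vdev name such as "mirror-0", or a
 * device path. The device path below is hypothetical.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "/dev/sda", &spare, &l2cache, &log);
 *	if (tgt != NULL && !spare && !l2cache)
 *		... operate on the leaf vdev nvlist ...
 */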
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	} else {
		return (EZFS_POOL_INVALARG);
	}

	return (0);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can only have a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
	int fd, error;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, msg));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 *
	 * Also, we don't call efi_rescan() - that would just return EBUSY.
	 * The module will do it for us in vdev_disk_open().
	 */
	error = efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, msg));
	}
	return (0);
}
/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
/*
 * Remove the given device.  Currently, this is supported only for hot spares,
 * cache, and log devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
#if defined(__sun__) || defined(__sun)
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0"};

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
#endif /* sun */
/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
 *
 * caller must free the returned string
 */
char *
zfs_strip_partition(char *path)
{
	char *tmp = strdup(path);
	char *part = NULL, *d = NULL;

	if (!tmp)
		return (NULL);

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
	    tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d) { }
	} else if (strncmp("xvd", tmp, 3) == 0) {
		for (d = &tmp[3]; isalpha(*d); part = ++d) { }
	}

	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++) { }
		if (*d == '\0')
			*part = '\0';
	}

	return (tmp);
}
/*
 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
 *
 * path:	/dev/sda1
 * returns:	/dev/sda
 *
 * Returned string must be freed.
 */
char *
zfs_strip_partition_path(char *path)
{
	char *newpath = strdup(path);
	char *sd_offset;
	char *new_sd;

	if (!newpath)
		return (NULL);

	/* Point to "sda1" part of "/dev/sda1" */
	sd_offset = strrchr(newpath, '/') + 1;

	/* Get our new name "sda" */
	new_sd = zfs_strip_partition(sd_offset);
	if (!new_sd) {
		free(newpath);
		return (NULL);
	}

	/* Paste the "sda" where "sda1" was */
	strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);

	/* Free temporary "sda" */
	free(new_sd);

	return (newpath);
}
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
		/*
		 * Live VDEV path updates to a kernel VDEV during a
		 * zpool_vdev_name lookup are not supported on Linux.
		 */
		char *devid;
		vdev_stat_t *vs;
		uint_t vsc;

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}
#endif /* sun */

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			void *tmp;

			tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}

			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char *buf;
	int buflen = 128 * 1024;
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	do {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}

		/* CONSTCOND */
	} while (1);

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}
/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}
/*
 * Convert a pool's <dsobj, obj> pair into a user-readable path, falling
 * back to raw object numbers when the translation is unavailable.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}

	return (err);
}
/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}

	return (MAXOFFSET_T);
}
static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}
/*
 * Generate a unique partition name for the ZFS member.  Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members.  The partition names are
 * of the form zfs-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal that the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}
/*
 * Allocate and return the underlying device name for a device mapper device.
 * If a device mapper device maps to multiple devices, return the first device.
 *
 * For example, dm_name = "/dev/dm-0" could return "/dev/sda".  Symlinks to a
 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
 *
 * Returns device name, or NULL on error or no match.  If dm_name is not a DM
 * device then return NULL.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
dm_get_underlying_path(char *dm_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char *realp;
	char *tmp = NULL;
	char *path = NULL;
	char *dev_str;
	int size;

	if (dm_name == NULL)
		return (NULL);

	/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
	realp = realpath(dm_name, NULL);
	if (realp == NULL)
		return (NULL);

	/*
	 * If they preface 'dev' with a path (like "/dev") then strip it off.
	 * We just want the 'dm-N' part.
	 */
	tmp = strrchr(realp, '/');
	if (tmp != NULL)
		dev_str = tmp + 1;	/* +1 since we want the chr after '/' */
	else
		dev_str = tmp;

	size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
	if (size == -1 || !tmp)
		goto end;

	dp = opendir(tmp);
	if (dp == NULL)
		goto end;

	/* Return first sd* entry in /sys/block/dm-N/slaves/ */
	while ((ep = readdir(dp))) {
		if (ep->d_type != DT_DIR) {	/* skip "." and ".." dirs */
			size = asprintf(&path, "/dev/%s", ep->d_name);
			break;
		}
	}

end:
	if (dp != NULL)
		closedir(dp);
	free(tmp);
	free(realp);
	return (path);
}
/*
 * Return 1 if device is a device mapper or multipath device.
 * Return 0 if not.
 */
int
zfs_dev_is_dm(char *dev_name)
{
	char *tmp;

	tmp = dm_get_underlying_path(dev_name);
	if (tmp == NULL)
		return (0);

	free(tmp);
	return (1);
}
/*
 * Lookup the underlying device for a device name
 *
 * Often you'll have a symlink to a device, a partition device,
 * or a multipath device, and want to look up the underlying device.
 * This function returns the underlying device name.  If the device
 * name is already the underlying device, then just return the same
 * name.  If the device is a DM device with multiple underlying devices
 * then return the first one.
 *
 * For example:
 *
 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
 * dev_name:	/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
 * returns:	/dev/sda
 *
 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
 * dev_name:	/dev/mapper/mpatha
 * returns:	/dev/sda (first device)
 *
 * 3. /dev/sda (already the underlying device)
 * dev_name:	/dev/sda
 * returns:	/dev/sda
 *
 * 4. /dev/dm-3 (mapped to /dev/sda)
 * dev_name:	/dev/dm-3
 * returns:	/dev/sda
 *
 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
 * dev_name:	/dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
 * returns:	/dev/sdb
 *
 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
 * dev_name:	/dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
 * returns:	/dev/sda
 *
 * Returns underlying device name, or NULL on error or no match.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
zfs_get_underlying_path(char *dev_name)
{
	char *name = NULL;
	char *tmp;

	if (dev_name == NULL)
		return (NULL);

	tmp = dm_get_underlying_path(dev_name);

	/* dev_name not a DM device, so just un-symlinkize it */
	if (tmp == NULL)
		tmp = realpath(dev_name, NULL);

	if (tmp != NULL) {
		name = zfs_strip_partition_path(tmp);
		free(tmp);
	}

	return (name);
}
/*
 * Given a dev name like "sda", return the full enclosure sysfs path to
 * the disk.  You can also pass in the name with "/dev" prepended
 * to it (like /dev/sda).
 *
 * For example, disk "sda" in enclosure slot 1:
 * dev_name:	"sda"
 * returns:	"/sys/class/enclosure/1:0:3:0/Slot 1"
 *
 * 'dev' must be a non-devicemapper device.
 *
 * Returned string must be freed.
 */
char *
zfs_get_enclosure_sysfs_path(char *dev_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char buf[MAXPATHLEN];
	char *tmp1 = NULL;
	char *tmp2 = NULL;
	char *tmp3 = NULL;
	char *path = NULL;
	size_t size;
	int tmpsize;

	if (dev_name == NULL)
		return (NULL);

	/* If they preface 'dev' with a path (like "/dev") then strip it off */
	tmp1 = strrchr(dev_name, '/');
	if (tmp1 != NULL)
		dev_name = tmp1 + 1;	/* +1 since we want the chr after '/' */

	tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
	if (tmpsize == -1 || tmp1 == NULL) {
		tmp1 = NULL;
		goto end;
	}

	dp = opendir(tmp1);
	if (dp == NULL) {
		tmp1 = NULL;	/* To make free() at the end a NOP */
		goto end;
	}

	/*
	 * Look through all sysfs entries in /sys/block/<dev>/device for
	 * the enclosure symlink.
	 */
	while ((ep = readdir(dp))) {
		/* Ignore everything that's not our enclosure_device link */
		if (strstr(ep->d_name, "enclosure_device") == NULL)
			continue;

		if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
		    tmp2 == NULL)
			break;

		size = readlink(tmp2, buf, sizeof (buf));

		/* Did readlink fail or crop the link name? */
		if (size == -1 || size >= sizeof (buf)) {
			free(tmp2);
			tmp2 = NULL;	/* To make free() at the end a NOP */
			break;
		}

		/*
		 * We got a valid link.  readlink() doesn't terminate strings
		 * so we have to do it.
		 */
		buf[size] = '\0';

		/*
		 * Our link will look like:
		 *
		 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
		 *
		 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part.
		 */
		tmp3 = strstr(buf, "enclosure");
		if (tmp3 == NULL)
			break;

		if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
			/* If asprintf() fails, 'path' is undefined */
			path = NULL;
			break;
		}