4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "@(#)libzfs_pool.c 1.44 08/04/11 SMI"
41 #include <sys/efi_partition.h>
43 #include <sys/zfs_ioctl.h>
47 #include "zfs_namecheck.h"
49 #include "libzfs_impl.h"
53 * ====================================================================
54 * zpool property functions
55 * ====================================================================
59 zpool_get_all_props(zpool_handle_t
*zhp
)
62 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
64 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
66 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, 0) != 0)
69 while (ioctl(hdl
->libzfs_fd
, ZFS_IOC_POOL_GET_PROPS
, &zc
) != 0) {
70 if (errno
== ENOMEM
) {
71 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
72 zcmd_free_nvlists(&zc
);
76 zcmd_free_nvlists(&zc
);
81 if (zcmd_read_dst_nvlist(hdl
, &zc
, &zhp
->zpool_props
) != 0) {
82 zcmd_free_nvlists(&zc
);
86 zcmd_free_nvlists(&zc
);
92 zpool_props_refresh(zpool_handle_t
*zhp
)
96 old_props
= zhp
->zpool_props
;
98 if (zpool_get_all_props(zhp
) != 0)
101 nvlist_free(old_props
);
106 zpool_get_prop_string(zpool_handle_t
*zhp
, zpool_prop_t prop
,
112 zprop_source_t source
;
114 nvl
= zhp
->zpool_props
;
115 if (nvlist_lookup_nvlist(nvl
, zpool_prop_to_name(prop
), &nv
) == 0) {
116 verify(nvlist_lookup_uint64(nv
, ZPROP_SOURCE
, &ival
) == 0);
118 verify(nvlist_lookup_string(nv
, ZPROP_VALUE
, &value
) == 0);
120 source
= ZPROP_SRC_DEFAULT
;
121 if ((value
= (char *)zpool_prop_default_string(prop
)) == NULL
)
132 zpool_get_prop_int(zpool_handle_t
*zhp
, zpool_prop_t prop
, zprop_source_t
*src
)
136 zprop_source_t source
;
138 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
))
139 return (zpool_prop_default_numeric(prop
));
141 nvl
= zhp
->zpool_props
;
142 if (nvlist_lookup_nvlist(nvl
, zpool_prop_to_name(prop
), &nv
) == 0) {
143 verify(nvlist_lookup_uint64(nv
, ZPROP_SOURCE
, &value
) == 0);
145 verify(nvlist_lookup_uint64(nv
, ZPROP_VALUE
, &value
) == 0);
147 source
= ZPROP_SRC_DEFAULT
;
148 value
= zpool_prop_default_numeric(prop
);
158 * Map VDEV STATE to printed strings.
/*
 * Fragment of zpool_state_to_name(): maps a vdev_state_t (plus the
 * vdev_aux_t qualifier for the CANT_OPEN case) to a localized status
 * string via gettext().
 * NOTE(review): this chunk is a lossy extraction -- the enclosing
 * "switch (state)" line, the braces, the return type and the default
 * label are not visible here; only the case arms below are documented.
 */
161 zpool_state_to_name(vdev_state_t state
, vdev_aux_t aux
)
164 case VDEV_STATE_CLOSED
:
165 case VDEV_STATE_OFFLINE
:
166 return (gettext("OFFLINE"));
167 case VDEV_STATE_REMOVED
:
168 return (gettext("REMOVED"));
169 case VDEV_STATE_CANT_OPEN
:
/*
 * A device that cannot be opened because of corrupt data is shown as
 * FAULTED; any other open failure is shown as UNAVAIL.
 */
170 if (aux
== VDEV_AUX_CORRUPT_DATA
)
171 return (gettext("FAULTED"));
173 return (gettext("UNAVAIL"));
174 case VDEV_STATE_FAULTED
:
175 return (gettext("FAULTED"));
176 case VDEV_STATE_DEGRADED
:
177 return (gettext("DEGRADED"));
178 case VDEV_STATE_HEALTHY
:
179 return (gettext("ONLINE"));
/* Reached for any state not handled by a case arm above. */
182 return (gettext("UNKNOWN"));
186 * Get a zpool property value for 'prop' and return the value in
187 * a pre-allocated buffer.
190 zpool_get_prop(zpool_handle_t
*zhp
, zpool_prop_t prop
, char *buf
, size_t len
,
191 zprop_source_t
*srctype
)
195 zprop_source_t src
= ZPROP_SRC_NONE
;
200 if (zpool_get_state(zhp
) == POOL_STATE_UNAVAIL
) {
201 if (prop
== ZPOOL_PROP_NAME
)
202 (void) strlcpy(buf
, zpool_get_name(zhp
), len
);
203 else if (prop
== ZPOOL_PROP_HEALTH
)
204 (void) strlcpy(buf
, "FAULTED", len
);
206 (void) strlcpy(buf
, "-", len
);
210 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
) &&
211 prop
!= ZPOOL_PROP_NAME
)
214 switch (zpool_prop_get_type(prop
)) {
215 case PROP_TYPE_STRING
:
216 (void) strlcpy(buf
, zpool_get_prop_string(zhp
, prop
, &src
),
220 case PROP_TYPE_NUMBER
:
221 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
224 case ZPOOL_PROP_SIZE
:
225 case ZPOOL_PROP_USED
:
226 case ZPOOL_PROP_AVAILABLE
:
227 (void) zfs_nicenum(intval
, buf
, len
);
230 case ZPOOL_PROP_CAPACITY
:
231 (void) snprintf(buf
, len
, "%llu%%",
232 (u_longlong_t
)intval
);
235 case ZPOOL_PROP_HEALTH
:
236 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
237 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
238 verify(nvlist_lookup_uint64_array(nvroot
,
239 ZPOOL_CONFIG_STATS
, (uint64_t **)&vs
, &vsc
) == 0);
241 (void) strlcpy(buf
, zpool_state_to_name(intval
,
245 (void) snprintf(buf
, len
, "%llu", intval
);
249 case PROP_TYPE_INDEX
:
250 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
251 if (zpool_prop_index_to_string(prop
, intval
, &strval
)
254 (void) strlcpy(buf
, strval
, len
);
268 * Check if the bootfs name has the same pool name as it is set to.
269 * Assuming bootfs is a valid dataset name.
/*
 * Fragment of bootfs_name_valid(): checks that 'bootfs' names a valid
 * ZFS filesystem (zfs_name_valid with ZFS_TYPE_FILESYSTEM) and that it
 * belongs to pool 'pool' -- i.e. the pool name is a prefix of the
 * bootfs name, followed by either '/' (a dataset in the pool) or the
 * terminating NUL (the pool's root dataset itself).
 * NOTE(review): lossy extraction -- the return statements and braces
 * between the visible lines are missing from this view.
 */
272 bootfs_name_valid(const char *pool
, char *bootfs
)
274 int len
= strlen(pool
);
276 if (!zfs_name_valid(bootfs
, ZFS_TYPE_FILESYSTEM
))
/* bootfs must be the pool itself or a dataset within it. */
279 if (strncmp(pool
, bootfs
, len
) == 0 &&
280 (bootfs
[len
] == '/' || bootfs
[len
] == '\0'))
287 * Given an nvlist of zpool properties to be set, validate that they are
288 * correct, and parse any numeric properties (index, boolean, etc) if they are
289 * specified as strings.
292 zpool_validate_properties(libzfs_handle_t
*hdl
, const char *poolname
,
293 nvlist_t
*props
, uint64_t version
, boolean_t create_or_import
, char *errbuf
)
301 struct stat64 statbuf
;
303 if (nvlist_alloc(&retprops
, NV_UNIQUE_NAME
, 0) != 0) {
304 (void) no_memory(hdl
);
309 while ((elem
= nvlist_next_nvpair(props
, elem
)) != NULL
) {
310 const char *propname
= nvpair_name(elem
);
313 * Make sure this property is valid and applies to this type.
315 if ((prop
= zpool_name_to_prop(propname
)) == ZPROP_INVAL
) {
316 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
317 "invalid property '%s'"), propname
);
318 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
322 if (zpool_prop_readonly(prop
)) {
323 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
324 "is readonly"), propname
);
325 (void) zfs_error(hdl
, EZFS_PROPREADONLY
, errbuf
);
329 if (zprop_parse_value(hdl
, elem
, prop
, ZFS_TYPE_POOL
, retprops
,
330 &strval
, &intval
, errbuf
) != 0)
334 * Perform additional checking for specific properties.
337 case ZPOOL_PROP_VERSION
:
338 if (intval
< version
|| intval
> SPA_VERSION
) {
339 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
340 "property '%s' number %d is invalid."),
342 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
347 case ZPOOL_PROP_BOOTFS
:
348 if (create_or_import
) {
349 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
350 "property '%s' cannot be set at creation "
351 "or import time"), propname
);
352 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
356 if (version
< SPA_VERSION_BOOTFS
) {
357 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
358 "pool must be upgraded to support "
359 "'%s' property"), propname
);
360 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
365 * bootfs property value has to be a dataset name and
366 * the dataset has to be in the same pool as it sets to.
368 if (strval
[0] != '\0' && !bootfs_name_valid(poolname
,
370 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
371 "is an invalid name"), strval
);
372 (void) zfs_error(hdl
, EZFS_INVALIDNAME
, errbuf
);
377 case ZPOOL_PROP_ALTROOT
:
378 if (!create_or_import
) {
379 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
380 "property '%s' can only be set during pool "
381 "creation or import"), propname
);
382 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
386 if (strval
[0] != '/') {
387 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
388 "bad alternate root '%s'"), strval
);
389 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
394 case ZPOOL_PROP_CACHEFILE
:
395 if (strval
[0] == '\0')
398 if (strcmp(strval
, "none") == 0)
401 if (strval
[0] != '/') {
402 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
403 "property '%s' must be empty, an "
404 "absolute path, or 'none'"), propname
);
405 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
409 slash
= strrchr(strval
, '/');
411 if (slash
[1] == '\0' || strcmp(slash
, "/.") == 0 ||
412 strcmp(slash
, "/..") == 0) {
413 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
414 "'%s' is not a valid file"), strval
);
415 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
421 if (strval
[0] != '\0' &&
422 (stat64(strval
, &statbuf
) != 0 ||
423 !S_ISDIR(statbuf
.st_mode
))) {
424 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
425 "'%s' is not a valid directory"),
427 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
438 nvlist_free(retprops
);
443 * Set zpool property : propname=propval.
446 zpool_set_prop(zpool_handle_t
*zhp
, const char *propname
, const char *propval
)
448 zfs_cmd_t zc
= { 0 };
451 nvlist_t
*nvl
= NULL
;
455 (void) snprintf(errbuf
, sizeof (errbuf
),
456 dgettext(TEXT_DOMAIN
, "cannot set property for '%s'"),
459 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
))
460 return (zfs_error(zhp
->zpool_hdl
, EZFS_POOLPROPS
, errbuf
));
462 if (nvlist_alloc(&nvl
, NV_UNIQUE_NAME
, 0) != 0)
463 return (no_memory(zhp
->zpool_hdl
));
465 if (nvlist_add_string(nvl
, propname
, propval
) != 0) {
467 return (no_memory(zhp
->zpool_hdl
));
470 version
= zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
);
471 if ((realprops
= zpool_validate_properties(zhp
->zpool_hdl
,
472 zhp
->zpool_name
, nvl
, version
, B_FALSE
, errbuf
)) == NULL
) {
481 * Execute the corresponding ioctl() to set this property.
483 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
485 if (zcmd_write_src_nvlist(zhp
->zpool_hdl
, &zc
, nvl
) != 0) {
490 ret
= zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_SET_PROPS
, &zc
);
492 zcmd_free_nvlists(&zc
);
496 (void) zpool_standard_error(zhp
->zpool_hdl
, errno
, errbuf
);
498 (void) zpool_props_refresh(zhp
);
504 zpool_expand_proplist(zpool_handle_t
*zhp
, zprop_list_t
**plp
)
506 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
508 char buf
[ZFS_MAXPROPLEN
];
510 if (zprop_expand_list(hdl
, plp
, ZFS_TYPE_POOL
) != 0)
513 for (entry
= *plp
; entry
!= NULL
; entry
= entry
->pl_next
) {
518 if (entry
->pl_prop
!= ZPROP_INVAL
&&
519 zpool_get_prop(zhp
, entry
->pl_prop
, buf
, sizeof (buf
),
521 if (strlen(buf
) > entry
->pl_width
)
522 entry
->pl_width
= strlen(buf
);
531 * Validate the given pool name, optionally putting an extended error message in
535 zpool_name_valid(libzfs_handle_t
*hdl
, boolean_t isopen
, const char *pool
)
541 ret
= pool_namecheck(pool
, &why
, &what
);
544 * The rules for reserved pool names were extended at a later point.
545 * But we need to support users with existing pools that may now be
546 * invalid. So we only check for this expanded set of names during a
547 * create (or import), and only in userland.
549 if (ret
== 0 && !isopen
&&
550 (strncmp(pool
, "mirror", 6) == 0 ||
551 strncmp(pool
, "raidz", 5) == 0 ||
552 strncmp(pool
, "spare", 5) == 0 ||
553 strcmp(pool
, "log") == 0)) {
556 dgettext(TEXT_DOMAIN
, "name is reserved"));
564 case NAME_ERR_TOOLONG
:
566 dgettext(TEXT_DOMAIN
, "name is too long"));
569 case NAME_ERR_INVALCHAR
:
571 dgettext(TEXT_DOMAIN
, "invalid character "
572 "'%c' in pool name"), what
);
575 case NAME_ERR_NOLETTER
:
576 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
577 "name must begin with a letter"));
580 case NAME_ERR_RESERVED
:
581 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
582 "name is reserved"));
585 case NAME_ERR_DISKLIKE
:
586 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
587 "pool name is reserved"));
590 case NAME_ERR_LEADING_SLASH
:
591 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
592 "leading slash in name"));
595 case NAME_ERR_EMPTY_COMPONENT
:
596 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
597 "empty component in name"));
600 case NAME_ERR_TRAILING_SLASH
:
601 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
602 "trailing slash in name"));
605 case NAME_ERR_MULTIPLE_AT
:
606 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
607 "multiple '@' delimiters in name"));
619 * Open a handle to the given pool, even if the pool is currently in the FAULTED
623 zpool_open_canfail(libzfs_handle_t
*hdl
, const char *pool
)
629 * Make sure the pool name is valid.
631 if (!zpool_name_valid(hdl
, B_TRUE
, pool
)) {
632 (void) zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
633 dgettext(TEXT_DOMAIN
, "cannot open '%s'"),
638 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
641 zhp
->zpool_hdl
= hdl
;
642 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
644 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
650 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "no such pool"));
651 (void) zfs_error_fmt(hdl
, EZFS_NOENT
,
652 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), pool
);
661 * Like the above, but silent on error. Used when iterating over pools (because
662 * the configuration cache may be out of date).
665 zpool_open_silent(libzfs_handle_t
*hdl
, const char *pool
, zpool_handle_t
**ret
)
670 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
673 zhp
->zpool_hdl
= hdl
;
674 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
676 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
692 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
696 zpool_open(libzfs_handle_t
*hdl
, const char *pool
)
700 if ((zhp
= zpool_open_canfail(hdl
, pool
)) == NULL
)
703 if (zhp
->zpool_state
== POOL_STATE_UNAVAIL
) {
704 (void) zfs_error_fmt(hdl
, EZFS_POOLUNAVAIL
,
705 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), zhp
->zpool_name
);
714 * Close the handle. Simply frees the memory associated with the handle.
/*
 * Fragment of zpool_close(): releases the nvlists cached on the pool
 * handle (current config, previous config, and the property list).
 * NOTE(review): lossy extraction -- the free of the handle structure
 * itself, presumably following these lines, is not visible in this
 * chunk.  The "if (ptr)" guards before nvlist_free() look redundant
 * (nvlist_free(NULL) is documented as a no-op on current libnvpair) --
 * confirm against this codebase's libnvpair before removing them.
 */
717 zpool_close(zpool_handle_t
*zhp
)
719 if (zhp
->zpool_config
)
720 nvlist_free(zhp
->zpool_config
);
721 if (zhp
->zpool_old_config
)
722 nvlist_free(zhp
->zpool_old_config
);
723 if (zhp
->zpool_props
)
724 nvlist_free(zhp
->zpool_props
);
729 * Return the name of the pool.
/*
 * Fragment of zpool_get_name(): accessor returning the pool name
 * stored in the handle.  Returns the handle's own zpool_name field
 * directly (a pointer into zhp, not a copy), so the result is only
 * valid for the lifetime of the handle.
 */
732 zpool_get_name(zpool_handle_t
*zhp
)
734 return (zhp
->zpool_name
);
739 * Return the state of the pool (ACTIVE or UNAVAILABLE)
/*
 * Fragment of zpool_get_state(): accessor returning the cached pool
 * state (per the comment on L826: ACTIVE or UNAVAILABLE) from the
 * handle's zpool_state field.
 */
742 zpool_get_state(zpool_handle_t
*zhp
)
744 return (zhp
->zpool_state
);
748 * Create the named pool, using the provided vdev list. It is assumed
749 * that the consumer has already validated the contents of the nvlist, so we
750 * don't have to worry about error semantics.
753 zpool_create(libzfs_handle_t
*hdl
, const char *pool
, nvlist_t
*nvroot
,
756 zfs_cmd_t zc
= { 0 };
760 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
761 "cannot create '%s'"), pool
);
763 if (!zpool_name_valid(hdl
, B_FALSE
, pool
))
764 return (zfs_error(hdl
, EZFS_INVALIDNAME
, msg
));
766 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
769 if (props
&& (props
= zpool_validate_properties(hdl
, pool
, props
,
770 SPA_VERSION_1
, B_TRUE
, msg
)) == NULL
)
773 if (props
&& zcmd_write_src_nvlist(hdl
, &zc
, props
) != 0) {
778 (void) strlcpy(zc
.zc_name
, pool
, sizeof (zc
.zc_name
));
780 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_CREATE
, &zc
) != 0) {
782 zcmd_free_nvlists(&zc
);
788 * This can happen if the user has specified the same
789 * device multiple times. We can't reliably detect this
790 * until we try to add it and see we already have a
793 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
794 "one or more vdevs refer to the same device"));
795 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
799 * This occurs when one of the devices is below
800 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
801 * device was the problem device since there's no
802 * reliable way to determine device size from userland.
807 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
809 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
810 "one or more devices is less than the "
811 "minimum size (%s)"), buf
);
813 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
816 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
817 "one or more devices is out of space"));
818 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
821 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
822 "cache device must be a disk or disk slice"));
823 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
826 return (zpool_standard_error(hdl
, errno
, msg
));
831 * If this is an alternate root pool, then we automatically set the
832 * mountpoint of the root dataset to be '/'.
834 if (nvlist_lookup_string(props
, zpool_prop_to_name(ZPOOL_PROP_ALTROOT
),
838 verify((zhp
= zfs_open(hdl
, pool
, ZFS_TYPE_DATASET
)) != NULL
);
839 verify(zfs_prop_set(zhp
, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT
),
845 zcmd_free_nvlists(&zc
);
851 * Destroy the given pool. It is up to the caller to ensure that there are no
852 * datasets left in the pool.
855 zpool_destroy(zpool_handle_t
*zhp
)
857 zfs_cmd_t zc
= { 0 };
858 zfs_handle_t
*zfp
= NULL
;
859 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
862 if (zhp
->zpool_state
== POOL_STATE_ACTIVE
&&
863 (zfp
= zfs_open(zhp
->zpool_hdl
, zhp
->zpool_name
,
864 ZFS_TYPE_FILESYSTEM
)) == NULL
)
867 if (zpool_remove_zvol_links(zhp
) != 0)
870 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
872 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_DESTROY
, &zc
) != 0) {
873 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
874 "cannot destroy '%s'"), zhp
->zpool_name
);
876 if (errno
== EROFS
) {
877 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
878 "one or more devices is read only"));
879 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
881 (void) zpool_standard_error(hdl
, errno
, msg
);
890 remove_mountpoint(zfp
);
898 * Add the given vdevs to the pool. The caller must have already performed the
899 * necessary verification to ensure that the vdev specification is well-formed.
902 zpool_add(zpool_handle_t
*zhp
, nvlist_t
*nvroot
)
904 zfs_cmd_t zc
= { 0 };
906 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
908 nvlist_t
**spares
, **l2cache
;
909 uint_t nspares
, nl2cache
;
911 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
912 "cannot add to '%s'"), zhp
->zpool_name
);
914 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
915 SPA_VERSION_SPARES
&&
916 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_SPARES
,
917 &spares
, &nspares
) == 0) {
918 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
919 "upgraded to add hot spares"));
920 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
923 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
924 SPA_VERSION_L2CACHE
&&
925 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_L2CACHE
,
926 &l2cache
, &nl2cache
) == 0) {
927 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
928 "upgraded to add cache devices"));
929 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
932 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
934 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
936 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_ADD
, &zc
) != 0) {
940 * This can happen if the user has specified the same
941 * device multiple times. We can't reliably detect this
942 * until we try to add it and see we already have a
945 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
946 "one or more vdevs refer to the same device"));
947 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
952 * This occurrs when one of the devices is below
953 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
954 * device was the problem device since there's no
955 * reliable way to determine device size from userland.
960 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
962 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
963 "device is less than the minimum "
966 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
970 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
971 "pool must be upgraded to add these vdevs"));
972 (void) zfs_error(hdl
, EZFS_BADVERSION
, msg
);
976 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
977 "root pool can not have multiple vdevs"
978 " or separate logs"));
979 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
);
983 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
984 "cache device must be a disk or disk slice"));
985 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
989 (void) zpool_standard_error(hdl
, errno
, msg
);
997 zcmd_free_nvlists(&zc
);
1003 * Exports the pool from the system. The caller must ensure that there are no
1004 * mounted datasets in the pool.
1007 zpool_export(zpool_handle_t
*zhp
)
1009 zfs_cmd_t zc
= { 0 };
1011 if (zpool_remove_zvol_links(zhp
) != 0)
1014 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1016 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_EXPORT
, &zc
) != 0)
1017 return (zpool_standard_error_fmt(zhp
->zpool_hdl
, errno
,
1018 dgettext(TEXT_DOMAIN
, "cannot export '%s'"),
1024 * zpool_import() is a contracted interface. Should be kept the same
1027 * Applications should use zpool_import_props() to import a pool with
1028 * new properties value to be set.
1031 zpool_import(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1034 nvlist_t
*props
= NULL
;
1037 if (altroot
!= NULL
) {
1038 if (nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) != 0) {
1039 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1040 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1044 if (nvlist_add_string(props
,
1045 zpool_prop_to_name(ZPOOL_PROP_ALTROOT
), altroot
) != 0) {
1047 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1048 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1053 ret
= zpool_import_props(hdl
, config
, newname
, props
);
1060 * Import the given pool using the known configuration and a list of
1061 * properties to be set. The configuration should have come from
1062 * zpool_find_import(). The 'newname' parameters control whether the pool
1063 * is imported with a different name.
1066 zpool_import_props(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1069 zfs_cmd_t zc
= { 0 };
1075 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1078 (void) snprintf(errbuf
, sizeof (errbuf
), dgettext(TEXT_DOMAIN
,
1079 "cannot import pool '%s'"), origname
);
1081 if (newname
!= NULL
) {
1082 if (!zpool_name_valid(hdl
, B_FALSE
, newname
))
1083 return (zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1084 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1086 thename
= (char *)newname
;
1094 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_VERSION
,
1097 if ((props
= zpool_validate_properties(hdl
, origname
,
1098 props
, version
, B_TRUE
, errbuf
)) == NULL
) {
1100 } else if (zcmd_write_src_nvlist(hdl
, &zc
, props
) != 0) {
1106 (void) strlcpy(zc
.zc_name
, thename
, sizeof (zc
.zc_name
));
1108 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1111 if (zcmd_write_conf_nvlist(hdl
, &zc
, config
) != 0) {
1117 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_IMPORT
, &zc
) != 0) {
1119 if (newname
== NULL
)
1120 (void) snprintf(desc
, sizeof (desc
),
1121 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1124 (void) snprintf(desc
, sizeof (desc
),
1125 dgettext(TEXT_DOMAIN
, "cannot import '%s' as '%s'"),
1131 * Unsupported version.
1133 (void) zfs_error(hdl
, EZFS_BADVERSION
, desc
);
1137 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, desc
);
1141 (void) zpool_standard_error(hdl
, errno
, desc
);
1146 zpool_handle_t
*zhp
;
1149 * This should never fail, but play it safe anyway.
1151 if (zpool_open_silent(hdl
, thename
, &zhp
) != 0) {
1153 } else if (zhp
!= NULL
) {
1154 ret
= zpool_create_zvol_links(zhp
);
1160 zcmd_free_nvlists(&zc
);
1170 zpool_scrub(zpool_handle_t
*zhp
, pool_scrub_type_t type
)
1172 zfs_cmd_t zc
= { 0 };
1174 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1176 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1177 zc
.zc_cookie
= type
;
1179 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_SCRUB
, &zc
) == 0)
1182 (void) snprintf(msg
, sizeof (msg
),
1183 dgettext(TEXT_DOMAIN
, "cannot scrub %s"), zc
.zc_name
);
1186 return (zfs_error(hdl
, EZFS_RESILVERING
, msg
));
1188 return (zpool_standard_error(hdl
, errno
, msg
));
1192 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1193 * spare; but FALSE if its an INUSE spare.
1196 vdev_to_nvlist_iter(nvlist_t
*nv
, const char *search
, uint64_t guid
,
1197 boolean_t
*avail_spare
, boolean_t
*l2cache
)
1201 uint64_t theguid
, present
;
1203 uint64_t wholedisk
= 0;
1206 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
, &theguid
) == 0);
1208 if (search
== NULL
&&
1209 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_NOT_PRESENT
, &present
) == 0) {
1211 * If the device has never been present since import, the only
1212 * reliable way to match the vdev is by GUID.
1214 if (theguid
== guid
)
1216 } else if (search
!= NULL
&&
1217 nvlist_lookup_string(nv
, ZPOOL_CONFIG_PATH
, &path
) == 0) {
1218 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
1222 * For whole disks, the internal path has 's0', but the
1223 * path passed in by the user doesn't.
1225 if (strlen(search
) == strlen(path
) - 2 &&
1226 strncmp(search
, path
, strlen(search
)) == 0)
1228 } else if (strcmp(search
, path
) == 0) {
1233 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
1234 &child
, &children
) != 0)
1237 for (c
= 0; c
< children
; c
++)
1238 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
, guid
,
1239 avail_spare
, l2cache
)) != NULL
)
1242 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_SPARES
,
1243 &child
, &children
) == 0) {
1244 for (c
= 0; c
< children
; c
++) {
1245 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
, guid
,
1246 avail_spare
, l2cache
)) != NULL
) {
1247 *avail_spare
= B_TRUE
;
1253 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_L2CACHE
,
1254 &child
, &children
) == 0) {
1255 for (c
= 0; c
< children
; c
++) {
1256 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
, guid
,
1257 avail_spare
, l2cache
)) != NULL
) {
1268 zpool_find_vdev(zpool_handle_t
*zhp
, const char *path
, boolean_t
*avail_spare
,
1271 char buf
[MAXPATHLEN
];
1277 guid
= strtoull(path
, &end
, 10);
1278 if (guid
!= 0 && *end
== '\0') {
1280 } else if (path
[0] != '/') {
1281 (void) snprintf(buf
, sizeof (buf
), "%s%s", "/dev/dsk/", path
);
1287 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
1290 *avail_spare
= B_FALSE
;
1292 return (vdev_to_nvlist_iter(nvroot
, search
, guid
, avail_spare
,
1297 * Returns TRUE if the given guid corresponds to the given type.
1298 * This is used to check for hot spares (INUSE or not), and level 2 cache
1302 is_guid_type(zpool_handle_t
*zhp
, uint64_t guid
, const char *type
)
1304 uint64_t target_guid
;
1310 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
1312 if (nvlist_lookup_nvlist_array(nvroot
, type
, &list
, &count
) == 0) {
1313 for (i
= 0; i
< count
; i
++) {
1314 verify(nvlist_lookup_uint64(list
[i
], ZPOOL_CONFIG_GUID
,
1315 &target_guid
) == 0);
1316 if (guid
== target_guid
)
1325 * Bring the specified vdev online. The 'flags' parameter is a set of the
1326 * ZFS_ONLINE_* flags.
1329 zpool_vdev_online(zpool_handle_t
*zhp
, const char *path
, int flags
,
1330 vdev_state_t
*newstate
)
1332 zfs_cmd_t zc
= { 0 };
1335 boolean_t avail_spare
, l2cache
;
1336 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1338 (void) snprintf(msg
, sizeof (msg
),
1339 dgettext(TEXT_DOMAIN
, "cannot online %s"), path
);
1341 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1342 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
)) == NULL
)
1343 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1345 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1348 is_guid_type(zhp
, zc
.zc_guid
, ZPOOL_CONFIG_SPARES
) == B_TRUE
)
1349 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1352 is_guid_type(zhp
, zc
.zc_guid
, ZPOOL_CONFIG_L2CACHE
) == B_TRUE
)
1353 return (zfs_error(hdl
, EZFS_ISL2CACHE
, msg
));
1355 zc
.zc_cookie
= VDEV_STATE_ONLINE
;
1359 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) != 0)
1360 return (zpool_standard_error(hdl
, errno
, msg
));
1362 *newstate
= zc
.zc_cookie
;
1367 * Take the specified vdev offline
1370 zpool_vdev_offline(zpool_handle_t
*zhp
, const char *path
, boolean_t istmp
)
1372 zfs_cmd_t zc
= { 0 };
1375 boolean_t avail_spare
, l2cache
;
1376 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1378 (void) snprintf(msg
, sizeof (msg
),
1379 dgettext(TEXT_DOMAIN
, "cannot offline %s"), path
);
1381 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1382 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
)) == NULL
)
1383 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1385 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1388 is_guid_type(zhp
, zc
.zc_guid
, ZPOOL_CONFIG_SPARES
) == B_TRUE
)
1389 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1392 is_guid_type(zhp
, zc
.zc_guid
, ZPOOL_CONFIG_L2CACHE
) == B_TRUE
)
1393 return (zfs_error(hdl
, EZFS_ISL2CACHE
, msg
));
1395 zc
.zc_cookie
= VDEV_STATE_OFFLINE
;
1396 zc
.zc_obj
= istmp
? ZFS_OFFLINE_TEMPORARY
: 0;
1398 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
1405 * There are no other replicas of this device.
1407 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
1410 return (zpool_standard_error(hdl
, errno
, msg
));
1415 * Mark the given vdev faulted.
/*
 * Fragment of zpool_vdev_fault(): asks the kernel to mark the vdev
 * identified by 'guid' as FAULTED, via the ZFS_IOC_VDEV_SET_STATE
 * ioctl with zc_cookie = VDEV_STATE_FAULTED.  On ioctl failure the
 * visible error paths map the failure to a libzfs error: the
 * EZFS_NOREPLICAS return (per the embedded comment) covers the case
 * where no other replicas of the device exist; all other errnos fall
 * through to zpool_standard_error().
 * NOTE(review): lossy extraction -- the assignment of 'guid' into
 * zc.zc_guid, the 'msg' declaration, braces and the success return are
 * missing from this view; the errno test selecting the EZFS_NOREPLICAS
 * branch is also not visible.
 */
1418 zpool_vdev_fault(zpool_handle_t
*zhp
, uint64_t guid
)
1420 zfs_cmd_t zc
= { 0 };
1422 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1424 (void) snprintf(msg
, sizeof (msg
),
1425 dgettext(TEXT_DOMAIN
, "cannot fault %llu"), guid
);
1427 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1429 zc
.zc_cookie
= VDEV_STATE_FAULTED
;
1431 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
1438 * There are no other replicas of this device.
1440 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
1443 return (zpool_standard_error(hdl
, errno
, msg
));
1449 * Mark the given vdev degraded.
/*
 * Fragment of zpool_vdev_degrade(): asks the kernel to mark the vdev
 * identified by 'guid' as DEGRADED, via the ZFS_IOC_VDEV_SET_STATE
 * ioctl with zc_cookie = VDEV_STATE_DEGRADED.  Mirrors the
 * zpool_vdev_fault() fragment above, but the only visible error path
 * is the generic zpool_standard_error() mapping of errno.
 * NOTE(review): lossy extraction -- the assignment of 'guid' into
 * zc.zc_guid, the 'msg' declaration, braces and the success return are
 * missing from this view.
 */
1452 zpool_vdev_degrade(zpool_handle_t
*zhp
, uint64_t guid
)
1454 zfs_cmd_t zc
= { 0 };
1456 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1458 (void) snprintf(msg
, sizeof (msg
),
1459 dgettext(TEXT_DOMAIN
, "cannot degrade %llu"), guid
);
1461 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1463 zc
.zc_cookie
= VDEV_STATE_DEGRADED
;
1465 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
1468 return (zpool_standard_error(hdl
, errno
, msg
));
1472 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1476 is_replacing_spare(nvlist_t
*search
, nvlist_t
*tgt
, int which
)
1482 if (nvlist_lookup_nvlist_array(search
, ZPOOL_CONFIG_CHILDREN
, &child
,
1484 verify(nvlist_lookup_string(search
, ZPOOL_CONFIG_TYPE
,
1487 if (strcmp(type
, VDEV_TYPE_SPARE
) == 0 &&
1488 children
== 2 && child
[which
] == tgt
)
1491 for (c
= 0; c
< children
; c
++)
1492 if (is_replacing_spare(child
[c
], tgt
, which
))
1500 * Attach new_disk (fully described by nvroot) to old_disk.
1501 * If 'replacing' is specified, the new disk will replace the old one.
1504 zpool_vdev_attach(zpool_handle_t
*zhp
,
1505 const char *old_disk
, const char *new_disk
, nvlist_t
*nvroot
, int replacing
)
1507 zfs_cmd_t zc
= { 0 };
1511 boolean_t avail_spare
, l2cache
;
1512 uint64_t val
, is_log
;
1516 nvlist_t
*config_root
;
1517 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1520 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1521 "cannot replace %s with %s"), old_disk
, new_disk
);
1523 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1524 "cannot attach %s to %s"), new_disk
, old_disk
);
1526 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1527 if ((tgt
= zpool_find_vdev(zhp
, old_disk
, &avail_spare
, &l2cache
)) == 0)
1528 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1531 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1534 return (zfs_error(hdl
, EZFS_ISL2CACHE
, msg
));
1536 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1537 zc
.zc_cookie
= replacing
;
1539 if (nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_CHILDREN
,
1540 &child
, &children
) != 0 || children
!= 1) {
1541 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1542 "new device must be a single disk"));
1543 return (zfs_error(hdl
, EZFS_INVALCONFIG
, msg
));
1546 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
1547 ZPOOL_CONFIG_VDEV_TREE
, &config_root
) == 0);
1550 * If the target is a hot spare that has been swapped in, we can only
1551 * replace it with another hot spare.
1554 nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_IS_SPARE
, &val
) == 0 &&
1555 nvlist_lookup_string(child
[0], ZPOOL_CONFIG_PATH
, &path
) == 0 &&
1556 (zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
) == NULL
||
1557 !avail_spare
) && is_replacing_spare(config_root
, tgt
, 1)) {
1558 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1559 "can only be replaced by another hot spare"));
1560 return (zfs_error(hdl
, EZFS_BADTARGET
, msg
));
1564 * If we are attempting to replace a spare, it canot be applied to an
1565 * already spared device.
1568 nvlist_lookup_string(child
[0], ZPOOL_CONFIG_PATH
, &path
) == 0 &&
1569 zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
) != NULL
&&
1570 avail_spare
&& is_replacing_spare(config_root
, tgt
, 0)) {
1571 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1572 "device has already been replaced with a spare"));
1573 return (zfs_error(hdl
, EZFS_BADTARGET
, msg
));
1576 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1579 ret
= zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_ATTACH
, &zc
);
1581 zcmd_free_nvlists(&zc
);
1589 * Can't attach to or replace this type of vdev.
1593 (void) nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_IS_LOG
,
1596 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1597 "cannot replace a log with a spare"));
1599 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1600 "cannot replace a replacing device"));
1602 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1603 "can only attach to mirrors and top-level "
1606 (void) zfs_error(hdl
, EZFS_BADTARGET
, msg
);
1611 * The new device must be a single disk.
1613 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1614 "new device must be a single disk"));
1615 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, msg
);
1619 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "%s is busy"),
1621 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1626 * The new device is too small.
1628 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1629 "device is too small"));
1630 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1635 * The new device has a different alignment requirement.
1637 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1638 "devices have different sector alignment"));
1639 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1644 * The resulting top-level vdev spec won't fit in the label.
1646 (void) zfs_error(hdl
, EZFS_DEVOVERFLOW
, msg
);
1650 (void) zpool_standard_error(hdl
, errno
, msg
);
1657 * Detach the specified device.
1660 zpool_vdev_detach(zpool_handle_t
*zhp
, const char *path
)
1662 zfs_cmd_t zc
= { 0 };
1665 boolean_t avail_spare
, l2cache
;
1666 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1668 (void) snprintf(msg
, sizeof (msg
),
1669 dgettext(TEXT_DOMAIN
, "cannot detach %s"), path
);
1671 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1672 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
)) == 0)
1673 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1676 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1679 return (zfs_error(hdl
, EZFS_ISL2CACHE
, msg
));
1681 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1683 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_DETACH
, &zc
) == 0)
1690 * Can't detach from this type of vdev.
1692 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "only "
1693 "applicable to mirror and replacing vdevs"));
1694 (void) zfs_error(zhp
->zpool_hdl
, EZFS_BADTARGET
, msg
);
1699 * There are no other replicas of this device.
1701 (void) zfs_error(hdl
, EZFS_NOREPLICAS
, msg
);
1705 (void) zpool_standard_error(hdl
, errno
, msg
);
1712 * Remove the given device. Currently, this is supported only for hot spares
1713 * and level 2 cache devices.
1716 zpool_vdev_remove(zpool_handle_t
*zhp
, const char *path
)
1718 zfs_cmd_t zc
= { 0 };
1721 boolean_t avail_spare
, l2cache
;
1722 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1724 (void) snprintf(msg
, sizeof (msg
),
1725 dgettext(TEXT_DOMAIN
, "cannot remove %s"), path
);
1727 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1728 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
)) == 0)
1729 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1731 if (!avail_spare
&& !l2cache
) {
1732 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1733 "only inactive hot spares or cache devices "
1735 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1738 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1740 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_REMOVE
, &zc
) == 0)
1743 return (zpool_standard_error(hdl
, errno
, msg
));
1747 * Clear the errors for the pool, or the particular device if specified.
1750 zpool_clear(zpool_handle_t
*zhp
, const char *path
)
1752 zfs_cmd_t zc
= { 0 };
1755 boolean_t avail_spare
, l2cache
;
1756 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1759 (void) snprintf(msg
, sizeof (msg
),
1760 dgettext(TEXT_DOMAIN
, "cannot clear errors for %s"),
1763 (void) snprintf(msg
, sizeof (msg
),
1764 dgettext(TEXT_DOMAIN
, "cannot clear errors for %s"),
1767 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1769 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
,
1771 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1774 * Don't allow error clearing for hot spares. Do allow
1775 * error clearing for l2cache devices.
1778 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1780 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
,
1784 if (zfs_ioctl(hdl
, ZFS_IOC_CLEAR
, &zc
) == 0)
1787 return (zpool_standard_error(hdl
, errno
, msg
));
1791 * Similar to zpool_clear(), but takes a GUID (used by fmd).
1794 zpool_vdev_clear(zpool_handle_t
*zhp
, uint64_t guid
)
1796 zfs_cmd_t zc
= { 0 };
1798 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1800 (void) snprintf(msg
, sizeof (msg
),
1801 dgettext(TEXT_DOMAIN
, "cannot clear errors for %llx"),
1804 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1807 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_CLEAR
, &zc
) == 0)
1810 return (zpool_standard_error(hdl
, errno
, msg
));
1814 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1818 zpool_iter_zvol(zpool_handle_t
*zhp
, int (*cb
)(const char *, void *),
1821 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1822 char (*paths
)[MAXPATHLEN
];
1824 int curr
, fd
, base
, ret
= 0;
1829 if ((base
= open("/dev/zvol/dsk", O_RDONLY
)) < 0)
1830 return (errno
== ENOENT
? 0 : -1);
1832 if (fstatat(base
, zhp
->zpool_name
, &st
, 0) != 0) {
1835 return (err
== ENOENT
? 0 : -1);
1839 * Oddly this wasn't a directory -- ignore that failure since we
1840 * know there are no links lower in the (non-existant) hierarchy.
1842 if (!S_ISDIR(st
.st_mode
)) {
1847 if ((paths
= zfs_alloc(hdl
, size
* sizeof (paths
[0]))) == NULL
) {
1852 (void) strlcpy(paths
[0], zhp
->zpool_name
, sizeof (paths
[0]));
1856 if (fstatat(base
, paths
[curr
], &st
, AT_SYMLINK_NOFOLLOW
) != 0)
1859 if (S_ISDIR(st
.st_mode
)) {
1860 if ((fd
= openat(base
, paths
[curr
], O_RDONLY
)) < 0)
1863 if ((dirp
= fdopendir(fd
)) == NULL
) {
1868 while ((dp
= readdir(dirp
)) != NULL
) {
1869 if (dp
->d_name
[0] == '.')
1872 if (curr
+ 1 == size
) {
1873 paths
= zfs_realloc(hdl
, paths
,
1874 size
* sizeof (paths
[0]),
1875 size
* 2 * sizeof (paths
[0]));
1876 if (paths
== NULL
) {
1877 (void) closedir(dirp
);
1885 (void) strlcpy(paths
[curr
+ 1], paths
[curr
],
1886 sizeof (paths
[curr
+ 1]));
1887 (void) strlcat(paths
[curr
], "/",
1888 sizeof (paths
[curr
]));
1889 (void) strlcat(paths
[curr
], dp
->d_name
,
1890 sizeof (paths
[curr
]));
1894 (void) closedir(dirp
);
1897 if ((ret
= cb(paths
[curr
], data
)) != 0)
1915 typedef struct zvol_cb
{
1916 zpool_handle_t
*zcb_pool
;
1917 boolean_t zcb_create
;
1922 do_zvol_create(zfs_handle_t
*zhp
, void *data
)
1926 if (ZFS_IS_VOLUME(zhp
)) {
1927 (void) zvol_create_link(zhp
->zfs_hdl
, zhp
->zfs_name
);
1928 ret
= zfs_iter_snapshots(zhp
, do_zvol_create
, NULL
);
1932 ret
= zfs_iter_filesystems(zhp
, do_zvol_create
, NULL
);
1940 * Iterate over all zvols in the pool and make any necessary minor nodes.
1943 zpool_create_zvol_links(zpool_handle_t
*zhp
)
1949 * If the pool is unavailable, just return success.
1951 if ((zfp
= make_dataset_handle(zhp
->zpool_hdl
,
1952 zhp
->zpool_name
)) == NULL
)
1955 ret
= zfs_iter_filesystems(zfp
, do_zvol_create
, NULL
);
1962 do_zvol_remove(const char *dataset
, void *data
)
1964 zpool_handle_t
*zhp
= data
;
1966 return (zvol_remove_link(zhp
->zpool_hdl
, dataset
));
1970 * Iterate over all zvols in the pool and remove any minor nodes. We iterate
1971 * by examining the /dev links so that a corrupted pool doesn't impede this
1975 zpool_remove_zvol_links(zpool_handle_t
*zhp
)
1977 return (zpool_iter_zvol(zhp
, do_zvol_remove
, zhp
));
1981 * Convert from a devid string to a path.
1984 devid_to_path(char *devid_str
)
1989 devid_nmlist_t
*list
= NULL
;
1992 if (devid_str_decode(devid_str
, &devid
, &minor
) != 0)
1995 ret
= devid_deviceid_to_nmlist("/dev", devid
, minor
, &list
);
1997 devid_str_free(minor
);
2003 if ((path
= strdup(list
[0].devname
)) == NULL
)
2006 devid_free_nmlist(list
);
2012 * Convert from a path to a devid string.
2015 path_to_devid(const char *path
)
2021 if ((fd
= open(path
, O_RDONLY
)) < 0)
2026 if (devid_get(fd
, &devid
) == 0) {
2027 if (devid_get_minor_name(fd
, &minor
) == 0)
2028 ret
= devid_str_encode(devid
, minor
);
2030 devid_str_free(minor
);
2039 * Issue the necessary ioctl() to update the stored path value for the vdev. We
2040 * ignore any failure here, since a common case is for an unprivileged user to
2041 * type 'zpool status', and we'll display the correct information anyway.
2044 set_path(zpool_handle_t
*zhp
, nvlist_t
*nv
, const char *path
)
2046 zfs_cmd_t zc
= { 0 };
2048 (void) strncpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2049 (void) strncpy(zc
.zc_value
, path
, sizeof (zc
.zc_value
));
2050 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
2053 (void) ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_VDEV_SETPATH
, &zc
);
2057 * Given a vdev, return the name to display in iostat. If the vdev has a path,
2058 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2059 * We also check if this is a whole disk, in which case we strip off the
2060 * trailing 's0' slice name.
2062 * This routine is also responsible for identifying when disks have been
2063 * reconfigured in a new location. The kernel will have opened the device by
2064 * devid, but the path will still refer to the old location. To catch this, we
2065 * first do a path -> devid translation (which is fast for the common case). If
2066 * the devid matches, we're done. If not, we do a reverse devid -> path
2067 * translation and issue the appropriate ioctl() to update the path of the vdev.
2068 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2072 zpool_vdev_name(libzfs_handle_t
*hdl
, zpool_handle_t
*zhp
, nvlist_t
*nv
)
2080 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_NOT_PRESENT
,
2082 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
2084 (void) snprintf(buf
, sizeof (buf
), "%llu",
2085 (u_longlong_t
)value
);
2087 } else if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_PATH
, &path
) == 0) {
2090 * If the device is dead (faulted, offline, etc) then don't
2091 * bother opening it. Otherwise we may be forcing the user to
2092 * open a misbehaving device, which can have undesirable
2095 if ((nvlist_lookup_uint64_array(nv
, ZPOOL_CONFIG_STATS
,
2096 (uint64_t **)&vs
, &vsc
) != 0 ||
2097 vs
->vs_state
>= VDEV_STATE_DEGRADED
) &&
2099 nvlist_lookup_string(nv
, ZPOOL_CONFIG_DEVID
, &devid
) == 0) {
2101 * Determine if the current path is correct.
2103 char *newdevid
= path_to_devid(path
);
2105 if (newdevid
== NULL
||
2106 strcmp(devid
, newdevid
) != 0) {
2109 if ((newpath
= devid_to_path(devid
)) != NULL
) {
2111 * Update the path appropriately.
2113 set_path(zhp
, nv
, newpath
);
2114 if (nvlist_add_string(nv
,
2115 ZPOOL_CONFIG_PATH
, newpath
) == 0)
2116 verify(nvlist_lookup_string(nv
,
2124 devid_str_free(newdevid
);
2127 if (strncmp(path
, "/dev/dsk/", 9) == 0)
2130 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
2131 &value
) == 0 && value
) {
2132 char *tmp
= zfs_strdup(hdl
, path
);
2135 tmp
[strlen(path
) - 2] = '\0';
2139 verify(nvlist_lookup_string(nv
, ZPOOL_CONFIG_TYPE
, &path
) == 0);
2142 * If it's a raidz device, we need to stick in the parity level.
2144 if (strcmp(path
, VDEV_TYPE_RAIDZ
) == 0) {
2145 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_NPARITY
,
2147 (void) snprintf(buf
, sizeof (buf
), "%s%llu", path
,
2148 (u_longlong_t
)value
);
2153 return (zfs_strdup(hdl
, path
));
2157 zbookmark_compare(const void *a
, const void *b
)
2159 return (memcmp(a
, b
, sizeof (zbookmark_t
)));
2163 * Retrieve the persistent error log, uniquify the members, and return to the
2167 zpool_get_errlog(zpool_handle_t
*zhp
, nvlist_t
**nverrlistp
)
2169 zfs_cmd_t zc
= { 0 };
2171 zbookmark_t
*zb
= NULL
;
2175 * Retrieve the raw error list from the kernel. If the number of errors
2176 * has increased, allocate more space and continue until we get the
2179 verify(nvlist_lookup_uint64(zhp
->zpool_config
, ZPOOL_CONFIG_ERRCOUNT
,
2183 if ((zc
.zc_nvlist_dst
= (uintptr_t)zfs_alloc(zhp
->zpool_hdl
,
2184 count
* sizeof (zbookmark_t
))) == (uintptr_t)NULL
)
2186 zc
.zc_nvlist_dst_size
= count
;
2187 (void) strcpy(zc
.zc_name
, zhp
->zpool_name
);
2189 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_ERROR_LOG
,
2191 free((void *)(uintptr_t)zc
.zc_nvlist_dst
);
2192 if (errno
== ENOMEM
) {
2193 count
= zc
.zc_nvlist_dst_size
;
2194 if ((zc
.zc_nvlist_dst
= (uintptr_t)
2195 zfs_alloc(zhp
->zpool_hdl
, count
*
2196 sizeof (zbookmark_t
))) == (uintptr_t)NULL
)
2207 * Sort the resulting bookmarks. This is a little confusing due to the
2208 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
2209 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
2210 * _not_ copied as part of the process. So we point the start of our
2211 * array appropriate and decrement the total number of elements.
2213 zb
= ((zbookmark_t
*)(uintptr_t)zc
.zc_nvlist_dst
) +
2214 zc
.zc_nvlist_dst_size
;
2215 count
-= zc
.zc_nvlist_dst_size
;
2217 qsort(zb
, count
, sizeof (zbookmark_t
), zbookmark_compare
);
2219 verify(nvlist_alloc(nverrlistp
, 0, KM_SLEEP
) == 0);
2222 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2224 for (i
= 0; i
< count
; i
++) {
2227 /* ignoring zb_blkid and zb_level for now */
2228 if (i
> 0 && zb
[i
-1].zb_objset
== zb
[i
].zb_objset
&&
2229 zb
[i
-1].zb_object
== zb
[i
].zb_object
)
2232 if (nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) != 0)
2234 if (nvlist_add_uint64(nv
, ZPOOL_ERR_DATASET
,
2235 zb
[i
].zb_objset
) != 0) {
2239 if (nvlist_add_uint64(nv
, ZPOOL_ERR_OBJECT
,
2240 zb
[i
].zb_object
) != 0) {
2244 if (nvlist_add_nvlist(*nverrlistp
, "ejk", nv
) != 0) {
2251 free((void *)(uintptr_t)zc
.zc_nvlist_dst
);
2255 free((void *)(uintptr_t)zc
.zc_nvlist_dst
);
2256 return (no_memory(zhp
->zpool_hdl
));
2260 * Upgrade a ZFS pool to the latest on-disk version.
2263 zpool_upgrade(zpool_handle_t
*zhp
, uint64_t new_version
)
2265 zfs_cmd_t zc
= { 0 };
2266 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2268 (void) strcpy(zc
.zc_name
, zhp
->zpool_name
);
2269 zc
.zc_cookie
= new_version
;
2271 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_UPGRADE
, &zc
) != 0)
2272 return (zpool_standard_error_fmt(hdl
, errno
,
2273 dgettext(TEXT_DOMAIN
, "cannot upgrade '%s'"),
2279 zpool_set_history_str(const char *subcommand
, int argc
, char **argv
,
2284 (void) strlcpy(history_str
, subcommand
, HIS_MAX_RECORD_LEN
);
2285 for (i
= 1; i
< argc
; i
++) {
2286 if (strlen(history_str
) + 1 + strlen(argv
[i
]) >
2289 (void) strlcat(history_str
, " ", HIS_MAX_RECORD_LEN
);
2290 (void) strlcat(history_str
, argv
[i
], HIS_MAX_RECORD_LEN
);
2295 * Stage command history for logging.
2298 zpool_stage_history(libzfs_handle_t
*hdl
, const char *history_str
)
2300 if (history_str
== NULL
)
2303 if (strlen(history_str
) > HIS_MAX_RECORD_LEN
)
2306 if (hdl
->libzfs_log_str
!= NULL
)
2307 free(hdl
->libzfs_log_str
);
2309 if ((hdl
->libzfs_log_str
= strdup(history_str
)) == NULL
)
2310 return (no_memory(hdl
));
2316 * Perform ioctl to get some command history of a pool.
2318 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
2319 * logical offset of the history buffer to start reading from.
2321 * Upon return, 'off' is the next logical offset to read from and
2322 * 'len' is the actual amount of bytes read into 'buf'.
2325 get_history(zpool_handle_t
*zhp
, char *buf
, uint64_t *off
, uint64_t *len
)
2327 zfs_cmd_t zc
= { 0 };
2328 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2330 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2332 zc
.zc_history
= (uint64_t)(uintptr_t)buf
;
2333 zc
.zc_history_len
= *len
;
2334 zc
.zc_history_offset
= *off
;
2336 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_POOL_GET_HISTORY
, &zc
) != 0) {
2339 return (zfs_error_fmt(hdl
, EZFS_PERM
,
2340 dgettext(TEXT_DOMAIN
,
2341 "cannot show history for pool '%s'"),
2344 return (zfs_error_fmt(hdl
, EZFS_NOHISTORY
,
2345 dgettext(TEXT_DOMAIN
, "cannot get history for pool "
2346 "'%s'"), zhp
->zpool_name
));
2348 return (zfs_error_fmt(hdl
, EZFS_BADVERSION
,
2349 dgettext(TEXT_DOMAIN
, "cannot get history for pool "
2350 "'%s', pool must be upgraded"), zhp
->zpool_name
));
2352 return (zpool_standard_error_fmt(hdl
, errno
,
2353 dgettext(TEXT_DOMAIN
,
2354 "cannot get history for '%s'"), zhp
->zpool_name
));
2358 *len
= zc
.zc_history_len
;
2359 *off
= zc
.zc_history_offset
;
2365 * Process the buffer of nvlists, unpacking and storing each nvlist record
2366 * into 'records'. 'leftover' is set to the number of bytes that weren't
2367 * processed as there wasn't a complete record.
2370 zpool_history_unpack(char *buf
, uint64_t bytes_read
, uint64_t *leftover
,
2371 nvlist_t
***records
, uint_t
*numrecords
)
2377 while (bytes_read
> sizeof (reclen
)) {
2379 /* get length of packed record (stored as little endian) */
2380 for (i
= 0, reclen
= 0; i
< sizeof (reclen
); i
++)
2381 reclen
+= (uint64_t)(((uchar_t
*)buf
)[i
]) << (8*i
);
2383 if (bytes_read
< sizeof (reclen
) + reclen
)
2387 if (nvlist_unpack(buf
+ sizeof (reclen
), reclen
, &nv
, 0) != 0)
2389 bytes_read
-= sizeof (reclen
) + reclen
;
2390 buf
+= sizeof (reclen
) + reclen
;
2392 /* add record to nvlist array */
2394 if (ISP2(*numrecords
+ 1)) {
2395 *records
= realloc(*records
,
2396 *numrecords
* 2 * sizeof (nvlist_t
*));
2398 (*records
)[*numrecords
- 1] = nv
;
2401 *leftover
= bytes_read
;
2405 #define HIS_BUF_LEN (128*1024)
2408 * Retrieve the command history of a pool.
2411 zpool_get_history(zpool_handle_t
*zhp
, nvlist_t
**nvhisp
)
2413 char buf
[HIS_BUF_LEN
];
2415 nvlist_t
**records
= NULL
;
2416 uint_t numrecords
= 0;
2420 uint64_t bytes_read
= sizeof (buf
);
2423 if ((err
= get_history(zhp
, buf
, &off
, &bytes_read
)) != 0)
2426 /* if nothing else was read in, we're at EOF, just return */
2430 if ((err
= zpool_history_unpack(buf
, bytes_read
,
2431 &leftover
, &records
, &numrecords
)) != 0)
2439 verify(nvlist_alloc(nvhisp
, NV_UNIQUE_NAME
, 0) == 0);
2440 verify(nvlist_add_nvlist_array(*nvhisp
, ZPOOL_HIST_RECORD
,
2441 records
, numrecords
) == 0);
2443 for (i
= 0; i
< numrecords
; i
++)
2444 nvlist_free(records
[i
]);
2451 zpool_obj_to_path(zpool_handle_t
*zhp
, uint64_t dsobj
, uint64_t obj
,
2452 char *pathname
, size_t len
)
2454 zfs_cmd_t zc
= { 0 };
2455 boolean_t mounted
= B_FALSE
;
2456 char *mntpnt
= NULL
;
2457 char dsname
[MAXNAMELEN
];
2460 /* special case for the MOS */
2461 (void) snprintf(pathname
, len
, "<metadata>:<0x%llx>", obj
);
2465 /* get the dataset's name */
2466 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2468 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
,
2469 ZFS_IOC_DSOBJ_TO_DSNAME
, &zc
) != 0) {
2470 /* just write out a path of two object numbers */
2471 (void) snprintf(pathname
, len
, "<0x%llx>:<0x%llx>",
2475 (void) strlcpy(dsname
, zc
.zc_value
, sizeof (dsname
));
2477 /* find out if the dataset is mounted */
2478 mounted
= is_mounted(zhp
->zpool_hdl
, dsname
, &mntpnt
);
2480 /* get the corrupted object's path */
2481 (void) strlcpy(zc
.zc_name
, dsname
, sizeof (zc
.zc_name
));
2483 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_OBJ_TO_PATH
,
2486 (void) snprintf(pathname
, len
, "%s%s", mntpnt
,
2489 (void) snprintf(pathname
, len
, "%s:%s",
2490 dsname
, zc
.zc_value
);
2493 (void) snprintf(pathname
, len
, "%s:<0x%llx>", dsname
, obj
);
2498 #define RDISK_ROOT "/dev/rdsk"
2499 #define BACKUP_SLICE "s2"
2501 * Don't start the slice at the default block of 34; many storage
2502 * devices will use a stripe width of 128k, so start there instead.
2504 #define NEW_START_BLOCK 256
2507 * determine where a partition starts on a disk in the current
2511 find_start_block(nvlist_t
*config
)
2516 diskaddr_t sb
= MAXOFFSET_T
;
2518 char diskname
[MAXPATHLEN
];
2521 if (nvlist_lookup_nvlist_array(config
,
2522 ZPOOL_CONFIG_CHILDREN
, &child
, &children
) != 0) {
2523 if (nvlist_lookup_uint64(config
,
2524 ZPOOL_CONFIG_WHOLE_DISK
,
2525 &wholedisk
) != 0 || !wholedisk
) {
2526 return (MAXOFFSET_T
);
2528 if (nvlist_lookup_string(config
,
2529 ZPOOL_CONFIG_PATH
, &path
) != 0) {
2530 return (MAXOFFSET_T
);
2533 (void) snprintf(diskname
, sizeof (diskname
), "%s%s",
2534 RDISK_ROOT
, strrchr(path
, '/'));
2535 if ((fd
= open(diskname
, O_RDONLY
|O_NDELAY
)) >= 0) {
2536 struct dk_gpt
*vtoc
;
2537 if (efi_alloc_and_read(fd
, &vtoc
) >= 0) {
2538 sb
= vtoc
->efi_parts
[0].p_start
;
2546 for (c
= 0; c
< children
; c
++) {
2547 sb
= find_start_block(child
[c
]);
2548 if (sb
!= MAXOFFSET_T
) {
2552 return (MAXOFFSET_T
);
2556 * Label an individual disk. The name provided is the short name,
2557 * stripped of any leading /dev path.
2560 zpool_label_disk(libzfs_handle_t
*hdl
, zpool_handle_t
*zhp
, char *name
)
2562 char path
[MAXPATHLEN
];
2563 struct dk_gpt
*vtoc
;
2565 size_t resv
= EFI_MIN_RESV_SIZE
;
2566 uint64_t slice_size
;
2567 diskaddr_t start_block
;
2570 /* prepare an error message just in case */
2571 (void) snprintf(errbuf
, sizeof (errbuf
),
2572 dgettext(TEXT_DOMAIN
, "cannot label '%s'"), name
);
2577 verify(nvlist_lookup_nvlist(zhp
->zpool_config
,
2578 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
2580 if (zhp
->zpool_start_block
== 0)
2581 start_block
= find_start_block(nvroot
);
2583 start_block
= zhp
->zpool_start_block
;
2584 zhp
->zpool_start_block
= start_block
;
2587 start_block
= NEW_START_BLOCK
;
2590 (void) snprintf(path
, sizeof (path
), "%s/%s%s", RDISK_ROOT
, name
,
2593 if ((fd
= open(path
, O_RDWR
| O_NDELAY
)) < 0) {
2595 * This shouldn't happen. We've long since verified that this
2596 * is a valid device.
2599 dgettext(TEXT_DOMAIN
, "unable to open device"));
2600 return (zfs_error(hdl
, EZFS_OPENFAILED
, errbuf
));
2603 if (efi_alloc_and_init(fd
, EFI_NUMPAR
, &vtoc
) != 0) {
2605 * The only way this can fail is if we run out of memory, or we
2606 * were unable to read the disk's capacity
2608 if (errno
== ENOMEM
)
2609 (void) no_memory(hdl
);
2612 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2613 "unable to read disk capacity"), name
);
2615 return (zfs_error(hdl
, EZFS_NOCAP
, errbuf
));
2618 slice_size
= vtoc
->efi_last_u_lba
+ 1;
2619 slice_size
-= EFI_MIN_RESV_SIZE
;
2620 if (start_block
== MAXOFFSET_T
)
2621 start_block
= NEW_START_BLOCK
;
2622 slice_size
-= start_block
;
2624 vtoc
->efi_parts
[0].p_start
= start_block
;
2625 vtoc
->efi_parts
[0].p_size
= slice_size
;
2628 * Why we use V_USR: V_BACKUP confuses users, and is considered
2629 * disposable by some EFI utilities (since EFI doesn't have a backup
2630 * slice). V_UNASSIGNED is supposed to be used only for zero size
2631 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
2632 * etc. were all pretty specific. V_USR is as close to reality as we
2633 * can get, in the absence of V_OTHER.
2635 vtoc
->efi_parts
[0].p_tag
= V_USR
;
2636 (void) strcpy(vtoc
->efi_parts
[0].p_name
, "zfs");
2638 vtoc
->efi_parts
[8].p_start
= slice_size
+ start_block
;
2639 vtoc
->efi_parts
[8].p_size
= resv
;
2640 vtoc
->efi_parts
[8].p_tag
= V_RESERVED
;
2642 if (efi_write(fd
, vtoc
) != 0) {
2644 * Some block drivers (like pcata) may not support EFI
2645 * GPT labels. Print out a helpful error message dir-
2646 * ecting the user to manually label the disk and give
2652 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2653 "try using fdisk(1M) and then provide a specific slice"));
2654 return (zfs_error(hdl
, EZFS_LABELFAILED
, errbuf
));
2663 supported_dump_vdev_type(libzfs_handle_t
*hdl
, nvlist_t
*config
, char *errbuf
)
2669 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_TYPE
, &type
) == 0);
2670 if (strcmp(type
, VDEV_TYPE_RAIDZ
) == 0 ||
2671 strcmp(type
, VDEV_TYPE_FILE
) == 0 ||
2672 strcmp(type
, VDEV_TYPE_LOG
) == 0 ||
2673 strcmp(type
, VDEV_TYPE_MISSING
) == 0) {
2674 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2675 "vdev type '%s' is not supported"), type
);
2676 (void) zfs_error(hdl
, EZFS_VDEVNOTSUP
, errbuf
);
2679 if (nvlist_lookup_nvlist_array(config
, ZPOOL_CONFIG_CHILDREN
,
2680 &child
, &children
) == 0) {
2681 for (c
= 0; c
< children
; c
++) {
2682 if (!supported_dump_vdev_type(hdl
, child
[c
], errbuf
))
2690 * check if this zvol is allowable for use as a dump device; zero if
2691 * it is, > 0 if it isn't, < 0 if it isn't a zvol
2694 zvol_check_dump_config(char *arg
)
2696 zpool_handle_t
*zhp
= NULL
;
2697 nvlist_t
*config
, *nvroot
;
2701 libzfs_handle_t
*hdl
;
2703 char poolname
[ZPOOL_MAXNAMELEN
];
2704 int pathlen
= strlen(ZVOL_FULL_DEV_DIR
);
2707 if (strncmp(arg
, ZVOL_FULL_DEV_DIR
, pathlen
)) {
2711 (void) snprintf(errbuf
, sizeof (errbuf
), dgettext(TEXT_DOMAIN
,
2712 "dump is not supported on device '%s'"), arg
);
2714 if ((hdl
= libzfs_init()) == NULL
)
2716 libzfs_print_on_error(hdl
, B_TRUE
);
2718 volname
= arg
+ pathlen
;
2720 /* check the configuration of the pool */
2721 if ((p
= strchr(volname
, '/')) == NULL
) {
2722 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2723 "malformed dataset name"));
2724 (void) zfs_error(hdl
, EZFS_INVALIDNAME
, errbuf
);
2726 } else if (p
- volname
>= ZFS_MAXNAMELEN
) {
2727 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2728 "dataset name is too long"));
2729 (void) zfs_error(hdl
, EZFS_NAMETOOLONG
, errbuf
);
2732 (void) strncpy(poolname
, volname
, p
- volname
);
2733 poolname
[p
- volname
] = '\0';
2736 if ((zhp
= zpool_open(hdl
, poolname
)) == NULL
) {
2737 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2738 "could not open pool '%s'"), poolname
);
2739 (void) zfs_error(hdl
, EZFS_OPENFAILED
, errbuf
);
2742 config
= zpool_get_config(zhp
, NULL
);
2743 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
2745 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2746 "could not obtain vdev configuration for '%s'"), poolname
);
2747 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, errbuf
);
2751 verify(nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_CHILDREN
,
2752 &top
, &toplevels
) == 0);
2753 if (toplevels
!= 1) {
2754 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2755 "'%s' has multiple top level vdevs"), poolname
);
2756 (void) zfs_error(hdl
, EZFS_DEVOVERFLOW
, errbuf
);
2760 if (!supported_dump_vdev_type(hdl
, top
[0], errbuf
)) {