/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif
#include "libzfs_impl.h"
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	uint64_t		ne_order;
	uint64_t		ne_num_labels;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
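/*
 * Illustrative sketch (not part of this file): walking the gathered
 * pool guid -> toplevel vdev guid -> label txg hierarchy. Any pool_list_t
 * built by add_config() below can be traversed this way:
 *
 *	pool_entry_t *pe;
 *	vdev_entry_t *ve;
 *	config_entry_t *ce;
 *
 *	for (pe = pl->pools; pe != NULL; pe = pe->pe_next)
 *		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next)
 *			for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next)
 *				(void) printf("pool %llx vdev %llx txg %llu\n",
 *				    (u_longlong_t)pe->pe_guid,
 *				    (u_longlong_t)ve->ve_guid,
 *				    (u_longlong_t)ce->ce_txg);
 */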
#define	DEV_BYID_PATH	"/dev/disk/by-id/"
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Wait up to timeout_ms for udev to set up the device node. The device is
 * considered ready when the provided path has been verified to exist and
 * it has been allowed to settle. At this point the device can be accessed
 * reliably. Depending on the complexity of the udev rules this process
 * could take several seconds.
 */
static int
zpool_label_disk_wait(char *path, int timeout_ms)
{
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;
	struct stat64 statbuf;

	start = gethrtime();
	settle = 0;

	do {
		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0)) {
			if (settle == 0)
				settle = gethrtime();
			else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms)
				return (0);
		} else if (errno != ENOENT) {
			return (errno);
		}

		usleep(sleep_ms * MILLISEC);
	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	return (ENODEV);
}
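/*
 * Illustrative sketch (hypothetical caller, not in this file): a typical
 * use is to wait for a freshly created device node to appear and settle
 * before reading it back, as add_configs_from_label() does below with the
 * DISK_LABEL_WAIT timeout:
 *
 *	if (zpool_label_disk_wait(path, DISK_LABEL_WAIT) == 0)
 *		fd = open(path, O_RDONLY);	// node exists and has settled
 */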
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev. In either case, go through
	 * the name list and see if we find a matching guid. If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping partitions or multiple paths to the
	 * same disk. In this case we prefer to use the path name which
	 * matches the ZPOOL_CONFIG_PATH. If no matching entry is found we
	 * use the lowest order device which corresponds to the first match
	 * while traversing the ZPOOL_IMPORT_PATH search path.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			if (path == NULL) {
				best = ne;
				break;
			}

			if ((strlen(path) == strlen(ne->ne_name)) &&
			    strncmp(path, ne->ne_name, strlen(path)) == 0) {
				best = ne;
				break;
			}

			if (best == NULL) {
				best = ne;
				continue;
			}

			/* Prefer paths with more vdev labels. */
			if (ne->ne_num_labels > best->ne_num_labels) {
				best = ne;
				continue;
			}

			/* Prefer paths earlier in the search order. */
			if (ne->ne_num_labels == best->ne_num_labels &&
			    ne->ne_order < best->ne_order) {
				best = ne;
				continue;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}
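/*
 * Worked example (illustrative, not from the original source): suppose the
 * same leaf guid was seen as /dev/disk/by-id/x (order 1, 4 labels) and as
 * /dev/sdb1 (order 100, 4 labels), and neither matches ZPOOL_CONFIG_PATH.
 * Both carry the same number of labels, so the tie is broken by ne_order
 * and the by-id name wins. A candidate with more intact labels would win
 * outright, regardless of search order.
 */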
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, int num_labels, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_order = order;
		ne->ne_num_labels = num_labels;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label. In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on. If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool. If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * not known.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_order = order;
	ne->ne_num_labels = num_labels;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
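/*
 * Illustrative note (not upstream documentation): scanning the same pool
 * member through several search paths therefore yields exactly one
 * config_entry_t per (pool guid, top-level vdev guid, txg) tuple, with
 * later duplicates freed above, while every path still lands in pl->names
 * so fix_paths() can later choose the best name for each leaf.
 */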
#ifdef HAVE_LIBBLKID
static int
add_path(libzfs_handle_t *hdl, pool_list_t *pools, uint64_t pool_guid,
    uint64_t vdev_guid, const char *path, int order)
{
	nvlist_t *label;
	uint64_t guid;
	int error, fd, num_labels;

	fd = open64(path, O_RDONLY);
	if (fd < 0)
		return (errno);

	error = zpool_read_label(fd, &label, &num_labels);
	(void) close(fd);

	if (error || label == NULL)
		return (ENOENT);

	error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid);
	if (error || guid != pool_guid) {
		nvlist_free(label);
		return (EINVAL);
	}

	error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid);
	if (error || guid != vdev_guid) {
		nvlist_free(label);
		return (EINVAL);
	}

	error = add_config(hdl, pools, path, order, num_labels, label);

	return (error);
}
static int
add_configs_from_label_impl(libzfs_handle_t *hdl, pool_list_t *pools,
    nvlist_t *nvroot, uint64_t pool_guid, uint64_t vdev_guid)
{
	char udevpath[MAXPATHLEN];
	char *path;
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	int error;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			error = add_configs_from_label_impl(hdl, pools,
			    child[c], pool_guid, vdev_guid);
			if (error)
				return (error);
		}
		return (0);
	}

	error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
	if ((error != 0) || (guid != vdev_guid))
		return (0);

	error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &path);
	if (error == 0)
		(void) add_path(hdl, pools, pool_guid, vdev_guid, path, 0);

	error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &path);
	if (error == 0) {
		(void) snprintf(udevpath, sizeof (udevpath), "%s%s",
		    DEV_BYID_PATH, path);
		(void) add_path(hdl, pools, pool_guid, vdev_guid, udevpath, 1);
	}

	return (0);
}
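/*
 * Illustrative sketch (hypothetical values): for a leaf whose label records
 * path=/dev/sda1 and devid=ata-DISK123-part1, the calls above register
 *
 *	add_path(..., "/dev/sda1", 0);
 *	add_path(..., "/dev/disk/by-id/ata-DISK123-part1", 1);
 *
 * and add_configs_from_label() below appends the raw device node with
 * order 100 as a last resort.
 */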
/*
 * Given a disk label call add_config() for all known paths to the device
 * as described by the label itself. The paths are added in the following
 * priority order: 'path', 'devid', 'devnode'. As these alternate paths are
 * added the labels are verified to make sure they refer to the same device.
 */
static int
add_configs_from_label(libzfs_handle_t *hdl, pool_list_t *pools,
    char *devname, int num_labels, nvlist_t *label)
{
	nvlist_t *nvroot;
	uint64_t pool_guid;
	uint64_t vdev_guid;
	int error;

	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
		return (ENOENT);

	/* Allow devlinks to stabilize so all paths are available. */
	zpool_label_disk_wait(devname, DISK_LABEL_WAIT);

	/* Add alternate paths as described by the label vdev_tree. */
	(void) add_configs_from_label_impl(hdl, pools, nvroot,
	    pool_guid, vdev_guid);

	/* Add the device node /dev/sdX path as a last resort. */
	error = add_config(hdl, pools, devname, 100, num_labels, label);

	return (error);
}
#endif /* HAVE_LIBBLKID */
/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
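/*
 * Illustrative note (not upstream documentation): the loop above is the
 * usual libzfs handshake for nvlist-returning ioctls -- allocate a
 * destination buffer (here twice the packed config size), retry the ioctl
 * while it fails with ENOMEM, growing the buffer each time via
 * zcmd_expand_dst_nvlist(), then unpack the reply once it fits.
 */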
/*
 * Determine if the vdev id is a hole in the namespace.
 */
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {
		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid = 0;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
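/*
 * Worked example (illustrative): with VDEV_LABELS == 4 and a 256K
 * vdev_label_t, labels 0 and 1 sit at offsets 0 and 256K from the start of
 * the device, while labels 2 and 3 sit at size - 512K and size - 256K,
 * i.e. two labels at each end of the label-aligned device.
 */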
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one. The number of valid
 * labels found will be returned in num_labels when non-NULL.
 */
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat64 statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;

	*config = NULL;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}
/*
 * Given a file descriptor, clear (zero) the label information. This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}
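/*
 * Illustrative note (not upstream documentation): this zeroes all four
 * label regions computed by label_offset(), so a subsequent
 * zpool_read_label() on the same device reports no config. Callers are
 * expected to ensure the device is not part of an active pool first.
 */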
#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	int err;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs_member");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		nvlist_t *label;
		char *devname;
		int fd, num_labels;

		devname = (char *)blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &label, &num_labels);
		(void) close(fd);

		if (err || label == NULL)
			continue;

		add_configs_from_label(hdl, pools, devname, num_labels, label);
	}
	err = 0;

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */
char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev"			/* UNSAFE device names will change */
};
/*
 * Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, num_labels, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */
		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when accessed from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats. We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config, &num_labels))) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;
				char *pname;

				if ((iarg->poolname != NULL) &&
				    (nvlist_lookup_string(config,
				    ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {

					if (strcmp(iarg->poolname, pname))
						matched = B_FALSE;

				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, i+1,
				    num_labels, config))
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}
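/*
 * Illustrative sketch (hypothetical caller): searching two explicit
 * directories instead of the default import paths:
 *
 *	char *dirs[] = { "/dev/disk/by-id", "/dev/mapper" };
 *	nvlist_t *pools = zpool_find_import(hdl, 2, dirs);
 */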
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
*zhp
, void *data
)
1535 importargs_t
*import
= data
;
1538 if (import
->poolname
!= NULL
) {
1541 verify(nvlist_lookup_string(zhp
->zpool_config
,
1542 ZPOOL_CONFIG_POOL_NAME
, &pool_name
) == 0);
1543 if (strcmp(pool_name
, import
->poolname
) == 0)
1548 verify(nvlist_lookup_uint64(zhp
->zpool_config
,
1549 ZPOOL_CONFIG_POOL_GUID
, &pool_guid
) == 0);
1550 if (pool_guid
== import
->guid
)
1559 zpool_search_import(libzfs_handle_t
*hdl
, importargs_t
*import
)
1561 verify(import
->poolname
== NULL
|| import
->guid
== 0);
1564 import
->exists
= zpool_iter(hdl
, name_or_guid_exists
, import
);
1566 if (import
->cachefile
!= NULL
)
1567 return (zpool_find_import_cached(hdl
, import
->cachefile
,
1568 import
->poolname
, import
->guid
));
1570 return (zpool_find_import_impl(hdl
, import
));
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
{
1595 const char *cb_type
;
1597 zpool_handle_t
*cb_zhp
;
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	nvlist_t *nvroot;
	uint64_t guid;
	uint_t i, count;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
/*
 * Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated,
		 * and we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}