/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
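/*
 * Illustrative sketch only (not part of this file): with the hierarchy
 * described above reduced to a simplified stand-in type, "picking the best
 * label txg config" for a toplevel vdev amounts to keeping the entry with
 * the largest transaction group.  The real code below does the same thing
 * with config_entry_t lists.
 */
struct example_label_config {
	uint64_t txg;
	struct example_label_config *next;
};

static struct example_label_config *
example_best_config(struct example_label_config *configs)
{
	struct example_label_config *c, *best = NULL;

	for (c = configs; c != NULL; c = c->next) {
		/* A later transaction group supersedes an earlier one. */
		if (best == NULL || c->txg > best->txg)
			best = c;
	}
	return (best);
}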
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#include <blkid/blkid.h>
#include "libzfs_impl.h"
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev. In either case, go through
	 * the name list and see if we find a matching guid. If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk. If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path. This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 * (See the illustrative suffix-match sketch after this function.)
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}
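/*
 * Standalone sketch (not part of this file) of the "most similar" rule used
 * by fix_paths() above: similarity is simply the number of characters that
 * match when comparing two paths from their ends.  For example, comparing
 * "/dev/dsk/c0t0d0s0" with "/dev/rdsk/c0t0d0s0" counts the shared
 * "dsk/c0t0d0s0" suffix, so slice numbers and preferred names survive even
 * after devices are renumbered.  Assumes only <string.h>.
 */
static int
example_suffix_match(const char *a, const char *b)
{
	const char *pa = a + strlen(a) - 1;
	const char *pb = b + strlen(b) - 1;
	int count = 0;

	/* Walk backward from the end of both strings until they differ. */
	while (pa >= a && pb >= b && *pa == *pb) {
		pa--;
		pb--;
		count++;
	}
	return (count);
}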
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label. In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on. If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool. If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * not there.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import. (A small sketch of this list-push follows the
	 * function.)
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}
	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
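/*
 * Sketch only (not part of this file): each guid -> path mapping gathered
 * by add_config() above is pushed onto the head of a singly linked list, so
 * entries end up in reverse discovery order; fix_paths() scans the whole
 * list, so the order is irrelevant.  Simplified stand-in type below.
 */
struct example_mapping {
	uint64_t guid;
	char *name;
	struct example_mapping *next;
};

static void
example_mapping_push(struct example_mapping **head, struct example_mapping *m)
{
	m->next = *head;	/* link the new entry in front of the head */
	*head = m;
}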
/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
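/*
 * Sketch only (not part of this file): refresh_config() above follows the
 * usual libzfs pattern for ioctls whose output nvlist buffer may be too
 * small: retry while the kernel reports ENOMEM, growing the destination
 * between attempts.  The generic shape, with a caller-supplied grow callback
 * standing in for zcmd_expand_dst_nvlist(), looks like this.  Assumes
 * <sys/ioctl.h> and <errno.h>.
 */
static int
example_ioctl_retry(int fd, unsigned long request, void *arg,
    int (*grow)(void *))
{
	int err;

	while ((err = ioctl(fd, request, arg)) != 0 && errno == ENOMEM) {
		if (grow(arg) != 0)
			return (-1);	/* could not enlarge the buffer */
	}
	return (err);
}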
/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {
		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;
		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;

				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
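/*
 * Worked example (assuming the usual 256 KiB sizeof (vdev_label_t) and
 * VDEV_LABELS == 4): for a device whose aligned size is 1 GiB,
 *
 *	label_offset(size, 0) == 0
 *	label_offset(size, 1) == 256 KiB
 *	label_offset(size, 2) == size - 512 KiB
 *	label_offset(size, 3) == size - 256 KiB
 *
 * i.e. two label copies at the front of the device and two at the very end,
 * which is why zpool_read_label() below tries all four.
 */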
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
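/*
 * Minimal usage sketch (not part of this file): open a device node, read
 * its label with zpool_read_label(), and print the pool name if the label
 * names one (spare and l2cache labels carry no pool name, so that lookup
 * simply fails for them).  Assumes <stdio.h> and <unistd.h>.
 */
static void
example_print_pool_name(const char *devpath)
{
	nvlist_t *config = NULL;
	char *name;
	int fd;

	if ((fd = open64(devpath, O_RDONLY)) < 0)
		return;

	if (zpool_read_label(fd, &config) == 0 && config != NULL) {
		if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0)
			(void) printf("%s: pool '%s'\n", devpath, name);
		nvlist_free(config);
	}
	(void) close(fd);
}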
#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	const char *devname;
	nvlist_t *config;
	int fd, err;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		devname = blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &config);
		(void) close(fd);

		if (err != 0) {
			(void) no_memory(hdl);
			goto err_blkid3;
		}

		if (config != NULL) {
			err = add_config(hdl, pools, devname, config);
			if (err != 0)
				goto err_blkid3;
		}
	}

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */
/*
 * Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = DISK_ROOT;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when accessed from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats. We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0) {
			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
			    &this_guid) == 0);
			if (guid != this_guid)
				continue;
		}

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
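/*
 * Usage sketch (not part of this file): search one directory for a pool by
 * name via zpool_search_import().  The handle, directory, and pool name are
 * caller-supplied placeholders; exactly one of poolname/guid may be set, as
 * verified above.
 */
static nvlist_t *
example_search_by_name(libzfs_handle_t *hdl, char *dir, char *poolname)
{
	char *search_dirs[1];
	importargs_t args = { 0 };

	search_dirs[0] = dir;
	args.path = search_dirs;
	args.paths = 1;
	args.poolname = poolname;	/* args.guid stays 0 */

	return (zpool_search_import(hdl, &args));
}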
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
{
1379 const char *cb_type
;
1381 zpool_handle_t
*cb_zhp
;
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	nvlist_t *nvroot;
	uint64_t guid;
	uint_t i, count;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
/*
 * Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. Both strings are allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we return
			 * B_TRUE, libdiskmgt will continue to prevent generic
			 * consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
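/*
 * Usage sketch (not part of this file): ask whether an already-open device
 * belongs to a pool.  As the comment above zpool_in_use() notes, the
 * returned name string is allocated and must be freed by the caller.
 * Assumes <stdio.h> and <stdlib.h>.
 */
static void
example_check_device(libzfs_handle_t *hdl, int fd)
{
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	char *name = NULL;

	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
		(void) printf("device belongs to pool '%s' (state %d)\n",
		    name, (int)state);
		free(name);
	}
}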