/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
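
/*
 * Illustrative example (not part of the original source; guids and txgs
 * are hypothetical): after scanning both disks of a two-way mirror, one
 * of which also carries a stale label, the hierarchy above might be:
 *
 *	pool guid 0x1234
 *	    toplevel vdev guid 0xabcd
 *		txg 100		<- best config for this toplevel vdev
 *		txg 90		<- older label, ignored
 */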
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif
#include "libzfs.h"
#include "libzfs_impl.h"
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
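
/*
 * Illustrative sketch (not part of the original source): walking the
 * pool -> toplevel vdev -> config hierarchy that add_config() builds
 * below. The helper name is hypothetical.
 */
static int
count_label_configs(pool_list_t *pl)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	int n = 0;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next)
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next)
			for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next)
				n++;
	return (n);
}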
/*
 * Derive a devid string for the device at the given path, or return NULL
 * if one cannot be obtained.
 */
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}
	/*
	 * This is a leaf (file or disk) vdev. In either case, go through
	 * the name list and see if we find a matching guid. If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk. If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path. This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
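	/*
	 * Worked example (hypothetical device names): if the label holds
	 * the path /dev/dsk/c0t1d0s0 and the name list contains both
	 * /dev/dsk/c0t1d0s0 and /dev/rdsk/c0t1d0s0 for this guid, the
	 * first entry shares more trailing characters with the original
	 * path and is therefore preferred.
	 */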
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		nvlist_free(config);
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label. In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on. If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool. If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
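
/*
 * Illustrative note (not part of the original source): when both halves of
 * a healthy two-way mirror are scanned, each disk's label carries the same
 * (pool guid, toplevel vdev guid, txg) tuple, so the second config hits the
 * matching-txg case above and is freed rather than added again.
 */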
/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {
		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}
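
/*
 * Illustrative example (not part of the original source): after the log
 * device that held top-level id 2 is removed, the label's hole array
 * contains { 2 }, so:
 *
 *	uint64_t hole_array[] = { 2 };
 *
 *	vdev_is_hole(hole_array, 1, 2);		returns B_TRUE
 *	vdev_is_hole(hole_array, 1, 1);		returns B_FALSE
 */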
/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname, *comment;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;

				/*
				 * COMMENT is optional, don't bail if it's not
				 * there, instead, set it to NULL.
				 */
				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) != 0)
					comment = NULL;
				else if (nvlist_add_string(config,
				    ZPOOL_CONFIG_COMMENT, comment) != 0)
					goto nomem;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
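
/*
 * Worked example (not part of the original source): with the usual 256K
 * vdev_label_t and VDEV_LABELS == 4, an aligned device size of 1G places
 * labels 0 and 1 at offsets 0 and 256K from the front of the device, and
 * labels 2 and 3 at 1G - 512K and 1G - 256K, i.e. at the tail.
 */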
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
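
/*
 * Illustrative usage sketch (not part of the original code; the device
 * path is hypothetical):
 *
 *	nvlist_t *config;
 *	char *name;
 *	int fd = open64("/dev/sda1", O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0 &&
 *	    config != NULL) {
 *		if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
 *		    &name) == 0)
 *			(void) printf("found pool '%s'\n", name);
 *		nvlist_free(config);
 *	}
 */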
#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	const char *devname;
	nvlist_t *config;
	int fd, err;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		devname = blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &config);
		(void) close(fd);

		if (err != 0) {
			(void) no_memory(hdl);
			goto err_blkid3;
		}

		if (config != NULL) {
			err = add_config(hdl, pools, devname, config);
			if (err != 0)
				goto err_blkid3;
		}
	}
	err = 0;

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */
/*
 * Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = DISK_ROOT;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */
		dir = &default_dir;
		dirs = 1;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when accessed from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats. We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}
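
/*
 * Illustrative usage sketch (not part of the original code): searching the
 * default directory and naming each importable pool found.
 *
 *	nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *	nvpair_t *elem = NULL;
 *
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *		(void) printf("importable: %s\n", nvpair_name(elem));
 */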
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0) {
			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
			    &this_guid) == 0);
			if (guid != this_guid)
				continue;
		}

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}
*hdl
, importargs_t
*import
)
1361 verify(import
->poolname
== NULL
|| import
->guid
== 0);
1364 import
->exists
= zpool_iter(hdl
, name_or_guid_exists
, import
);
1366 if (import
->cachefile
!= NULL
)
1367 return (zpool_find_import_cached(hdl
, import
->cachefile
,
1368 import
->poolname
, import
->guid
));
1370 return (zpool_find_import_impl(hdl
, import
));
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
{
1395 const char *cb_type
;
1397 zpool_handle_t
*cb_zhp
;
1401 find_aux(zpool_handle_t
*zhp
, void *data
)
1403 aux_cbdata_t
*cbp
= data
;
1409 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
1412 if (nvlist_lookup_nvlist_array(nvroot
, cbp
->cb_type
,
1413 &list
, &count
) == 0) {
1414 for (i
= 0; i
< count
; i
++) {
1415 verify(nvlist_lookup_uint64(list
[i
],
1416 ZPOOL_CONFIG_GUID
, &guid
) == 0);
1417 if (guid
== cbp
->cb_guid
) {
/*
 * Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. Both strings are allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we return
			 * B_TRUE, libdiskmgt will continue to prevent generic
			 * consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}