/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif

#include "libzfs_impl.h"
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	uint64_t		ne_order;
	uint64_t		ne_num_labels;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
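/*
 * Derive an encoded devid string for the device at 'path'.  Returns NULL if
 * the device cannot be opened or no devid is available; otherwise the caller
 * is responsible for freeing the returned string with devid_str_free().
 */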
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor = NULL, *ret = NULL;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping partitions or multiple paths to the
	 * same disk.  In this case we prefer to use the path name which
	 * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
	 * use the lowest order device which corresponds to the first match
	 * while traversing the ZPOOL_IMPORT_PATH search path.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			if (path == NULL) {
				best = ne;
				break;
			}

			if ((strlen(path) == strlen(ne->ne_name)) &&
			    strncmp(path, ne->ne_name, strlen(path)) == 0) {
				best = ne;
				break;
			}

			if (best == NULL) {
				best = ne;
				continue;
			}

			/* Prefer paths with more vdev labels. */
			if (ne->ne_num_labels > best->ne_num_labels) {
				best = ne;
				continue;
			}

			/* Prefer paths earlier in the search order. */
			if (ne->ne_num_labels == best->ne_num_labels &&
			    ne->ne_order < best->ne_order) {
				best = ne;
				continue;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, int num_labels, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_order = order;
		ne->ne_num_labels = num_labels;
		ne->ne_next = pl->names;
		pl->names = ne;

		nvlist_free(config);
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_order = order;
	ne->ne_num_labels = num_labels;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);

	return (0);
}
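/*
 * Ask the kernel to verify the given pool configuration by attempting a
 * try-import (ZFS_IOC_POOL_TRYIMPORT) and return the refreshed config
 * nvlist, or NULL on failure.
 */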
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);

	return (nvl);
}
/*
 * Determine if the vdev id is a hole in the namespace.
 */
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;

				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Return the offset of the given label.
 */
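/*
 * Labels 0 and 1 are stored at the front of the device, while labels 2 and 3
 * are stored at the end, so offsets for the second half are computed back
 * from the vdev_label_t-aligned device size.
 */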
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.  The number of valid
 * labels found will be returned in num_labels when non-NULL.
 */
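/*
 * Example usage (a sketch only; the device path is illustrative):
 *
 *	nvlist_t *config = NULL;
 *	int num_labels = 0;
 *	int fd = open64("/dev/sda1", O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config, &num_labels) == 0 &&
 *	    config != NULL) {
 *		(inspect ZPOOL_CONFIG_POOL_NAME, ZPOOL_CONFIG_POOL_GUID, ...)
 *		nvlist_free(config);
 *	}
 *	if (fd >= 0)
 *		(void) close(fd);
 */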
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat64 statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;

	*config = NULL;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}
/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}
#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	const char *devname;
	nvlist_t *config;
	int fd, err, num_labels;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		err = EINVAL;
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs_member");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		devname = blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &config, &num_labels);
		(void) close(fd);

		if (err != 0) {
			(void) no_memory(hdl);
			goto err_blkid3;
		}

		if (config != NULL) {
			err = add_config(hdl, pools, devname, 0,
			    num_labels, config);
			if (err != 0)
				goto err_blkid3;
		}
	}

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */
char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev"			/* UNSAFE device names will change */
};
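/*
 * The position of a directory in this list is also passed to add_config() as
 * the 'order' argument, which fix_paths() uses to prefer device names found
 * earlier in the search order.
 */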
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, num_labels, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when access from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config, &num_labels))) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;
				char *pname;

				if ((iarg->poolname != NULL) &&
				    (nvlist_lookup_string(config,
				    ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {

					if (strcmp(iarg->poolname, pname))
						matched = B_FALSE;

				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, i+1,
				    num_labels, config))
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}
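/*
 * Top-level entry point for pool discovery: note whether a pool with the
 * requested name or guid is already imported, then search either the given
 * cache file or the device directories for importable configs.
 */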
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
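/*
 * Walk the vdev tree rooted at 'nv' and return B_TRUE if any vdev in it has
 * the given guid.
 */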
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;
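/*
 * zpool_iter() callback: check whether the pool's cb_type array (spares or
 * l2cache) contains a vdev with guid cb_guid and, if so, record the pool
 * handle in cb_zhp.
 */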
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	nvlist_t *nvroot;
	uint_t i, count;
	uint64_t guid;
	int found = 0;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				found = 1;
				break;
			}
		}
	}

	if (!found)
		zpool_close(zhp);

	return (found);
}
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  Both strings are allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;

	return (0);
}