lib/libzfs/libzfs_import.c (mirror_zfs.git)

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#ifdef HAVE_LIBUDEV
#include <libudev.h>
#include <sched.h>
#endif
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#include <blkid/blkid.h>
#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t	ce_txg;
	nvlist_t	*ce_config;
	struct config_entry *ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t	ve_guid;
	config_entry_t	*ve_configs;
	struct vdev_entry *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t	pe_guid;
	vdev_entry_t	*pe_vdevs;
	struct pool_entry *pe_next;
} pool_entry_t;

typedef struct name_entry {
	char		*ne_name;
	uint64_t	ne_guid;
	uint64_t	ne_order;
	uint64_t	ne_num_labels;
	struct name_entry *ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t	*pools;
	name_entry_t	*names;
} pool_list_t;

#define	DEV_BYID_PATH	"/dev/disk/by-id/"

/*
 * Linux persistent device strings for vdev labels
 *
 * based on libudev for consistency with libudev disk add/remove events
 */
#ifdef HAVE_LIBUDEV

39fc0cb5
DB
111typedef struct vdev_dev_strs {
112 char vds_devid[128];
113 char vds_devphys[128];
114} vdev_dev_strs_t;
115
116/*
117 * Obtain the persistent device id string (describes what)
118 *
119 * used by ZED auto-{online,expand,replace}
120 */
121static int
122udev_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen)
123{
124 struct udev_list_entry *entry;
125 const char *bus;
126 char devbyid[MAXPATHLEN];
127
128 /* The bus based by-id path is preferred */
129 bus = udev_device_get_property_value(dev, "ID_BUS");
130
131 if (bus == NULL) {
132 const char *dm_uuid;
133
134 /*
135 * For multipath nodes use the persistent uuid based identifier
136 *
137 * Example: /dev/disk/by-id/dm-uuid-mpath-35000c5006304de3f
138 */
139 dm_uuid = udev_device_get_property_value(dev, "DM_UUID");
140 if (dm_uuid != NULL) {
141 (void) snprintf(bufptr, buflen, "dm-uuid-%s", dm_uuid);
142 return (0);
143 }
144 return (ENODATA);
145 }
146
147 /*
148 * locate the bus specific by-id link
149 */
150 (void) snprintf(devbyid, sizeof (devbyid), "%s%s-", DEV_BYID_PATH, bus);
151 entry = udev_device_get_devlinks_list_entry(dev);
152 while (entry != NULL) {
153 const char *name;
154
155 name = udev_list_entry_get_name(entry);
156 if (strncmp(name, devbyid, strlen(devbyid)) == 0) {
157 name += strlen(DEV_BYID_PATH);
158 (void) strlcpy(bufptr, name, buflen);
159 return (0);
160 }
161 entry = udev_list_entry_get_next(entry);
162 }
163
164 return (ENODATA);
165}
166
/*
 * Obtain the persistent physical location string (describes where)
 *
 * used by ZED auto-{online,expand,replace}
 */
static int
udev_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen)
{
	const char *physpath, *value;

	/*
	 * Skip indirect multipath device nodes
	 */
	value = udev_device_get_property_value(dev, "DM_MULTIPATH_DEVICE_PATH");
	if (value != NULL && strcmp(value, "1") == 0)
		return (ENODATA); /* skip physical for multipath nodes */

	physpath = udev_device_get_property_value(dev, "ID_PATH");
	if (physpath != NULL && physpath[0] != '\0') {
		(void) strlcpy(bufptr, physpath, buflen);
		return (0);
	}

	return (ENODATA);
}

/*
 * A disk is considered a multipath whole disk when:
 *	DEVNAME key value has "dm-"
 *	DM_NAME key value has "mpath" prefix
 *	DM_UUID key exists
 *	ID_PART_TABLE_TYPE key does not exist or is not gpt
 */
static boolean_t
udev_mpath_whole_disk(struct udev_device *dev)
{
	const char *devname, *mapname, *type, *uuid;

	devname = udev_device_get_property_value(dev, "DEVNAME");
	mapname = udev_device_get_property_value(dev, "DM_NAME");
	type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE");
	uuid = udev_device_get_property_value(dev, "DM_UUID");

	if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) &&
	    (mapname != NULL && strncmp(mapname, "mpath", 5) == 0) &&
	    ((type == NULL) || (strcmp(type, "gpt") != 0)) &&
	    (uuid != NULL)) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a disk is effectively a multipath whole disk
 */
boolean_t
is_mpath_whole_disk(const char *path)
{
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname;
	boolean_t wholedisk = B_FALSE;

	if (realpath(path, nodepath) == NULL)
		return (B_FALSE);
	sysname = strrchr(nodepath, '/') + 1;
	if (strncmp(sysname, "dm-", 3) != 0)
		return (B_FALSE);
	if ((udev = udev_new()) == NULL)
		return (B_FALSE);
	if ((dev = udev_device_new_from_subsystem_sysname(udev, "block",
	    sysname)) == NULL) {
		udev_device_unref(dev);
		return (B_FALSE);
	}

	wholedisk = udev_mpath_whole_disk(dev);

	udev_device_unref(dev);
	return (wholedisk);
}

static int
udev_device_is_ready(struct udev_device *dev)
{
#ifdef HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED
	return (udev_device_get_is_initialized(dev));
#else
	/* wait for DEVLINKS property to be initialized */
	return (udev_device_get_property_value(dev, "DEVLINKS") != NULL);
#endif
}

/*
 * Wait up to timeout_ms for udev to set up the device node. The device is
 * considered ready when libudev determines it has been initialized, all of
 * the device links have been verified to exist, and it has been allowed to
 * settle. At this point the device can be accessed reliably. Depending on
 * the complexity of the udev rules this process could take several seconds.
 */
int
zpool_label_disk_wait(char *path, int timeout_ms)
{
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname = NULL;
	int ret = ENODEV;
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;

	if ((udev = udev_new()) == NULL)
		return (ENXIO);

	start = gethrtime();
	settle = 0;

	do {
		if (sysname == NULL) {
			if (realpath(path, nodepath) != NULL) {
				sysname = strrchr(nodepath, '/') + 1;
			} else {
				(void) usleep(sleep_ms * MILLISEC);
				continue;
			}
		}

		dev = udev_device_new_from_subsystem_sysname(udev,
		    "block", sysname);
		if ((dev != NULL) && udev_device_is_ready(dev)) {
			struct udev_list_entry *links, *link;

			ret = 0;
			links = udev_device_get_devlinks_list_entry(dev);

			udev_list_entry_foreach(link, links) {
				struct stat64 statbuf;
				const char *name;

				name = udev_list_entry_get_name(link);
				errno = 0;
				if (stat64(name, &statbuf) == 0 && errno == 0)
					continue;

				settle = 0;
				ret = ENODEV;
				break;
			}

			if (ret == 0) {
				if (settle == 0) {
					settle = gethrtime();
				} else if (NSEC2MSEC(gethrtime() - settle) >=
				    settle_ms) {
					udev_device_unref(dev);
					break;
				}
			}
		}

		udev_device_unref(dev);
		(void) usleep(sleep_ms * MILLISEC);

	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	udev_unref(udev);

	return (ret);
}

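/*
 * Usage sketch (editor's illustration, not part of the original file):
 * after writing a partition table, wait for udev to publish the new
 * partition node before opening it. The device path is a placeholder.
 */
#if 0
static int
example_wait_for_partition(void)
{
	char part[] = "/dev/disk/by-id/scsi-EXAMPLE-part1";

	/* 3000 ms is a typical upper bound for udev rule processing */
	if (zpool_label_disk_wait(part, 3000) != 0)
		return (-1);
	return (open64(part, O_RDONLY));
}
#endif
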
/*
 * Encode the persistent device strings
 * used for the vdev disk label
 */
static int
encode_device_strings(const char *path, vdev_dev_strs_t *ds,
    boolean_t wholedisk)
{
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname;
	int ret = ENODEV;
	hrtime_t start;

	if ((udev = udev_new()) == NULL)
		return (ENXIO);

	/* resolve path to a runtime device node instance */
	if (realpath(path, nodepath) == NULL)
		goto no_dev;

	sysname = strrchr(nodepath, '/') + 1;

	/*
	 * Wait up to 3 seconds for udev to set up the device node context
	 */
	start = gethrtime();
	do {
		dev = udev_device_new_from_subsystem_sysname(udev, "block",
		    sysname);
		if (dev == NULL)
			goto no_dev;
		if (udev_device_is_ready(dev))
			break; /* udev ready */

		udev_device_unref(dev);
		dev = NULL;

		if (NSEC2MSEC(gethrtime() - start) < 10)
			(void) sched_yield(); /* yield/busy wait up to 10ms */
		else
			(void) usleep(10 * MILLISEC);

	} while (NSEC2MSEC(gethrtime() - start) < (3 * MILLISEC));

	if (dev == NULL)
		goto no_dev;

	/*
	 * Only whole disks require extra device strings
	 */
	if (!wholedisk && !udev_mpath_whole_disk(dev))
		goto no_dev;

	ret = udev_device_get_devid(dev, ds->vds_devid, sizeof (ds->vds_devid));
	if (ret != 0)
		goto no_dev_ref;

	/* physical location string (optional) */
	if (udev_device_get_physical(dev, ds->vds_devphys,
	    sizeof (ds->vds_devphys)) != 0) {
		ds->vds_devphys[0] = '\0'; /* empty string --> not available */
	}

no_dev_ref:
	udev_device_unref(dev);
no_dev:
	udev_unref(udev);

	return (ret);
}

/*
 * Update a leaf vdev's persistent device strings (Linux only)
 *
 * - only applies for a dedicated leaf vdev (aka whole disk)
 * - updated during pool create|add|attach|import
 * - used for device matching during auto-{online,expand,replace}
 * - stored in a leaf disk config label (i.e. alongside 'path' NVP)
 * - these strings are currently not used in kernel (i.e. for vdev_disk_open)
 *
 * single device node example:
 *	devid:		'scsi-MG03SCA300_350000494a8cb3d67-part1'
 *	phys_path:	'pci-0000:04:00.0-sas-0x50000394a8cb3d67-lun-0'
 *
 * multipath device node example:
 *	devid:		'dm-uuid-mpath-35000c5006304de3f'
 */
void
update_vdev_config_dev_strs(nvlist_t *nv)
{
	vdev_dev_strs_t vds;
	char *env, *type, *path;
	uint64_t wholedisk = 0;

	/*
	 * For the benefit of legacy ZFS implementations, allow
	 * for opting out of devid strings in the vdev label.
	 *
	 * example use:
	 *	env ZFS_VDEV_DEVID_OPT_OUT=YES zpool import dozer
	 *
	 * explanation:
	 * Older ZFS on Linux implementations had issues when attempting to
	 * display pool config VDEV names if a "devid" NVP value is present
	 * in the pool's config.
	 *
	 * For example, a pool that originated on illumos platform would
	 * have a devid value in the config and "zpool status" would fail
	 * when listing the config.
	 *
	 * A pool can be stripped of any "devid" values on import or
	 * prevented from adding them on zpool create|add by setting
	 * ZFS_VDEV_DEVID_OPT_OUT.
	 */
	env = getenv("ZFS_VDEV_DEVID_OPT_OUT");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
		return;
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    strcmp(type, VDEV_TYPE_DISK) != 0) {
		return;
	}
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * Update device string values in config nvlist
	 */
	if (encode_device_strings(path, &vds, (boolean_t)wholedisk) == 0) {
		(void) nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vds.vds_devid);
		if (vds.vds_devphys[0] != '\0') {
			(void) nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
			    vds.vds_devphys);
		}
	} else {
		/* clear out any stale entries */
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
	}
}
#else

boolean_t
is_mpath_whole_disk(const char *path)
{
	return (B_FALSE);
}

/*
 * Wait up to timeout_ms for udev to set up the device node. The device is
 * considered ready when the provided path has been verified to exist and
 * it has been allowed to settle. At this point the device can be accessed
 * reliably. Depending on the complexity of the udev rules this process
 * could take several seconds.
 */
int
zpool_label_disk_wait(char *path, int timeout_ms)
{
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;
	struct stat64 statbuf;

	start = gethrtime();
	settle = 0;

	do {
		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0)) {
			if (settle == 0)
				settle = gethrtime();
			else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms)
				return (0);
		} else if (errno != ENOENT) {
			return (errno);
		}

		usleep(sleep_ms * MILLISEC);
	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	return (ENODEV);
}

void
update_vdev_config_dev_strs(nvlist_t *nv)
{
}

#endif /* HAVE_LIBUDEV */

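/*
 * Usage sketch (editor's illustration, not part of the original file):
 * build a minimal leaf disk vdev nvlist and refresh its device strings.
 * The device path is a placeholder.
 */
#if 0
static void
example_refresh_dev_strs(void)
{
	nvlist_t *leaf;

	verify(nvlist_alloc(&leaf, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(leaf, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(leaf, ZPOOL_CONFIG_PATH,
	    "/dev/disk/by-id/scsi-EXAMPLE-part1") == 0);
	verify(nvlist_add_uint64(leaf, ZPOOL_CONFIG_WHOLE_DISK, 1ULL) == 0);

	/* adds ZPOOL_CONFIG_DEVID and ZPOOL_CONFIG_PHYS_PATH when found */
	update_vdev_config_dev_strs(leaf);
	nvlist_free(leaf);
}
#endif
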
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev. In either case, go through
	 * the name list and see if we find a matching guid. If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping partitions or multiple paths to the
	 * same disk. In this case we prefer to use the path name which
	 * matches the ZPOOL_CONFIG_PATH. If no matching entry is found we
	 * use the lowest order device which corresponds to the first match
	 * while traversing the ZPOOL_IMPORT_PATH search path.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			if (path == NULL) {
				best = ne;
				break;
			}

			if ((strlen(path) == strlen(ne->ne_name)) &&
			    strncmp(path, ne->ne_name, strlen(path)) == 0) {
				best = ne;
				break;
			}

			if (best == NULL) {
				best = ne;
				continue;
			}

			/* Prefer paths with more vdev labels. */
			if (ne->ne_num_labels > best->ne_num_labels) {
				best = ne;
				continue;
			}

			/* Prefer paths earlier in the search order. */
			if (ne->ne_num_labels == best->ne_num_labels &&
			    ne->ne_order < best->ne_order) {
				best = ne;
				continue;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	/* Linux only - update ZPOOL_CONFIG_DEVID and ZPOOL_CONFIG_PHYS_PATH */
	update_vdev_config_dev_strs(nv);

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, int num_labels, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_order = order;
		ne->ne_num_labels = num_labels;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label. In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on. If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool. If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_order = order;
	ne->ne_num_labels = num_labels;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

static int
add_path(libzfs_handle_t *hdl, pool_list_t *pools, uint64_t pool_guid,
    uint64_t vdev_guid, const char *path, int order)
{
	nvlist_t *label;
	uint64_t guid;
	int error, fd, num_labels;

	fd = open64(path, O_RDONLY);
	if (fd < 0)
		return (errno);

	error = zpool_read_label(fd, &label, &num_labels);
	close(fd);

	if (error || label == NULL)
		return (ENOENT);

	error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid);
	if (error || guid != pool_guid) {
		nvlist_free(label);
		return (EINVAL);
	}

	error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid);
	if (error || guid != vdev_guid) {
		nvlist_free(label);
		return (EINVAL);
	}

	error = add_config(hdl, pools, path, order, num_labels, label);

	return (error);
}

static int
add_configs_from_label_impl(libzfs_handle_t *hdl, pool_list_t *pools,
    nvlist_t *nvroot, uint64_t pool_guid, uint64_t vdev_guid)
{
	char udevpath[MAXPATHLEN];
	char *path;
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	int error;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			error = add_configs_from_label_impl(hdl, pools,
			    child[c], pool_guid, vdev_guid);
			if (error)
				return (error);
		}
		return (0);
	}

	if (nvroot == NULL)
		return (0);

	error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
	if ((error != 0) || (guid != vdev_guid))
		return (0);

	error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &path);
	if (error == 0)
		(void) add_path(hdl, pools, pool_guid, vdev_guid, path, 0);

	error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &path);
	if (error == 0) {
		(void) snprintf(udevpath, sizeof (udevpath), "%s%s",
		    DEV_BYID_PATH, path);
		(void) add_path(hdl, pools, pool_guid, vdev_guid, udevpath, 1);
	}

	return (0);
}

/*
 * Given a disk label call add_config() for all known paths to the device
 * as described by the label itself. The paths are added in the following
 * priority order: 'path', 'devid', 'devnode'. As these alternate paths are
 * added the labels are verified to make sure they refer to the same device.
 */
static int
add_configs_from_label(libzfs_handle_t *hdl, pool_list_t *pools,
    char *devname, int num_labels, nvlist_t *label)
{
	nvlist_t *nvroot;
	uint64_t pool_guid;
	uint64_t vdev_guid;
	int error;

	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
		return (ENOENT);

	/* Allow devlinks to stabilize so all paths are available. */
	zpool_label_disk_wait(devname, DISK_LABEL_WAIT);

	/* Add alternate paths as described by the label vdev_tree. */
	(void) add_configs_from_label_impl(hdl, pools, nvroot,
	    pool_guid, vdev_guid);

	/* Add the device node /dev/sdX path as a last resort. */
	error = add_config(hdl, pools, devname, 100, num_labels, label);

	return (error);
}

/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their
		 * location in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

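/*
 * Worked example (editor's illustration), assuming the usual on-disk
 * constants VDEV_LABELS == 4 and sizeof (vdev_label_t) == 256 KiB
 * (262144 bytes): for a device whose aligned size is 1 GiB
 * (1073741824 bytes), label_offset() yields
 *
 *	l = 0:	0
 *	l = 1:	262144				(two copies at the front)
 *	l = 2:	1073741824 - 524288 = 1073217536
 *	l = 3:	1073741824 - 262144 = 1073479680	(two copies at the end)
 *
 * i.e. two labels at the start of the device and two at the end, so a
 * label survives damage to either end of the device.
 */
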
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one. The number of valid
 * labels found will be returned in num_labels when non-NULL.
 */
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat64 statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;

	*config = NULL;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}

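/*
 * Usage sketch (editor's illustration, not part of the original file):
 * read the label of a block device and print the pool name it claims to
 * belong to. The device path is a placeholder; printf assumes <stdio.h>.
 */
#if 0
static void
example_read_label(void)
{
	nvlist_t *config;
	char *name;
	int fd, num_labels;

	if ((fd = open64("/dev/sdb1", O_RDONLY)) < 0)
		return;
	if (zpool_read_label(fd, &config, &num_labels) == 0 &&
	    config != NULL) {
		if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0)
			(void) printf("%s (%d/4 labels)\n", name, num_labels);
		nvlist_free(config);
	}
	(void) close(fd);
}
#endif
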
typedef struct rdsk_node {
	char *rn_name;
	int rn_num_labels;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * partitions one and three (slices zero and two) are the most
	 * likely to provide results, so put those first
	 */
	nm1slice = strstr(nm1, "part1");
	nm2slice = strstr(nm2, "part1");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "part3");
	nm2slice = strstr(nm2, "part3");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

#ifndef __linux__
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}
#endif

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifndef __linux__
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
#endif
}

static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
#ifndef __linux__
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	diskname[MAXNAMELEN - 1] = '\0';
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
#endif
}

static boolean_t
is_watchdog_dev(char *dev)
{
	/* For 'watchdog' dev */
	if (strcmp(dev, "watchdog") == 0)
		return (B_TRUE);

	/* For 'watchdog<digit><whatever>' */
	if (strstr(dev, "watchdog") == dev && isdigit(dev[8]))
		return (B_TRUE);

	return (B_FALSE);
}

static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int num_labels;
	int fd;

	if (rn->rn_nozpool)
		return;
#ifdef __linux__
	/*
	 * Skip devices with well known prefixes; there can be side effects
	 * when opening these devices which need to be avoided.
	 *
	 * hpet     - High Precision Event Timer
	 * watchdog - Watchdog must be closed in a special way.
	 */
	if ((strcmp(rn->rn_name, "hpet") == 0) ||
	    is_watchdog_dev(rn->rn_name))
		return;

	/*
	 * Ignore failed stats. We only want regular files and block devices.
	 */
	if (fstatat64(rn->rn_dfd, rn->rn_name, &statbuf, 0) != 0 ||
	    (!S_ISREG(statbuf.st_mode) && !S_ISBLK(statbuf.st_mode)))
		return;

	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
#else
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats. We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
#endif
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	if ((zpool_read_label(fd, &config, &num_labels)) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}

	if (num_labels == 0) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	(void) close(fd);

	rn->rn_config = config;
	rn->rn_num_labels = num_labels;
}

/*
 * Given a file descriptor, clear (zero) the label information. This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}

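/*
 * Usage sketch (editor's illustration, not part of the original file):
 * zero all labels on a device, as "zpool labelclear" would. The device
 * path is a placeholder; this destroys the pool membership information.
 */
#if 0
static int
example_labelclear(void)
{
	int fd, err;

	if ((fd = open64("/dev/sdb1", O_RDWR)) < 0)
		return (errno);
	err = zpool_clear_label(fd);
	(void) close(fd);
	return (err);
}
#endif
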
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	int err;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs_member");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		nvlist_t *label;
		char *devname;
		int fd, num_labels;

		devname = (char *) blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &label, &num_labels);
		(void) close(fd);

		if (err || label == NULL)
			continue;

		add_configs_from_label(hdl, pools, devname, num_labels, label);
	}
	err = 0;

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}

char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-partlabel", /* Single unique entry set by user */
	"/dev/disk/by-partuuid", /* Generated partition uuid */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev"			/* UNSAFE device names will change */
};

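/*
 * Editor's note (illustrative, not part of the original file): this
 * built-in search order can be overridden at run time with the
 * ZPOOL_IMPORT_PATH environment variable, a colon-separated list of
 * directories, e.g.:
 *
 *	ZPOOL_IMPORT_PATH=/dev/disk/by-vdev:/dev/mapper zpool import
 */
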
34dc7c2f
BB
1791/*
1792 * Given a list of directories to search, find all pools stored on disk. This
1793 * includes partial pools which are not available to import. If no args are
1794 * given (argc is 0), then the default directory (/dev/dsk) is searched.
b128c09f
BB
1795 * poolname or guid (but not both) are provided by the caller when trying
1796 * to import a specific pool.
34dc7c2f 1797 */
b128c09f 1798static nvlist_t *
428870ff 1799zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
34dc7c2f 1800{
519129ff 1801 int i, dirs = iarg->paths;
34dc7c2f
BB
1802 struct dirent64 *dp;
1803 char path[MAXPATHLEN];
428870ff 1804 char *end, **dir = iarg->path;
34dc7c2f 1805 size_t pathleft;
519129ff 1806 nvlist_t *ret = NULL;
34dc7c2f
BB
1807 pool_list_t pools = { 0 };
1808 pool_entry_t *pe, *penext;
1809 vdev_entry_t *ve, *venext;
1810 config_entry_t *ce, *cenext;
1811 name_entry_t *ne, *nenext;
519129ff
BB
1812 avl_tree_t slice_cache;
1813 rdsk_node_t *slice;
1814 void *cookie;
d603ed6c
BB
1815
1816 verify(iarg->poolname == NULL || iarg->guid == 0);
34dc7c2f 1817
7d11e37e
BB
1818 /*
1819 * Prefer to locate pool member vdevs using libblkid. Only fall
1820 * back to legacy directory scanning when explicitly requested or
1821 * if an error is encountered when consulted the libblkid cache.
1822 */
428870ff 1823 if (dirs == 0) {
7d11e37e 1824 if (!iarg->scan && (zpool_find_import_blkid(hdl, &pools) == 0))
d603ed6c
BB
1825 goto skip_scanning;
1826
44867b6d
BB
1827 dir = zpool_default_import_path;
1828 dirs = DEFAULT_IMPORT_PATH_SIZE;
34dc7c2f
BB
1829 }
1830
1831 /*
1832 * Go through and read the label configuration information from every
1833 * possible device, organizing the information according to pool GUID
1834 * and toplevel GUID.
1835 */
428870ff 1836 for (i = 0; i < dirs; i++) {
519129ff 1837 taskq_t *t;
34dc7c2f
BB
1838 char *rdsk;
1839 int dfd;
0fdd8d64
MT
1840 boolean_t config_failed = B_FALSE;
1841 DIR *dirp;
34dc7c2f
BB
1842
1843 /* use realpath to normalize the path */
428870ff 1844 if (realpath(dir[i], path) == 0) {
44867b6d
BB
1845
1846 /* it is safe to skip missing search paths */
1847 if (errno == ENOENT)
1848 continue;
1849
1850 zfs_error_aux(hdl, strerror(errno));
34dc7c2f 1851 (void) zfs_error_fmt(hdl, EZFS_BADPATH,
428870ff 1852 dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
34dc7c2f
BB
1853 goto error;
1854 }
1855 end = &path[strlen(path)];
1856 *end++ = '/';
1857 *end = 0;
1858 pathleft = &path[sizeof (path)] - end;
1859
1860 /*
1861 * Using raw devices instead of block devices when we're
1862 * reading the labels skips a bunch of slow operations during
1863 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1864 */
1865 if (strcmp(path, "/dev/dsk/") == 0)
1866 rdsk = "/dev/rdsk/";
1867 else
1868 rdsk = path;
1869
1870 if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1871 (dirp = fdopendir(dfd)) == NULL) {
0fdd8d64
MT
1872 if (dfd >= 0)
1873 (void) close(dfd);
34dc7c2f
BB
1874 zfs_error_aux(hdl, strerror(errno));
1875 (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1876 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1877 rdsk);
1878 goto error;
1879 }
1880
519129ff
BB
1881 avl_create(&slice_cache, slice_cache_compare,
1882 sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
1883
34dc7c2f
BB
1884 /*
1885 * This is not MT-safe, but we have no MT consumers of libzfs
1886 */
1887 while ((dp = readdir64(dirp)) != NULL) {
1888 const char *name = dp->d_name;
1889 if (name[0] == '.' &&
1890 (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1891 continue;
1892
519129ff
BB
1893 slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1894 slice->rn_name = zfs_strdup(hdl, name);
1895 slice->rn_avl = &slice_cache;
1896 slice->rn_dfd = dfd;
1897 slice->rn_hdl = hdl;
1898 slice->rn_nozpool = B_FALSE;
1899 avl_add(&slice_cache, slice);
1900 }
505d9655 1901
519129ff
BB
1902 /*
1903 * create a thread pool to do all of this in parallel;
1904 * rn_nozpool is not protected, so this is racy in that
1905 * multiple tasks could decide that the same slice can
1906 * not hold a zpool, which is benign. Also choose
1907 * double the number of processors; we hold a lot of
1908 * locks in the kernel, so going beyond this doesn't
1909 * buy us much.
1910 */
519129ff
BB
1911 t = taskq_create("z_import", 2 * boot_ncpus, defclsyspri,
1912 2 * boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
1913 for (slice = avl_first(&slice_cache); slice;
1914 (slice = avl_walk(&slice_cache, slice,
1915 AVL_AFTER)))
1916 (void) taskq_dispatch(t, zpool_open_func, slice,
1917 TQ_SLEEP);
1918 taskq_wait(t);
1919 taskq_destroy(t);
519129ff
BB
1920
1921 cookie = NULL;
1922 while ((slice = avl_destroy_nodes(&slice_cache,
1923 &cookie)) != NULL) {
0fdd8d64 1924 if (slice->rn_config != NULL && !config_failed) {
519129ff 1925 nvlist_t *config = slice->rn_config;
b128c09f 1926 boolean_t matched = B_TRUE;
30b937ee 1927
519129ff
BB
1928 if (iarg->poolname != NULL) {
1929 char *pname;
b128c09f 1930
519129ff
BB
1931 matched = nvlist_lookup_string(config,
1932 ZPOOL_CONFIG_POOL_NAME,
1933 &pname) == 0 &&
1934 strcmp(iarg->poolname, pname) == 0;
428870ff 1935 } else if (iarg->guid != 0) {
b128c09f
BB
1936 uint64_t this_guid;
1937
1938 matched = nvlist_lookup_uint64(config,
1939 ZPOOL_CONFIG_POOL_GUID,
1940 &this_guid) == 0 &&
428870ff 1941 iarg->guid == this_guid;
b128c09f
BB
1942 }
1943 if (!matched) {
1944 nvlist_free(config);
0fdd8d64
MT
1945 } else {
1946 /*
1947 * use the non-raw path for the config
1948 */
1949 (void) strlcpy(end, slice->rn_name,
1950 pathleft);
1951 if (add_config(hdl, &pools, path, i+1,
1952 slice->rn_num_labels, config) != 0)
1953 config_failed = B_TRUE;
b128c09f 1954 }
34dc7c2f 1955 }
519129ff
BB
1956 free(slice->rn_name);
1957 free(slice);
34dc7c2f 1958 }
519129ff 1959 avl_destroy(&slice_cache);
34dc7c2f
BB
1960
1961 (void) closedir(dirp);
0fdd8d64
MT
1962
1963 if (config_failed)
1964 goto error;
34dc7c2f
BB
1965 }
1966
1967 skip_scanning:
1968 ret = get_configs(hdl, &pools, iarg->can_be_active);
1969
1970error:
1971 for (pe = pools.pools; pe != NULL; pe = penext) {
1972 penext = pe->pe_next;
1973 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1974 venext = ve->ve_next;
1975 for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1976 cenext = ce->ce_next;
1977 nvlist_free(ce->ce_config);
1978 free(ce);
1979 }
1980 free(ve);
1981 }
1982 free(pe);
1983 }
1984
1985 for (ne = pools.names; ne != NULL; ne = nenext) {
1986 nenext = ne->ne_next;
1987 free(ne->ne_name);
1988 free(ne);
1989 }
1990
1991 return (ret);
1992}
1993
1994nvlist_t *
1995zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1996{
1997 importargs_t iarg = { 0 };
1998
1999 iarg.paths = argc;
2000 iarg.path = argv;
2001
2002 return (zpool_find_import_impl(hdl, &iarg));
2003}
2004
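/*
 * A minimal usage sketch of zpool_find_import() (hypothetical, for
 * illustration only; not part of the library source). It assumes only
 * the public libzfs_init()/libzfs_fini() entry points, uses an example
 * search directory, elides error handling, and is fenced with #if 0 so
 * it never builds.
 */
#if 0
#include <stdio.h>
#include <libzfs.h>

static void
list_importable_pools(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	char *dirs[] = { "/dev/disk/by-id" };	/* example search path */
	nvlist_t *pools = zpool_find_import(hdl, 1, dirs);
	nvpair_t *elem = NULL;

	/* The result maps each pool name to its derived config nvlist. */
	while (pools != NULL &&
	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
		(void) printf("found pool: %s\n", nvpair_name(elem));

	nvlist_free(pools);
	libzfs_fini(hdl);
}
#endif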
2005/*
2006 * Given a cache file, return the contents as a list of importable pools.
b128c09f
BB
2007 * poolname or guid (but not both) are provided by the caller when trying
2008 * to import a specific pool.
34dc7c2f
BB
2009 */
2010nvlist_t *
2011zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
2012 char *poolname, uint64_t guid)
2013{
2014 char *buf;
2015 int fd;
2016 struct stat64 statbuf;
2017 nvlist_t *raw, *src, *dst;
2018 nvlist_t *pools;
2019 nvpair_t *elem;
2020 char *name;
2021 uint64_t this_guid;
2022 boolean_t active;
2023
2024 verify(poolname == NULL || guid == 0);
2025
2026 if ((fd = open(cachefile, O_RDONLY)) < 0) {
2027 zfs_error_aux(hdl, "%s", strerror(errno));
2028 (void) zfs_error(hdl, EZFS_BADCACHE,
2029 dgettext(TEXT_DOMAIN, "failed to open cache file"));
2030 return (NULL);
2031 }
2032
2033 if (fstat64(fd, &statbuf) != 0) {
2034 zfs_error_aux(hdl, "%s", strerror(errno));
2035 (void) close(fd);
2036 (void) zfs_error(hdl, EZFS_BADCACHE,
2037 dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
2038 return (NULL);
2039 }
2040
2041 if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
2042 (void) close(fd);
2043 return (NULL);
2044 }
2045
2046 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
2047 (void) close(fd);
2048 free(buf);
2049 (void) zfs_error(hdl, EZFS_BADCACHE,
2050 dgettext(TEXT_DOMAIN,
2051 "failed to read cache file contents"));
2052 return (NULL);
2053 }
2054
2055 (void) close(fd);
2056
2057 if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
2058 free(buf);
2059 (void) zfs_error(hdl, EZFS_BADCACHE,
2060 dgettext(TEXT_DOMAIN,
2061 "invalid or corrupt cache file contents"));
2062 return (NULL);
2063 }
2064
2065 free(buf);
2066
2067 /*
2068 * Go through and get the current state of the pools and refresh their
2069 * state.
2070 */
2071 if (nvlist_alloc(&pools, 0, 0) != 0) {
2072 (void) no_memory(hdl);
2073 nvlist_free(raw);
2074 return (NULL);
2075 }
2076
2077 elem = NULL;
2078 while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
2079 src = fnvpair_value_nvlist(elem);
2080
2081 name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
2082 if (poolname != NULL && strcmp(poolname, name) != 0)
2083 continue;
2084
2085 this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
2086 if (guid != 0 && guid != this_guid)
2087 continue;
2088
2089 if (pool_active(hdl, name, this_guid, &active) != 0) {
2090 nvlist_free(raw);
2091 nvlist_free(pools);
2092 return (NULL);
2093 }
2094
2095 if (active)
2096 continue;
2097
2098 if ((dst = refresh_config(hdl, src)) == NULL) {
2099 nvlist_free(raw);
2100 nvlist_free(pools);
2101 return (NULL);
2102 }
2103
2104 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
2105 (void) no_memory(hdl);
2106 nvlist_free(dst);
2107 nvlist_free(raw);
2108 nvlist_free(pools);
2109 return (NULL);
2110 }
2111 nvlist_free(dst);
2112 }
2113
2114 nvlist_free(raw);
2115 return (pools);
2116}
2117
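/*
 * A minimal usage sketch of zpool_find_import_cached() (hypothetical,
 * for illustration; not part of the library source). The cache file
 * path and pool name are examples only; error handling is elided.
 */
#if 0
#include <libzfs.h>

static nvlist_t *
find_pool_in_cache(libzfs_handle_t *hdl)
{
	char name[] = "tank";	/* example pool name */

	/* poolname and guid are mutually exclusive; pass 0 for the guid. */
	return (zpool_find_import_cached(hdl, "/etc/zfs/zpool.cache",
	    name, 0));
}
#endif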
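/*
 * zpool_iter() callback: report whether an already-imported pool
 * matches the name or guid being searched for.
 */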
2118static int
2119name_or_guid_exists(zpool_handle_t *zhp, void *data)
2120{
2121 importargs_t *import = data;
2122 int found = 0;
2123
2124 if (import->poolname != NULL) {
2125 char *pool_name;
2126
2127 verify(nvlist_lookup_string(zhp->zpool_config,
2128 ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
2129 if (strcmp(pool_name, import->poolname) == 0)
2130 found = 1;
2131 } else {
2132 uint64_t pool_guid;
2133
2134 verify(nvlist_lookup_uint64(zhp->zpool_config,
2135 ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
2136 if (pool_guid == import->guid)
2137 found = 1;
2138 }
2139
2140 zpool_close(zhp);
2141 return (found);
2142}
2143
2144nvlist_t *
2145zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
2146{
2147 verify(import->poolname == NULL || import->guid == 0);
2148
2149 if (import->unique)
2150 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
2151
2152 if (import->cachefile != NULL)
2153 return (zpool_find_import_cached(hdl, import->cachefile,
2154 import->poolname, import->guid));
2155
2156 return (zpool_find_import_impl(hdl, import));
2157}
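/*
 * A usage sketch of zpool_search_import() (hypothetical, for
 * illustration; not part of the library source), searching by pool
 * name the way a "zpool import <pool>" style caller might. Error
 * handling is elided.
 */
#if 0
#include <libzfs.h>

static nvlist_t *
search_by_name(libzfs_handle_t *hdl, char *name)
{
	importargs_t args = { 0 };

	args.poolname = name;	/* mutually exclusive with args.guid */
	args.unique = B_TRUE;	/* ask for the name-collision check */

	/*
	 * When unique is set, args.exists reports whether an active
	 * pool already uses this name.
	 */
	return (zpool_search_import(hdl, &args));
}
#endif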
2158
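/*
 * Recursively search a vdev tree (expressed as an nvlist) for a vdev
 * with the given guid.
 */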
2159boolean_t
2160find_guid(nvlist_t *nv, uint64_t guid)
2161{
2162 uint64_t tmp;
2163 nvlist_t **child;
2164 uint_t c, children;
2165
2166 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
2167 if (tmp == guid)
2168 return (B_TRUE);
2169
2170 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2171 &child, &children) == 0) {
2172 for (c = 0; c < children; c++)
2173 if (find_guid(child[c], guid))
2174 return (B_TRUE);
2175 }
2176
2177 return (B_FALSE);
2178}
2179
2180typedef struct aux_cbdata {
2181 const char *cb_type;
2182 uint64_t cb_guid;
2183 zpool_handle_t *cb_zhp;
2184} aux_cbdata_t;
2185
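/*
 * zpool_iter() callback: search one pool's spare or l2cache list
 * (selected by cb_type) for a vdev whose guid matches cb_guid; on a
 * match, keep the pool handle open in cb_zhp and stop the iteration.
 */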
2186static int
2187find_aux(zpool_handle_t *zhp, void *data)
2188{
2189 aux_cbdata_t *cbp = data;
2190 nvlist_t **list;
2191 uint_t i, count;
2192 uint64_t guid;
2193 nvlist_t *nvroot;
2194
2195 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2196 &nvroot) == 0);
2197
2198 if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
2199 &list, &count) == 0) {
2200 for (i = 0; i < count; i++) {
2201 verify(nvlist_lookup_uint64(list[i],
2202 ZPOOL_CONFIG_GUID, &guid) == 0);
2203 if (guid == cbp->cb_guid) {
2204 cbp->cb_zhp = zhp;
2205 return (1);
2206 }
2207 }
2208 }
2209
2210 zpool_close(zhp);
2211 return (0);
2212}
2213
2214/*
2215 * Determines if the pool is in use. If so, it returns true and the state of
2216 * the pool as well as the name of the pool. Both strings are allocated and
2217 * must be freed by the caller.
2218 */
2219int
2220zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
2221 boolean_t *inuse)
2222{
2223 nvlist_t *config;
2224 char *name;
2225 boolean_t ret;
2226 uint64_t guid, vdev_guid;
2227 zpool_handle_t *zhp;
2228 nvlist_t *pool_config;
2229 uint64_t stateval, isspare;
2230 aux_cbdata_t cb = { 0 };
2231 boolean_t isactive;
2232
2233 *inuse = B_FALSE;
2234
2235 if (zpool_read_label(fd, &config, NULL) != 0) {
2236 (void) no_memory(hdl);
2237 return (-1);
2238 }
2239
2240 if (config == NULL)
2241 return (0);
2242
2243 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2244 &stateval) == 0);
2245 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
2246 &vdev_guid) == 0);
2247
2248 if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
2249 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2250 &name) == 0);
2251 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2252 &guid) == 0);
2253 }
2254
2255 switch (stateval) {
2256 case POOL_STATE_EXPORTED:
2257 /*
2258 * A pool with an exported state may in fact be imported
2259 * read-only, so check the in-core state to see if it's
2260 * active and imported read-only. If it is, set
2261 * its state to active.
2262 */
2263 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
2264 (zhp = zpool_open_canfail(hdl, name)) != NULL) {
2265 if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
2266 stateval = POOL_STATE_ACTIVE;
2267
2268 /*
2269 * All we needed the zpool handle for is the
2270 * readonly prop check.
2271 */
2272 zpool_close(zhp);
2273 }
2274
2275 ret = B_TRUE;
2276 break;
2277
2278 case POOL_STATE_ACTIVE:
2279 /*
2280 * For an active pool, we have to determine if it's really part
2281 * of a currently active pool (in which case the pool will exist
2282 * and the guid will be the same), or whether it's part of an
2283 * active pool that was disconnected without being explicitly
2284 * exported.
2285 */
2286 if (pool_active(hdl, name, guid, &isactive) != 0) {
2287 nvlist_free(config);
2288 return (-1);
2289 }
2290
2291 if (isactive) {
2292 /*
2293 * Because the device may have been removed while
2294 * offlined, we only report it as active if the vdev is
2295 * still present in the config. Otherwise, pretend like
2296 * it's not in use.
2297 */
2298 if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
2299 (pool_config = zpool_get_config(zhp, NULL))
2300 != NULL) {
2301 nvlist_t *nvroot;
2302
2303 verify(nvlist_lookup_nvlist(pool_config,
2304 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2305 ret = find_guid(nvroot, vdev_guid);
2306 } else {
2307 ret = B_FALSE;
2308 }
2309
2310 /*
2311 * If this is an active spare within another pool, we
2312 * treat it like an unused hot spare. This allows the
2313 * user to create a pool with a hot spare that is currently
2314 * in use within another pool. Since we return B_TRUE,
2315 * libdiskmgt will continue to prevent generic consumers
2316 * from using the device.
2317 */
2318 if (ret && nvlist_lookup_uint64(config,
2319 ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
2320 stateval = POOL_STATE_SPARE;
2321
2322 if (zhp != NULL)
2323 zpool_close(zhp);
2324 } else {
2325 stateval = POOL_STATE_POTENTIALLY_ACTIVE;
2326 ret = B_TRUE;
2327 }
2328 break;
2329
2330 case POOL_STATE_SPARE:
2331 /*
2332 * For a hot spare, it can be either definitively in use, or
2333 * potentially active. To determine if it's in use, we iterate
2334 * over all pools in the system and search for one with a spare
2335 * with a matching guid.
2336 *
2337 * Due to the shared nature of spares, we don't actually report
2338 * the potentially active case as in use. This means the user
2339 * can freely create pools on the hot spares of exported pools,
2340 * but to do otherwise makes the resulting code complicated, and
2341 * we end up having to deal with this case anyway.
2342 */
2343 cb.cb_zhp = NULL;
2344 cb.cb_guid = vdev_guid;
2345 cb.cb_type = ZPOOL_CONFIG_SPARES;
2346 if (zpool_iter(hdl, find_aux, &cb) == 1) {
2347 name = (char *)zpool_get_name(cb.cb_zhp);
2348 ret = B_TRUE;
2349 } else {
2350 ret = B_FALSE;
2351 }
2352 break;
2353
2354 case POOL_STATE_L2CACHE:
2355
2356 /*
2357 * Check if any pool is currently using this l2cache device.
2358 */
2359 cb.cb_zhp = NULL;
2360 cb.cb_guid = vdev_guid;
2361 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
2362 if (zpool_iter(hdl, find_aux, &cb) == 1) {
2363 name = (char *)zpool_get_name(cb.cb_zhp);
2364 ret = B_TRUE;
2365 } else {
2366 ret = B_FALSE;
2367 }
2368 break;
2369
2370 default:
2371 ret = B_FALSE;
2372 }
2373
2374
2375 if (ret) {
2376 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
2377 if (cb.cb_zhp)
2378 zpool_close(cb.cb_zhp);
2379 nvlist_free(config);
2380 return (-1);
2381 }
2382 *state = (pool_state_t)stateval;
2383 }
2384
2385 if (cb.cb_zhp)
2386 zpool_close(cb.cb_zhp);
2387
2388 nvlist_free(config);
2389 *inuse = ret;
2390 return (0);
2391}
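/*
 * A usage sketch of zpool_in_use() (hypothetical, for illustration;
 * not part of the library source): check whether a device already
 * belongs to a pool before reusing it. The device path comes from the
 * caller; error handling is minimal.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <libzfs.h>

static boolean_t
device_in_use(libzfs_handle_t *hdl, const char *dev)
{
	pool_state_t state;
	char *name = NULL;
	boolean_t inuse = B_FALSE;
	int fd = open(dev, O_RDONLY);

	if (fd >= 0) {
		if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 &&
		    inuse) {
			(void) printf("%s belongs to pool '%s'\n", dev, name);
			free(name);
		}
		(void) close(fd);
	}
	return (inuse);
}
#endif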