/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t ce_txg;
        nvlist_t *ce_config;
        struct config_entry *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t ve_guid;
        config_entry_t *ve_configs;
        struct vdev_entry *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t pe_guid;
        vdev_entry_t *pe_vdevs;
        struct pool_entry *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char *ne_name;
        uint64_t ne_guid;
        struct name_entry *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t *pools;
        name_entry_t *names;
} pool_list_t;
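
/*
 * Taken together, these structures mirror the hierarchy described above:
 *
 *	pool_list_t
 *	  pools -> pool_entry_t             (one per pool guid)
 *	    pe_vdevs -> vdev_entry_t        (one per toplevel vdev guid)
 *	      ve_configs -> config_entry_t  (one per distinct label txg)
 *	  names -> name_entry_t             (vdev guid -> device path mappings)
 */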

static char *
get_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}
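
/*
 * Illustrative use (hypothetical device path): get_devid("/dev/rdsk/c0t0d0s0")
 * returns an encoded devid-plus-minor string on success, which the caller
 * must release with devid_str_free(), or NULL if the device has no devid.
 */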

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}
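
/*
 * Worked example of the "most similar" rule above (hypothetical paths): if a
 * guid maps to both /dev/dsk/c1t0d0s0 and /dev/dsk/c2t0d0s4, and the original
 * path was /dev/dsk/c0t0d0s0, then c1t0d0s0 wins: comparing from the end,
 * "t0d0s0" matches (count 6), while c2t0d0s4 diverges at the first character
 * compared (count 0).
 */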

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.
         * If so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}
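
/*
 * Net effect, illustratively: after scanning a mirror whose two disks carry
 * labels with the same (pool guid, toplevel vdev guid, txg) tuple, only one
 * config_entry_t exists for that tuple, but both device paths appear in the
 * names list, so fix_paths() can later choose the best path per leaf guid.
 */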

/*
 * Determines whether the named pool is currently active and, if so, whether
 * its GUID matches the given GUID; the result is returned in *isactive.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}
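
/*
 * Note that zpool_open_silent() distinguishes "no such pool" (it returns 0
 * and leaves zhp NULL, so *isactive becomes B_FALSE) from a genuine failure
 * (nonzero return, propagated as -1 above).
 */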

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                (void) zpool_standard_error(hdl, errno,
                    dgettext(TEXT_DOMAIN, "cannot discover pools"));
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}
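
/*
 * The ENOMEM loop above is the usual libzfs grow-and-retry pattern: the
 * destination buffer starts at twice the packed config size, and every
 * ENOMEM from ZFS_IOC_POOL_TRYIMPORT expands it before retrying, so the
 * kernel's refreshed config is never silently truncated.
 */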

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname;
        uint64_t version, guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool
                 * configuration from the first one we find, and then go
                 * through the rest and add them as necessary to the 'vdevs'
                 * member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *	version
                                 *	pool guid
                                 *	name
                                 *	pool state
                                 *	hostid (if available)
                                 *	hostname (if available)
                                 */
                                uint64_t state;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION, &version) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
                                if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state) != 0)
                                        goto nomem;
                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        if (nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid) != 0)
                                                goto nomem;
                                        verify(nvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            &hostname) == 0);
                                        if (nvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            hostname) != 0)
                                                goto nomem;
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);
                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We
                 * cannot simply compress the child array, because the kernel
                 * performs certain checks to make sure the vdev IDs match
                 * their location in the configuration.
                 */
                for (c = 0; c < children; c++)
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL)
                        goto error;

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
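
/*
 * Worked example: with VDEV_LABELS == 4 and sizeof (vdev_label_t) == 256KB,
 * labels 0 and 1 live at offsets 0 and 256KB from the start of the device,
 * while labels 2 and 3 live at (size - 512KB) and (size - 256KB): two copies
 * at each end of the device for redundancy.
 */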

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}
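
/*
 * Minimal usage sketch (illustrative device path; error handling elided):
 *
 *	nvlist_t *config;
 *	int fd = open64("/dev/rdsk/c0t0d0s0", O_RDONLY);
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0 && config != NULL) {
 *		... inspect ZPOOL_CONFIG_POOL_NAME, _POOL_GUID, etc. ...
 *		nvlist_free(config);
 *	}
 *
 * A return of 0 with *config == NULL simply means no valid label was found.
 */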

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok, char *poolname, uint64_t guid)
{
        int i;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end;
        size_t pathleft;
        struct stat64 statbuf;
        nvlist_t *ret = NULL, *config;
        static char *default_dir = "/dev/dsk";
        int fd;
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;

        verify(poolname == NULL || guid == 0);

        if (argc == 0) {
                argc = 1;
                argv = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < argc; i++) {
                char *rdsk;
                int dfd;

                /* use realpath to normalize the path */
                if (realpath(argv[i], path) == 0) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            argv[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/rdsk/";
                else
                        rdsk = path;

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        zfs_error_aux(hdl, "%s", strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
                                continue;

                        /*
                         * Ignore failed stats.  We only want regular
                         * files, character devs and block devs.
                         */
                        if (fstat64(fd, &statbuf) != 0 ||
                            (!S_ISREG(statbuf.st_mode) &&
                            !S_ISCHR(statbuf.st_mode) &&
                            !S_ISBLK(statbuf.st_mode))) {
                                (void) close(fd);
                                continue;
                        }

                        if ((zpool_read_label(fd, &config)) != 0) {
                                (void) close(fd);
                                (void) no_memory(hdl);
                                goto error;
                        }

                        (void) close(fd);

                        if (config != NULL) {
                                boolean_t matched = B_TRUE;

                                if (poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(poolname, pname) == 0;
                                } else if (guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                        config = NULL;
                                        continue;
                                }
                                /* use the non-raw path for the config */
                                (void) strlcpy(end, name, pathleft);
                                if (add_config(hdl, &pools, path, config) != 0)
                                        goto error;
                        }
                }

                (void) closedir(dirp);
                dirp = NULL;
        }

        ret = get_configs(hdl, &pools, active_ok);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                if (ce->ce_config)
                                        nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                if (ne->ne_name)
                        free(ne->ne_name);
                free(ne);
        }

        if (dirp)
                (void) closedir(dirp);

        return (ret);
}
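
/*
 * The exported lookup functions below are thin wrappers around
 * zpool_find_import_impl(), each fixing the filter arguments: no filter,
 * filter by pool name, filter by pool GUID, or accept active pools (the
 * path zdb uses, per the comment in get_configs()).
 */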

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0));
}

nvlist_t *
zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv,
    char *pool)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0));
}

nvlist_t *
zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv,
    uint64_t guid)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid));
}

nvlist_t *
zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

        /*
         * Go through and get the current state of the pools and refresh their
         * state.
         */
        if (nvlist_alloc(&pools, 0, 0) != 0) {
                (void) no_memory(hdl);
                nvlist_free(raw);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
                verify(nvpair_value_nvlist(elem, &src) == 0);

                verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (poolname != NULL && strcmp(poolname, name) != 0)
                        continue;

                verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
                    &this_guid) == 0);
                if (guid != 0 && guid != this_guid)
                        continue;

                if (pool_active(hdl, name, this_guid, &active) != 0) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (active)
                        continue;

                if ((dst = refresh_config(hdl, src)) == NULL) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
                        (void) no_memory(hdl);
                        nvlist_free(dst);
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }
                nvlist_free(dst);
        }

        nvlist_free(raw);
        return (pools);
}
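
/*
 * Callers with an intact cache file (e.g. /etc/zfs/zpool.cache) use this
 * path to avoid scanning devices entirely: the file already holds a packed
 * nvlist of pool configs, so each candidate is merely filtered by name or
 * guid, checked for activity, and refreshed via ZFS_IOC_POOL_TRYIMPORT.
 */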

boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
        uint64_t tmp;
        nvlist_t **child;
        uint_t c, children;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
        if (tmp == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_guid(child[c], guid))
                                return (B_TRUE);
        }

        return (B_FALSE);
}
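
/*
 * find_guid() is a straightforward depth-first search of a vdev tree: it
 * matches the given nvlist's own guid first, then recurses through any
 * ZPOOL_CONFIG_CHILDREN array, so passing the root vdev checks every vdev
 * in the pool.
 */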

typedef struct aux_cbdata {
        const char *cb_type;
        uint64_t cb_guid;
        zpool_handle_t *cb_zhp;
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
        aux_cbdata_t *cbp = data;
        nvlist_t **list;
        uint_t i, count;
        uint64_t guid;
        nvlist_t *nvroot;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
            &list, &count) == 0) {
                for (i = 0; i < count; i++) {
                        verify(nvlist_lookup_uint64(list[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (guid == cbp->cb_guid) {
                                cbp->cb_zhp = zhp;
                                return (1);
                        }
                }
        }

        zpool_close(zhp);
        return (0);
}
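
/*
 * Note the handle discipline here: on a match, find_aux() returns 1 to stop
 * zpool_iter() and deliberately leaves zhp open, handing ownership to the
 * caller via cb_zhp; on no match it closes zhp itself before returning 0.
 */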

/*
 * Determines if the device is in use by a pool.  If so, sets *inuse to
 * B_TRUE and returns the state of the pool as well as the name of the
 * pool.  The name string is allocated and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
        nvlist_t *config;
        char *name;
        boolean_t ret;
        uint64_t guid, vdev_guid;
        zpool_handle_t *zhp;
        nvlist_t *pool_config;
        uint64_t stateval, isspare;
        aux_cbdata_t cb = { 0 };
        boolean_t isactive;

        *inuse = B_FALSE;

        if (zpool_read_label(fd, &config) != 0) {
                (void) no_memory(hdl);
                return (-1);
        }

        if (config == NULL)
                return (0);

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) == 0);

        if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
        }

        switch (stateval) {
        case POOL_STATE_EXPORTED:
                ret = B_TRUE;
                break;

        case POOL_STATE_ACTIVE:
                /*
                 * For an active pool, we have to determine if it's really part
                 * of a currently active pool (in which case the pool will
                 * exist and the guid will be the same), or whether it's part
                 * of an active pool that was disconnected without being
                 * explicitly exported.
                 */
                if (pool_active(hdl, name, guid, &isactive) != 0) {
                        nvlist_free(config);
                        return (-1);
                }

                if (isactive) {
                        /*
                         * Because the device may have been removed while
                         * offlined, we only report it as active if the vdev is
                         * still present in the config.  Otherwise, pretend
                         * like it's not in use.
                         */
                        if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
                            (pool_config = zpool_get_config(zhp, NULL))
                            != NULL) {
                                nvlist_t *nvroot;

                                verify(nvlist_lookup_nvlist(pool_config,
                                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                                ret = find_guid(nvroot, vdev_guid);
                        } else {
                                ret = B_FALSE;
                        }

                        /*
                         * If this is an active spare within another pool, we
                         * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
                         */
                        if (ret && nvlist_lookup_uint64(config,
                            ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
                                stateval = POOL_STATE_SPARE;

                        if (zhp != NULL)
                                zpool_close(zhp);
                } else {
                        stateval = POOL_STATE_POTENTIALLY_ACTIVE;
                        ret = B_TRUE;
                }
                break;

        case POOL_STATE_SPARE:
                /*
                 * For a hot spare, it can be either definitively in use, or
                 * potentially active.  To determine if it's in use, we iterate
                 * over all pools in the system and search for one with a spare
                 * with a matching guid.
                 *
                 * Due to the shared nature of spares, we don't actually report
                 * the potentially active case as in use.  This means the user
                 * can freely create pools on the hot spares of exported pools,
                 * but to do otherwise makes the resulting code complicated,
                 * and we end up having to deal with this case anyway.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_SPARES;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        case POOL_STATE_L2CACHE:

                /*
                 * Check if any pool is currently using this l2cache device.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_L2CACHE;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        default:
                ret = B_FALSE;
        }

        if (ret) {
                if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
                        if (cb.cb_zhp)
                                zpool_close(cb.cb_zhp);
                        nvlist_free(config);
                        return (-1);
                }
                *state = (pool_state_t)stateval;
        }

        if (cb.cb_zhp)
                zpool_close(cb.cb_zhp);

        nvlist_free(config);
        *inuse = ret;
        return (0);
}