/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>

#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

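/*
 * Return an allocated devid string (including the minor name) for the device
 * at 'path', or NULL if none is available.  The caller must free the result
 * with devid_str_free().
 */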
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the named pool is active and, if so, whether its GUID
 * matches the one given; the result is returned in 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

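/*
 * Hand the pool configuration to the kernel for a dry-run import
 * (ZFS_IOC_POOL_TRYIMPORT) and return the refreshed configuration it
 * produces, growing the destination nvlist buffer as needed.  Returns
 * NULL on failure.
 */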
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
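	/*
	 * Labels 0 and 1 are stored at the front of the device and labels
	 * 2 and 3 at the end; e.g. with VDEV_LABELS == 4, label 3 lives in
	 * the last sizeof (vdev_label_t) bytes of the (aligned) device size.
	 */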
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

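	/*
	 * Check each of the four labels in turn; return the first
	 * configuration that unpacks and looks fully initialized (a valid
	 * pool state and, unless this is a spare or l2cache device, a
	 * nonzero txg).
	 */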
	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	const char *devname;
	nvlist_t *config;
	int fd, err;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		devname = blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &config);
		(void) close(fd);

		if (err != 0) {
			(void) no_memory(hdl);
			goto err_blkid3;
		}

		if (config != NULL) {
			err = add_config(hdl, pools, devname, config);
			if (err != 0)
				goto err_blkid3;
		}
	}

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no directories
 * are given (iarg->paths is 0), then the default directory (DISK_ROOT) is
 * searched.  poolname or guid (but not both) are provided by the caller when
 * trying to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = DISK_ROOT;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 *	watchdog - A special close is required to avoid
			 *		   triggering it and resetting the system.
			 *	fuse     - Fuse control device.
			 *	ppp      - Generic PPP driver.
			 *	tty*     - Generic serial interface.
			 *	vcs*     - Virtual console memory.
			 *	parport* - Parallel port interface.
			 *	lp*      - Printer interface.
			 *	fd*      - Floppy interface.
			 *	hpet     - High Precision Event Timer, crashes qemu
			 *		   when accessed from a virtual machine.
			 *	core     - Symlink to /proc/kcore, causes a crash
			 *		   when accessed from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

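/*
 * Convenience wrapper around zpool_find_import_impl() for callers that only
 * have a list of directories to search.
 */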
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0) {
			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
			    &this_guid) == 0);
			if (guid != this_guid)
				continue;
		}

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

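/*
 * zpool_iter() callback: return nonzero if the imported pool matches the
 * name or guid recorded in the importargs_t.
 */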
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

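/*
 * Top-level entry point for discovering importable pools.  For example, a
 * caller searching for one specific pool by name might fill in the
 * importargs_t like this ("tank" being a hypothetical pool name):
 *
 *	importargs_t args = { 0 };
 *	args.poolname = "tank";
 *	args.unique = B_TRUE;
 *	pools = zpool_search_import(hdl, &args);
 */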
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}

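/*
 * Recursively search the given vdev tree for a vdev with the given guid.
 */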
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

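/*
 * zpool_iter() callback: check whether the pool has an aux device (spares or
 * l2cache, selected by cb_type) with the given guid.  On a match the pool
 * handle is stashed in cb_zhp and iteration stops; otherwise the handle is
 * closed.
 */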
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = TRUE;
		} else {
			ret = FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = TRUE;
		} else {
			ret = FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}


	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}