/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
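
/*
 * Illustration (hypothetical values): if one disk is visible both as
 * /dev/sdb1 and /dev/disk/by-id/ata-FOO-part1, both labels yield the same
 * tuple (pool guid 0x1234, top-level vdev guid 0xabcd, txg 100), so one of
 * the two entries is discarded as a duplicate.  If another label for the
 * same top-level vdev carries txg 90, the txg 100 config is preferred when
 * the best label is chosen for that vdev.
 */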
42 | ||
428870ff | 43 | #include <ctype.h> |
34dc7c2f BB |
44 | #include <devid.h> |
45 | #include <dirent.h> | |
46 | #include <errno.h> | |
47 | #include <libintl.h> | |
428870ff | 48 | #include <stddef.h> |
34dc7c2f BB |
49 | #include <stdlib.h> |
50 | #include <string.h> | |
51 | #include <sys/stat.h> | |
52 | #include <unistd.h> | |
53 | #include <fcntl.h> | |
428870ff BB |
54 | #include <sys/vtoc.h> |
55 | #include <sys/dktp/fdisk.h> | |
56 | #include <sys/efi_partition.h> | |
34dc7c2f BB |
57 | |
58 | #include <sys/vdev_impl.h> | |
d603ed6c BB |
59 | #ifdef HAVE_LIBBLKID |
60 | #include <blkid/blkid.h> | |
61 | #endif | |
34dc7c2f BB |
62 | |
63 | #include "libzfs.h" | |
64 | #include "libzfs_impl.h" | |
65 | ||
66 | /* | |
67 | * Intermediate structures used to gather configuration information. | |
68 | */ | |
69 | typedef struct config_entry { | |
70 | uint64_t ce_txg; | |
71 | nvlist_t *ce_config; | |
72 | struct config_entry *ce_next; | |
73 | } config_entry_t; | |
74 | ||
75 | typedef struct vdev_entry { | |
76 | uint64_t ve_guid; | |
77 | config_entry_t *ve_configs; | |
78 | struct vdev_entry *ve_next; | |
79 | } vdev_entry_t; | |
80 | ||
81 | typedef struct pool_entry { | |
82 | uint64_t pe_guid; | |
83 | vdev_entry_t *pe_vdevs; | |
84 | struct pool_entry *pe_next; | |
85 | } pool_entry_t; | |
86 | ||
87 | typedef struct name_entry { | |
88 | char *ne_name; | |
89 | uint64_t ne_guid; | |
44867b6d | 90 | uint64_t ne_order; |
34dc7c2f BB |
91 | struct name_entry *ne_next; |
92 | } name_entry_t; | |
93 | ||
94 | typedef struct pool_list { | |
95 | pool_entry_t *pools; | |
96 | name_entry_t *names; | |
97 | } pool_list_t; | |
98 | ||
99 | static char * | |
100 | get_devid(const char *path) | |
101 | { | |
102 | int fd; | |
103 | ddi_devid_t devid; | |
104 | char *minor, *ret; | |
105 | ||
106 | if ((fd = open(path, O_RDONLY)) < 0) | |
107 | return (NULL); | |
108 | ||
109 | minor = NULL; | |
110 | ret = NULL; | |
111 | if (devid_get(fd, &devid) == 0) { | |
112 | if (devid_get_minor_name(fd, &minor) == 0) | |
113 | ret = devid_str_encode(devid, minor); | |
114 | if (minor != NULL) | |
115 | devid_str_free(minor); | |
116 | devid_free(devid); | |
117 | } | |
118 | (void) close(fd); | |
119 | ||
120 | return (ret); | |
121 | } | |
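
/*
 * Example (illustrative, not an exact string): on systems with a devid
 * driver, get_devid("/dev/sda1") might return an encoded devid such as
 * "id1,sd@SATA_____<model>_____<serial>/a", which fix_paths() below stores
 * under ZPOOL_CONFIG_DEVID.  On systems without devid support the calls
 * simply fail and NULL is returned, in which case any stale devid is
 * removed from the config instead.
 */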
122 | ||
123 | ||
124 | /* | |
125 | * Go through and fix up any path and/or devid information for the given vdev | |
126 | * configuration. | |
127 | */ | |
128 | static int | |
129 | fix_paths(nvlist_t *nv, name_entry_t *names) | |
130 | { | |
131 | nvlist_t **child; | |
132 | uint_t c, children; | |
133 | uint64_t guid; | |
134 | name_entry_t *ne, *best; | |
135 | char *path, *devid; | |
34dc7c2f BB |
136 | |
137 | if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, | |
138 | &child, &children) == 0) { | |
139 | for (c = 0; c < children; c++) | |
140 | if (fix_paths(child[c], names) != 0) | |
141 | return (-1); | |
142 | return (0); | |
143 | } | |
144 | ||
145 | /* | |
146 | * This is a leaf (file or disk) vdev. In either case, go through | |
147 | * the name list and see if we find a matching guid. If so, replace | |
148 | * the path and see if we can calculate a new devid. | |
149 | * | |
150 | * There may be multiple names associated with a particular guid, in | |
44867b6d BB |
151 | * which case we have overlapping partitions or multiple paths to the |
152 | * same disk. In this case we prefer to use the path name which | |
153 | * matches the ZPOOL_CONFIG_PATH. If no matching entry is found we | |
154 | * use the lowest order device which corresponds to the first match | |
155 | * while traversing the ZPOOL_IMPORT_PATH search path. | |
34dc7c2f BB |
156 | */ |
157 | verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0); | |
158 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) | |
159 | path = NULL; | |
160 | ||
34dc7c2f BB |
161 | best = NULL; |
162 | for (ne = names; ne != NULL; ne = ne->ne_next) { | |
163 | if (ne->ne_guid == guid) { | |
34dc7c2f BB |
164 | |
165 | if (path == NULL) { | |
166 | best = ne; | |
167 | break; | |
168 | } | |
169 | ||
44867b6d BB |
170 | if ((strlen(path) == strlen(ne->ne_name)) && |
171 | !strncmp(path, ne->ne_name, strlen(path))) { | |
34dc7c2f | 172 | best = ne; |
44867b6d | 173 | break; |
34dc7c2f | 174 | } |
44867b6d | 175 | |
0a2f7b36 | 176 | if (best == NULL || ne->ne_order < best->ne_order) |
44867b6d | 177 | best = ne; |
34dc7c2f BB |
178 | } |
179 | } | |
180 | ||
181 | if (best == NULL) | |
182 | return (0); | |
183 | ||
184 | if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0) | |
185 | return (-1); | |
186 | ||
187 | if ((devid = get_devid(best->ne_name)) == NULL) { | |
188 | (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); | |
189 | } else { | |
190 | if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) | |
191 | return (-1); | |
192 | devid_str_free(devid); | |
193 | } | |
194 | ||
195 | return (0); | |
196 | } | |
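
/*
 * Worked example for the selection above (hypothetical entries): suppose
 * guid 0xabcd was seen as "/dev/disk/by-id/ata-FOO-part1" (ne_order 5) and
 * as "/dev/sdb1" (ne_order 8).  If the label's ZPOOL_CONFIG_PATH is
 * "/dev/sdb1", the exact string match wins; otherwise the entry with the
 * lowest ne_order (here, the by-id name) is chosen.
 */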
197 | ||
198 | /* | |
199 | * Add the given configuration to the list of known devices. | |
200 | */ | |
201 | static int | |
202 | add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path, | |
44867b6d | 203 | int order, nvlist_t *config) |
34dc7c2f BB |
204 | { |
205 | uint64_t pool_guid, vdev_guid, top_guid, txg, state; | |
206 | pool_entry_t *pe; | |
207 | vdev_entry_t *ve; | |
208 | config_entry_t *ce; | |
209 | name_entry_t *ne; | |
210 | ||
211 | /* | |
212 | * If this is a hot spare not currently in use or level 2 cache | |
213 | * device, add it to the list of names to translate, but don't do | |
214 | * anything else. | |
215 | */ | |
216 | if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, | |
217 | &state) == 0 && | |
218 | (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) && | |
219 | nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) { | |
220 | if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) | |
221 | return (-1); | |
222 | ||
223 | if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { | |
224 | free(ne); | |
225 | return (-1); | |
226 | } | |
227 | ne->ne_guid = vdev_guid; | |
44867b6d | 228 | ne->ne_order = order; |
34dc7c2f BB |
229 | ne->ne_next = pl->names; |
230 | pl->names = ne; | |
231 | return (0); | |
232 | } | |
233 | ||
234 | /* | |
235 | * If we have a valid config but cannot read any of these fields, then | |
236 | * it means we have a half-initialized label. In vdev_label_init() | |
237 | * we write a label with txg == 0 so that we can identify the device | |
238 | * in case the user refers to the same disk later on. If we fail to | |
239 | * create the pool, we'll be left with a label in this state | |
240 | * which should not be considered part of a valid pool. | |
241 | */ | |
242 | if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, | |
243 | &pool_guid) != 0 || | |
244 | nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, | |
245 | &vdev_guid) != 0 || | |
246 | nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, | |
247 | &top_guid) != 0 || | |
248 | nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, | |
249 | &txg) != 0 || txg == 0) { | |
250 | nvlist_free(config); | |
251 | return (0); | |
252 | } | |
253 | ||
254 | /* | |
255 | * First, see if we know about this pool. If not, then add it to the | |
256 | * list of known pools. | |
257 | */ | |
258 | for (pe = pl->pools; pe != NULL; pe = pe->pe_next) { | |
259 | if (pe->pe_guid == pool_guid) | |
260 | break; | |
261 | } | |
262 | ||
263 | if (pe == NULL) { | |
264 | if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) { | |
265 | nvlist_free(config); | |
266 | return (-1); | |
267 | } | |
268 | pe->pe_guid = pool_guid; | |
269 | pe->pe_next = pl->pools; | |
270 | pl->pools = pe; | |
271 | } | |
272 | ||
	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_order = order;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether a pool with the given name is currently active and, if
 * so, whether its GUID matches the given GUID.  The answer is returned in
 * 'isactive'; the return value reports only whether the check itself
 * succeeded.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}
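
/*
 * Example: with hole_array = { 1, 3 } and holes = 2, vdev ids 1 and 3 are
 * holes, so vdev_is_hole(hole_array, 2, 1) returns B_TRUE while
 * vdev_is_hole(hole_array, 2, 0) returns B_FALSE.
 */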
418 | ||
34dc7c2f BB |
419 | /* |
420 | * Convert our list of pools into the definitive set of configurations. We | |
421 | * start by picking the best config for each toplevel vdev. Once that's done, | |
422 | * we assemble the toplevel vdevs into a full config for the pool. We make a | |
423 | * pass to fix up any incorrect paths, and then add it to the main list to | |
424 | * return to the user. | |
425 | */ | |
426 | static nvlist_t * | |
427 | get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok) | |
428 | { | |
429 | pool_entry_t *pe; | |
430 | vdev_entry_t *ve; | |
431 | config_entry_t *ce; | |
d4ed6673 | 432 | nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot; |
34dc7c2f BB |
433 | nvlist_t **spares, **l2cache; |
434 | uint_t i, nspares, nl2cache; | |
435 | boolean_t config_seen; | |
436 | uint64_t best_txg; | |
d96eb2b1 | 437 | char *name, *hostname, *comment; |
34dc7c2f BB |
438 | uint64_t version, guid; |
439 | uint_t children = 0; | |
440 | nvlist_t **child = NULL; | |
428870ff BB |
441 | uint_t holes; |
442 | uint64_t *hole_array, max_id; | |
34dc7c2f BB |
443 | uint_t c; |
444 | boolean_t isactive; | |
445 | uint64_t hostid; | |
446 | nvlist_t *nvl; | |
b128c09f | 447 | boolean_t found_one = B_FALSE; |
428870ff | 448 | boolean_t valid_top_config = B_FALSE; |
34dc7c2f BB |
449 | |
450 | if (nvlist_alloc(&ret, 0, 0) != 0) | |
451 | goto nomem; | |
452 | ||
453 | for (pe = pl->pools; pe != NULL; pe = pe->pe_next) { | |
428870ff | 454 | uint64_t id, max_txg = 0; |
34dc7c2f BB |
455 | |
456 | if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0) | |
457 | goto nomem; | |
458 | config_seen = B_FALSE; | |
459 | ||
460 | /* | |
461 | * Iterate over all toplevel vdevs. Grab the pool configuration | |
462 | * from the first one we find, and then go through the rest and | |
463 | * add them as necessary to the 'vdevs' member of the config. | |
464 | */ | |
465 | for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) { | |
466 | ||
467 | /* | |
468 | * Determine the best configuration for this vdev by | |
469 | * selecting the config with the latest transaction | |
470 | * group. | |
471 | */ | |
472 | best_txg = 0; | |
473 | for (ce = ve->ve_configs; ce != NULL; | |
474 | ce = ce->ce_next) { | |
475 | ||
476 | if (ce->ce_txg > best_txg) { | |
477 | tmp = ce->ce_config; | |
478 | best_txg = ce->ce_txg; | |
479 | } | |
480 | } | |
481 | ||
428870ff BB |
482 | /* |
483 | * We rely on the fact that the max txg for the | |
484 | * pool will contain the most up-to-date information | |
485 | * about the valid top-levels in the vdev namespace. | |
486 | */ | |
487 | if (best_txg > max_txg) { | |
488 | (void) nvlist_remove(config, | |
489 | ZPOOL_CONFIG_VDEV_CHILDREN, | |
490 | DATA_TYPE_UINT64); | |
491 | (void) nvlist_remove(config, | |
492 | ZPOOL_CONFIG_HOLE_ARRAY, | |
493 | DATA_TYPE_UINT64_ARRAY); | |
494 | ||
495 | max_txg = best_txg; | |
496 | hole_array = NULL; | |
497 | holes = 0; | |
498 | max_id = 0; | |
499 | valid_top_config = B_FALSE; | |
500 | ||
501 | if (nvlist_lookup_uint64(tmp, | |
502 | ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) { | |
503 | verify(nvlist_add_uint64(config, | |
504 | ZPOOL_CONFIG_VDEV_CHILDREN, | |
505 | max_id) == 0); | |
506 | valid_top_config = B_TRUE; | |
507 | } | |
508 | ||
509 | if (nvlist_lookup_uint64_array(tmp, | |
510 | ZPOOL_CONFIG_HOLE_ARRAY, &hole_array, | |
511 | &holes) == 0) { | |
512 | verify(nvlist_add_uint64_array(config, | |
513 | ZPOOL_CONFIG_HOLE_ARRAY, | |
514 | hole_array, holes) == 0); | |
515 | } | |
516 | } | |
517 | ||
34dc7c2f BB |
518 | if (!config_seen) { |
519 | /* | |
520 | * Copy the relevant pieces of data to the pool | |
521 | * configuration: | |
522 | * | |
523 | * version | |
524 | * pool guid | |
525 | * name | |
d96eb2b1 | 526 | * comment (if available) |
34dc7c2f BB |
527 | * pool state |
528 | * hostid (if available) | |
529 | * hostname (if available) | |
530 | */ | |
531 | uint64_t state; | |
532 | ||
533 | verify(nvlist_lookup_uint64(tmp, | |
534 | ZPOOL_CONFIG_VERSION, &version) == 0); | |
535 | if (nvlist_add_uint64(config, | |
536 | ZPOOL_CONFIG_VERSION, version) != 0) | |
537 | goto nomem; | |
538 | verify(nvlist_lookup_uint64(tmp, | |
539 | ZPOOL_CONFIG_POOL_GUID, &guid) == 0); | |
540 | if (nvlist_add_uint64(config, | |
541 | ZPOOL_CONFIG_POOL_GUID, guid) != 0) | |
542 | goto nomem; | |
543 | verify(nvlist_lookup_string(tmp, | |
544 | ZPOOL_CONFIG_POOL_NAME, &name) == 0); | |
545 | if (nvlist_add_string(config, | |
546 | ZPOOL_CONFIG_POOL_NAME, name) != 0) | |
547 | goto nomem; | |
d96eb2b1 DM |
548 | |
				/*
				 * COMMENT is optional; don't bail if it's not
				 * there, instead set it to NULL.
				 */
				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) != 0)
					comment = NULL;
				else if (nvlist_add_string(config,
				    ZPOOL_CONFIG_COMMENT, comment) != 0)
					goto nomem;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
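
/*
 * Worked example (assuming the usual on-disk constants, VDEV_LABELS == 4
 * and a 256 KB vdev_label_t): for a device whose aligned size is 1 GB,
 * labels 0 and 1 sit at the front at offsets 0 and 256 KB, while labels 2
 * and 3 sit at the back at (1 GB - 512 KB) and (1 GB - 256 KB).
 */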
863 | ||
864 | /* | |
865 | * Given a file descriptor, read the label information and return an nvlist | |
866 | * describing the configuration, if there is one. | |
867 | */ | |
868 | int | |
869 | zpool_read_label(int fd, nvlist_t **config) | |
870 | { | |
871 | struct stat64 statbuf; | |
872 | int l; | |
873 | vdev_label_t *label; | |
874 | uint64_t state, txg, size; | |
875 | ||
876 | *config = NULL; | |
877 | ||
878 | if (fstat64(fd, &statbuf) == -1) | |
879 | return (0); | |
880 | size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t); | |
881 | ||
882 | if ((label = malloc(sizeof (vdev_label_t))) == NULL) | |
883 | return (-1); | |
884 | ||
885 | for (l = 0; l < VDEV_LABELS; l++) { | |
b128c09f | 886 | if (pread64(fd, label, sizeof (vdev_label_t), |
34dc7c2f BB |
887 | label_offset(size, l)) != sizeof (vdev_label_t)) |
888 | continue; | |
889 | ||
890 | if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist, | |
891 | sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) | |
892 | continue; | |
893 | ||
894 | if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE, | |
895 | &state) != 0 || state > POOL_STATE_L2CACHE) { | |
896 | nvlist_free(*config); | |
897 | continue; | |
898 | } | |
899 | ||
900 | if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && | |
901 | (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG, | |
902 | &txg) != 0 || txg == 0)) { | |
903 | nvlist_free(*config); | |
904 | continue; | |
905 | } | |
906 | ||
907 | free(label); | |
908 | return (0); | |
909 | } | |
910 | ||
911 | free(label); | |
912 | *config = NULL; | |
913 | return (0); | |
914 | } | |
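
/*
 * Typical caller pattern (a sketch; see zpool_in_use() below for a real
 * consumer).  A return of -1 indicates only an allocation failure; "no
 * label found" is reported as 0 with *config left NULL:
 *
 *	nvlist_t *config;
 *
 *	if (zpool_read_label(fd, &config) != 0)
 *		return (-1);
 *	if (config != NULL) {
 *		(examine the ZPOOL_CONFIG_* pairs)
 *		nvlist_free(config);
 *	}
 */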
915 | ||
d603ed6c BB |
916 | #ifdef HAVE_LIBBLKID |
917 | /* | |
918 | * Use libblkid to quickly search for zfs devices | |
919 | */ | |
428870ff | 920 | static int |
d603ed6c | 921 | zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools) |
428870ff | 922 | { |
d603ed6c BB |
923 | blkid_cache cache; |
924 | blkid_dev_iterate iter; | |
925 | blkid_dev dev; | |
926 | const char *devname; | |
428870ff | 927 | nvlist_t *config; |
d603ed6c | 928 | int fd, err; |
428870ff | 929 | |
d603ed6c BB |
930 | err = blkid_get_cache(&cache, NULL); |
931 | if (err != 0) { | |
932 | (void) zfs_error_fmt(hdl, EZFS_BADCACHE, | |
933 | dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err); | |
934 | goto err_blkid1; | |
428870ff BB |
935 | } |
936 | ||
d603ed6c BB |
937 | err = blkid_probe_all(cache); |
938 | if (err != 0) { | |
939 | (void) zfs_error_fmt(hdl, EZFS_BADCACHE, | |
940 | dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err); | |
941 | goto err_blkid2; | |
428870ff | 942 | } |
428870ff | 943 | |
	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		/* flag the failure so the caller falls back to probing */
		err = EINVAL;
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		devname = blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &config);
		(void) close(fd);

		if (err != 0) {
			(void) no_memory(hdl);
			goto err_blkid3;
		}

		if (config != NULL) {
			err = add_config(hdl, pools, devname, 0, config);
			if (err != 0)
				goto err_blkid3;
		}
	}

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */

#define	DEFAULT_IMPORT_PATH_SIZE	8

static char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/disk/zpool",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev"			/* UNSAFE device names will change */
};
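
/*
 * The list above is ordered from most to least preferred.  Because
 * add_config() records the index of the directory that produced a name
 * (i + 1) as ne_order, and fix_paths() picks the entry with the lowest
 * ne_order absent an exact path match, a disk that is visible as, say,
 * both /dev/mapper/mpatha and /dev/sdb ends up referenced through its
 * multipath name (these device names are only illustrative).
 */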
1000 | ||
34dc7c2f BB |
1001 | /* |
1002 | * Given a list of directories to search, find all pools stored on disk. This | |
1003 | * includes partial pools which are not available to import. If no args are | |
1004 | * given (argc is 0), then the default directory (/dev/dsk) is searched. | |
b128c09f BB |
1005 | * poolname or guid (but not both) are provided by the caller when trying |
1006 | * to import a specific pool. | |
34dc7c2f | 1007 | */ |
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all device for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when accessed from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats. We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, i+1, config))
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
1212 | ||
b128c09f BB |
1213 | nvlist_t * |
1214 | zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv) | |
1215 | { | |
428870ff | 1216 | importargs_t iarg = { 0 }; |
b128c09f | 1217 | |
428870ff BB |
1218 | iarg.paths = argc; |
1219 | iarg.path = argv; | |
b128c09f | 1220 | |
428870ff | 1221 | return (zpool_find_import_impl(hdl, &iarg)); |
b128c09f BB |
1222 | } |
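
/*
 * Usage sketch (hypothetical caller, error handling elided): search the
 * default paths and walk the returned name -> config pairs, just as
 * zpool_find_import_cached() below walks its 'raw' list:
 *
 *	nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *	nvpair_t *elem = NULL;
 *
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *		(nvpair_name(elem) is the pool name; the nvlist value
 *		is the pool config assembled by get_configs())
 *	}
 */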
1223 | ||
34dc7c2f BB |
1224 | /* |
1225 | * Given a cache file, return the contents as a list of importable pools. | |
b128c09f BB |
1226 | * poolname or guid (but not both) are provided by the caller when trying |
1227 | * to import a specific pool. | |
34dc7c2f BB |
1228 | */ |
1229 | nvlist_t * | |
1230 | zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile, | |
b128c09f | 1231 | char *poolname, uint64_t guid) |
34dc7c2f BB |
1232 | { |
1233 | char *buf; | |
1234 | int fd; | |
1235 | struct stat64 statbuf; | |
1236 | nvlist_t *raw, *src, *dst; | |
1237 | nvlist_t *pools; | |
1238 | nvpair_t *elem; | |
1239 | char *name; | |
b128c09f | 1240 | uint64_t this_guid; |
34dc7c2f BB |
1241 | boolean_t active; |
1242 | ||
b128c09f BB |
1243 | verify(poolname == NULL || guid == 0); |
1244 | ||
34dc7c2f BB |
1245 | if ((fd = open(cachefile, O_RDONLY)) < 0) { |
1246 | zfs_error_aux(hdl, "%s", strerror(errno)); | |
1247 | (void) zfs_error(hdl, EZFS_BADCACHE, | |
1248 | dgettext(TEXT_DOMAIN, "failed to open cache file")); | |
1249 | return (NULL); | |
1250 | } | |
1251 | ||
1252 | if (fstat64(fd, &statbuf) != 0) { | |
1253 | zfs_error_aux(hdl, "%s", strerror(errno)); | |
1254 | (void) close(fd); | |
1255 | (void) zfs_error(hdl, EZFS_BADCACHE, | |
1256 | dgettext(TEXT_DOMAIN, "failed to get size of cache file")); | |
1257 | return (NULL); | |
1258 | } | |
1259 | ||
1260 | if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) { | |
1261 | (void) close(fd); | |
1262 | return (NULL); | |
1263 | } | |
1264 | ||
1265 | if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { | |
1266 | (void) close(fd); | |
1267 | free(buf); | |
1268 | (void) zfs_error(hdl, EZFS_BADCACHE, | |
1269 | dgettext(TEXT_DOMAIN, | |
1270 | "failed to read cache file contents")); | |
1271 | return (NULL); | |
1272 | } | |
1273 | ||
1274 | (void) close(fd); | |
1275 | ||
1276 | if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) { | |
1277 | free(buf); | |
1278 | (void) zfs_error(hdl, EZFS_BADCACHE, | |
1279 | dgettext(TEXT_DOMAIN, | |
1280 | "invalid or corrupt cache file contents")); | |
1281 | return (NULL); | |
1282 | } | |
1283 | ||
1284 | free(buf); | |
1285 | ||
1286 | /* | |
1287 | * Go through and get the current state of the pools and refresh their | |
1288 | * state. | |
1289 | */ | |
1290 | if (nvlist_alloc(&pools, 0, 0) != 0) { | |
1291 | (void) no_memory(hdl); | |
1292 | nvlist_free(raw); | |
1293 | return (NULL); | |
1294 | } | |
1295 | ||
1296 | elem = NULL; | |
1297 | while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) { | |
1298 | verify(nvpair_value_nvlist(elem, &src) == 0); | |
1299 | ||
1300 | verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME, | |
1301 | &name) == 0); | |
b128c09f BB |
1302 | if (poolname != NULL && strcmp(poolname, name) != 0) |
1303 | continue; | |
1304 | ||
34dc7c2f | 1305 | verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID, |
b128c09f BB |
1306 | &this_guid) == 0); |
1307 | if (guid != 0) { | |
1308 | verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID, | |
1309 | &this_guid) == 0); | |
1310 | if (guid != this_guid) | |
1311 | continue; | |
1312 | } | |
34dc7c2f | 1313 | |
b128c09f BB |
1314 | if (pool_active(hdl, name, this_guid, &active) != 0) { |
1315 | nvlist_free(raw); | |
1316 | nvlist_free(pools); | |
1317 | return (NULL); | |
1318 | } | |
34dc7c2f | 1319 | |
b128c09f BB |
1320 | if (active) |
1321 | continue; | |
34dc7c2f | 1322 | |
b128c09f BB |
1323 | if ((dst = refresh_config(hdl, src)) == NULL) { |
1324 | nvlist_free(raw); | |
1325 | nvlist_free(pools); | |
1326 | return (NULL); | |
1327 | } | |
34dc7c2f | 1328 | |
b128c09f BB |
1329 | if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) { |
1330 | (void) no_memory(hdl); | |
34dc7c2f | 1331 | nvlist_free(dst); |
b128c09f BB |
1332 | nvlist_free(raw); |
1333 | nvlist_free(pools); | |
1334 | return (NULL); | |
34dc7c2f | 1335 | } |
b128c09f | 1336 | nvlist_free(dst); |
34dc7c2f BB |
1337 | } |
1338 | ||
1339 | nvlist_free(raw); | |
1340 | return (pools); | |
1341 | } | |
1342 | ||
428870ff BB |
1343 | static int |
1344 | name_or_guid_exists(zpool_handle_t *zhp, void *data) | |
1345 | { | |
1346 | importargs_t *import = data; | |
1347 | int found = 0; | |
1348 | ||
1349 | if (import->poolname != NULL) { | |
1350 | char *pool_name; | |
1351 | ||
1352 | verify(nvlist_lookup_string(zhp->zpool_config, | |
1353 | ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0); | |
1354 | if (strcmp(pool_name, import->poolname) == 0) | |
1355 | found = 1; | |
1356 | } else { | |
1357 | uint64_t pool_guid; | |
1358 | ||
1359 | verify(nvlist_lookup_uint64(zhp->zpool_config, | |
1360 | ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0); | |
1361 | if (pool_guid == import->guid) | |
1362 | found = 1; | |
1363 | } | |
1364 | ||
1365 | zpool_close(zhp); | |
1366 | return (found); | |
1367 | } | |
1368 | ||
1369 | nvlist_t * | |
1370 | zpool_search_import(libzfs_handle_t *hdl, importargs_t *import) | |
1371 | { | |
1372 | verify(import->poolname == NULL || import->guid == 0); | |
1373 | ||
1374 | if (import->unique) | |
1375 | import->exists = zpool_iter(hdl, name_or_guid_exists, import); | |
1376 | ||
1377 | if (import->cachefile != NULL) | |
1378 | return (zpool_find_import_cached(hdl, import->cachefile, | |
1379 | import->poolname, import->guid)); | |
1380 | ||
1381 | return (zpool_find_import_impl(hdl, import)); | |
1382 | } | |
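
/*
 * Example caller setup (a sketch; only fields used above are shown, and
 * the pool name is hypothetical):
 *
 *	importargs_t args = { 0 };
 *
 *	args.poolname = "tank";	(or: args.guid = <pool guid>, not both)
 *	args.unique = B_TRUE;	(also record, in args.exists, whether the
 *				name or guid is already in use)
 *	pools = zpool_search_import(hdl, &args);
 *
 * With args.cachefile set, the cache file is consulted instead of
 * scanning devices; with args.paths/args.path set, only those
 * directories are searched.
 */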

boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
1438 | ||
1439 | /* | |
1440 | * Determines if the pool is in use. If so, it returns true and the state of | |
1441 | * the pool as well as the name of the pool. Both strings are allocated and | |
1442 | * must be freed by the caller. | |
1443 | */ | |
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;
1571 | ||
1572 | case POOL_STATE_L2CACHE: | |
1573 | ||
1574 | /* | |
1575 | * Check if any pool is currently using this l2cache device. | |
1576 | */ | |
1577 | cb.cb_zhp = NULL; | |
1578 | cb.cb_guid = vdev_guid; | |
1579 | cb.cb_type = ZPOOL_CONFIG_L2CACHE; | |
1580 | if (zpool_iter(hdl, find_aux, &cb) == 1) { | |
1581 | name = (char *)zpool_get_name(cb.cb_zhp); | |
1582 | ret = TRUE; | |
1583 | } else { | |
1584 | ret = FALSE; | |
1585 | } | |
1586 | break; | |
1587 | ||
1588 | default: | |
1589 | ret = B_FALSE; | |
1590 | } | |
1591 | ||
1592 | ||
1593 | if (ret) { | |
1594 | if ((*namestr = zfs_strdup(hdl, name)) == NULL) { | |
1595 | if (cb.cb_zhp) | |
1596 | zpool_close(cb.cb_zhp); | |
1597 | nvlist_free(config); | |
1598 | return (-1); | |
1599 | } | |
1600 | *state = (pool_state_t)stateval; | |
1601 | } | |
1602 | ||
1603 | if (cb.cb_zhp) | |
1604 | zpool_close(cb.cb_zhp); | |
1605 | ||
1606 | nvlist_free(config); | |
1607 | *inuse = ret; | |
1608 | return (0); | |
1609 | } |
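
/*
 * Usage sketch for zpool_in_use() (hypothetical caller, error handling
 * elided): device-vetting paths in zpool(8) follow this pattern:
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(warn that the device belongs to pool 'name', in 'state')
 *		free(name);
 *	}
 */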