/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
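 *
 * For example, labels read from three devices of a pool with two toplevel
 * vdevs might organize as (all guids and txgs hypothetical):
 *
 *	pool guid 0x1 -> toplevel vdev guid 0xa -> txg 95, txg 100
 *	pool guid 0x1 -> toplevel vdev guid 0xb -> txg 100
 *
 * in which case the txg 100 config is selected for each toplevel vdev.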
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <thread_pool.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t ce_txg;
	nvlist_t *ce_config;
	struct config_entry *ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t ve_guid;
	config_entry_t *ve_configs;
	struct vdev_entry *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t pe_guid;
	vdev_entry_t *pe_vdevs;
	struct pool_entry *pe_next;
} pool_entry_t;

typedef struct name_entry {
	char *ne_name;
	uint64_t ne_guid;
	struct name_entry *ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t *pools;
	name_entry_t *names;
} pool_list_t;

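/*
 * Derive the devid string for the device at 'path', or return NULL if one
 * cannot be obtained. The returned string must be freed with
 * devid_str_free().
 */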
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev. In either case, go through
	 * the name list and see if we find a matching guid. If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk. If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path. This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
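	 *
	 * For example, given an original path of /dev/dsk/c0t1d0s0 and
	 * candidate names /dev/dsk/c2t1d0s0 and /dev/dsk/c2t1d0s2 (all
	 * hypothetical), the first candidate wins: it matches six trailing
	 * characters ("t1d0s0") while the second matches none.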
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or a level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label. In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on. If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool. If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

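/*
 * Ask the kernel to try importing the given config (ZFS_IOC_POOL_TRYIMPORT)
 * and return the refreshed, fully-populated config it reports back, or NULL
 * on failure.
 */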
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
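 *
 * With VDEV_LABELS == 4, labels 0 and 1 sit at the front of the device
 * (offsets 0 and sizeof (vdev_label_t)) and labels 2 and 3 occupy the last
 * two label-sized regions below 'size'.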
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

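/*
 * Per-device state for the directory scan: one node per directory entry,
 * kept in an AVL tree ordered by name.
 */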
typedef struct rdsk_node {
	char *rn_name;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * slices zero and two are the most likely to provide results,
	 * so put those first
	 */
	nm1slice = strstr(nm1, "s0");
	nm2slice = strstr(nm2, "s0");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "s2");
	nm2slice = strstr(nm2, "s2");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

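/*
 * If the named slice is too small to contain a zpool, mark its cache node
 * so that it is skipped later.
 */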
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/*
	 * protect against division by zero for disk labels that
	 * contain a bogus sector size
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}

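/*
 * The device named by 'sname' is gone: mark every slice (sN) and fdisk
 * partition (pN) name derived from it as unable to contain a zpool.
 */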
static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
}

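/*
 * Read the disk's VTOC or EFI label and flag any slices too small to
 * contain a zpool, so they are never opened.
 */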
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
}

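/*
 * Thread-pool worker: open one directory entry and, if it could plausibly
 * hold a pool, read its ZFS label into rn_config.
 */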
static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int fd;

	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats. We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	if (zpool_read_label(fd, &config) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}
	(void) close(fd);

	rn->rn_config = config;
	if (config != NULL) {
		assert(rn->rn_nozpool == B_FALSE);
	}
}

/*
 * Given a file descriptor, clear (zero) the label information. This function
 * is currently only used in the appliance stack as part of the ZFS sysevent
 * module.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			/* don't leak the zeroed buffer on a short write */
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev/dsk";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign. Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, slice->rn_name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

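/*
 * Convenience wrapper around zpool_find_import_impl() that searches the
 * given list of directories.
 */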
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

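/*
 * zpool_iter() callback: report whether the currently imported pool
 * matches the name or guid given in the importargs_t.
 */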
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

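/*
 * Top-level entry point for pool discovery: optionally check whether the
 * requested name or guid already exists as an active pool, then search
 * either the cache file or the device directories.
 */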
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}

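/*
 * Recursively search the vdev tree 'nv' for a vdev with the given guid.
 */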
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char *cb_type;
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
} aux_cbdata_t;

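/*
 * zpool_iter() callback: search the pool's spare or l2cache array (chosen
 * by cb_type) for a device with the given guid, keeping the pool handle
 * open on a match.
 */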
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}