1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 */
28
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <fcntl.h>
33 #include <libintl.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <unistd.h>
38 #include <libgen.h>
39 #include <zone.h>
40 #include <sys/stat.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <dlfcn.h>
45
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
51
52 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
53
54 typedef struct prop_flags {
55 int create:1; /* Validate property on creation */
56 int import:1; /* Validate property on import */
57 } prop_flags_t;
58
59 /*
60 * ====================================================================
61 * zpool property functions
62 * ====================================================================
63 */
64
65 static int
66 zpool_get_all_props(zpool_handle_t *zhp)
67 {
68 zfs_cmd_t zc = {"\0"};
69 libzfs_handle_t *hdl = zhp->zpool_hdl;
70
71 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
72
73 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
74 return (-1);
75
76 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
77 if (errno == ENOMEM) {
78 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
79 zcmd_free_nvlists(&zc);
80 return (-1);
81 }
82 } else {
83 zcmd_free_nvlists(&zc);
84 return (-1);
85 }
86 }
87
88 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
89 zcmd_free_nvlists(&zc);
90 return (-1);
91 }
92
93 zcmd_free_nvlists(&zc);
94
95 return (0);
96 }
97
98 static int
99 zpool_props_refresh(zpool_handle_t *zhp)
100 {
101 nvlist_t *old_props;
102
103 old_props = zhp->zpool_props;
104
105 if (zpool_get_all_props(zhp) != 0)
106 return (-1);
107
108 nvlist_free(old_props);
109 return (0);
110 }
111
112 static char *
113 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
114 zprop_source_t *src)
115 {
116 nvlist_t *nv, *nvl;
117 uint64_t ival;
118 char *value;
119 zprop_source_t source;
120
121 nvl = zhp->zpool_props;
122 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
123 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 source = ival;
125 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 } else {
127 source = ZPROP_SRC_DEFAULT;
128 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
129 value = "-";
130 }
131
132 if (src)
133 *src = source;
134
135 return (value);
136 }
137
138 uint64_t
139 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
140 {
141 nvlist_t *nv, *nvl;
142 uint64_t value;
143 zprop_source_t source;
144
145 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
146 /*
147 * zpool_get_all_props() has most likely failed because
148 * the pool is faulted, but if all we need is the top level
149 * vdev's guid then get it from the zhp config nvlist.
150 */
151 if ((prop == ZPOOL_PROP_GUID) &&
152 (nvlist_lookup_nvlist(zhp->zpool_config,
153 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
154 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
155 == 0)) {
156 return (value);
157 }
158 return (zpool_prop_default_numeric(prop));
159 }
160
161 nvl = zhp->zpool_props;
162 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
163 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 source = value;
165 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 } else {
167 source = ZPROP_SRC_DEFAULT;
168 value = zpool_prop_default_numeric(prop);
169 }
170
171 if (src)
172 *src = source;
173
174 return (value);
175 }
176
177 /*
178 * Map VDEV STATE to printed strings.
179 */
180 char *
181 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
182 {
183 switch (state) {
184 case VDEV_STATE_CLOSED:
185 case VDEV_STATE_OFFLINE:
186 return (gettext("OFFLINE"));
187 case VDEV_STATE_REMOVED:
188 return (gettext("REMOVED"));
189 case VDEV_STATE_CANT_OPEN:
190 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
191 return (gettext("FAULTED"));
192 else if (aux == VDEV_AUX_SPLIT_POOL)
193 return (gettext("SPLIT"));
194 else
195 return (gettext("UNAVAIL"));
196 case VDEV_STATE_FAULTED:
197 return (gettext("FAULTED"));
198 case VDEV_STATE_DEGRADED:
199 return (gettext("DEGRADED"));
200 case VDEV_STATE_HEALTHY:
201 return (gettext("ONLINE"));
202
203 default:
204 break;
205 }
206
207 return (gettext("UNKNOWN"));
208 }
209
210 /*
211 * Map POOL STATE to printed strings.
212 */
213 const char *
214 zpool_pool_state_to_name(pool_state_t state)
215 {
216 switch (state) {
217 default:
218 break;
219 case POOL_STATE_ACTIVE:
220 return (gettext("ACTIVE"));
221 case POOL_STATE_EXPORTED:
222 return (gettext("EXPORTED"));
223 case POOL_STATE_DESTROYED:
224 return (gettext("DESTROYED"));
225 case POOL_STATE_SPARE:
226 return (gettext("SPARE"));
227 case POOL_STATE_L2CACHE:
228 return (gettext("L2CACHE"));
229 case POOL_STATE_UNINITIALIZED:
230 return (gettext("UNINITIALIZED"));
231 case POOL_STATE_UNAVAIL:
232 return (gettext("UNAVAIL"));
233 case POOL_STATE_POTENTIALLY_ACTIVE:
234 return (gettext("POTENTIALLY_ACTIVE"));
235 }
236
237 return (gettext("UNKNOWN"));
238 }
239
240 /*
241 * Get a zpool property value for 'prop' and return the value in
242 * a pre-allocated buffer.
243 */
244 int
245 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
246 size_t len, zprop_source_t *srctype, boolean_t literal)
247 {
248 uint64_t intval;
249 const char *strval;
250 zprop_source_t src = ZPROP_SRC_NONE;
251 nvlist_t *nvroot;
252 vdev_stat_t *vs;
253 uint_t vsc;
254
255 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
256 switch (prop) {
257 case ZPOOL_PROP_NAME:
258 (void) strlcpy(buf, zpool_get_name(zhp), len);
259 break;
260
261 case ZPOOL_PROP_HEALTH:
262 (void) strlcpy(buf, "FAULTED", len);
263 break;
264
265 case ZPOOL_PROP_GUID:
266 intval = zpool_get_prop_int(zhp, prop, &src);
267 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
268 break;
269
270 case ZPOOL_PROP_ALTROOT:
271 case ZPOOL_PROP_CACHEFILE:
272 case ZPOOL_PROP_COMMENT:
273 if (zhp->zpool_props != NULL ||
274 zpool_get_all_props(zhp) == 0) {
275 (void) strlcpy(buf,
276 zpool_get_prop_string(zhp, prop, &src),
277 len);
278 break;
279 }
280 /* FALLTHROUGH */
281 default:
282 (void) strlcpy(buf, "-", len);
283 break;
284 }
285
286 if (srctype != NULL)
287 *srctype = src;
288 return (0);
289 }
290
291 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
292 prop != ZPOOL_PROP_NAME)
293 return (-1);
294
295 switch (zpool_prop_get_type(prop)) {
296 case PROP_TYPE_STRING:
297 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
298 len);
299 break;
300
301 case PROP_TYPE_NUMBER:
302 intval = zpool_get_prop_int(zhp, prop, &src);
303
304 switch (prop) {
305 case ZPOOL_PROP_SIZE:
306 case ZPOOL_PROP_ALLOCATED:
307 case ZPOOL_PROP_FREE:
308 case ZPOOL_PROP_FREEING:
309 case ZPOOL_PROP_LEAKED:
310 case ZPOOL_PROP_ASHIFT:
311 if (literal)
312 (void) snprintf(buf, len, "%llu",
313 (u_longlong_t)intval);
314 else
315 (void) zfs_nicenum(intval, buf, len);
316 break;
317
318 case ZPOOL_PROP_EXPANDSZ:
319 if (intval == 0) {
320 (void) strlcpy(buf, "-", len);
321 } else if (literal) {
322 (void) snprintf(buf, len, "%llu",
323 (u_longlong_t)intval);
324 } else {
325 (void) zfs_nicebytes(intval, buf, len);
326 }
327 break;
328
329 case ZPOOL_PROP_CAPACITY:
330 if (literal) {
331 (void) snprintf(buf, len, "%llu",
332 (u_longlong_t)intval);
333 } else {
334 (void) snprintf(buf, len, "%llu%%",
335 (u_longlong_t)intval);
336 }
337 break;
338
339 case ZPOOL_PROP_FRAGMENTATION:
340 if (intval == UINT64_MAX) {
341 (void) strlcpy(buf, "-", len);
342 } else if (literal) {
343 (void) snprintf(buf, len, "%llu",
344 (u_longlong_t)intval);
345 } else {
346 (void) snprintf(buf, len, "%llu%%",
347 (u_longlong_t)intval);
348 }
349 break;
350
351 case ZPOOL_PROP_DEDUPRATIO:
352 if (literal)
353 (void) snprintf(buf, len, "%llu.%02llu",
354 (u_longlong_t)(intval / 100),
355 (u_longlong_t)(intval % 100));
356 else
357 (void) snprintf(buf, len, "%llu.%02llux",
358 (u_longlong_t)(intval / 100),
359 (u_longlong_t)(intval % 100));
360 break;
361
362 case ZPOOL_PROP_HEALTH:
363 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
364 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
365 verify(nvlist_lookup_uint64_array(nvroot,
366 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
367 == 0);
368
369 (void) strlcpy(buf, zpool_state_to_name(intval,
370 vs->vs_aux), len);
371 break;
372 case ZPOOL_PROP_VERSION:
373 if (intval >= SPA_VERSION_FEATURES) {
374 (void) snprintf(buf, len, "-");
375 break;
376 }
377 /* FALLTHROUGH */
378 default:
379 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
380 }
381 break;
382
383 case PROP_TYPE_INDEX:
384 intval = zpool_get_prop_int(zhp, prop, &src);
385 if (zpool_prop_index_to_string(prop, intval, &strval)
386 != 0)
387 return (-1);
388 (void) strlcpy(buf, strval, len);
389 break;
390
391 default:
392 abort();
393 }
394
395 if (srctype)
396 *srctype = src;
397
398 return (0);
399 }
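
/*
 * Editorial usage sketch (not part of the original file, not compiled):
 * how a caller might read a numeric pool property with zpool_get_prop().
 * The property choice and the printed value are illustrative.
 */
#if 0
static void
example_print_free(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	/* B_FALSE requests the "nice" form (e.g. "1.81T"); B_TRUE raw. */
	if (zpool_get_prop(zhp, ZPOOL_PROP_FREE, buf, sizeof (buf),
	    NULL, B_FALSE) == 0)
		(void) printf("free: %s\n", buf);
}
#endif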
400
401 /*
402  * Check that the bootfs name has the same pool name as the pool it is
403  * set on. Assumes bootfs is a valid dataset name.
404 */
405 static boolean_t
406 bootfs_name_valid(const char *pool, char *bootfs)
407 {
408 int len = strlen(pool);
409
410 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
411 return (B_FALSE);
412
413 if (strncmp(pool, bootfs, len) == 0 &&
414 (bootfs[len] == '/' || bootfs[len] == '\0'))
415 return (B_TRUE);
416
417 return (B_FALSE);
418 }
419
420 boolean_t
421 zpool_is_bootable(zpool_handle_t *zhp)
422 {
423 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
424
425 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
426 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
427 sizeof (bootfs)) != 0);
428 }
429
430
431 /*
432 * Given an nvlist of zpool properties to be set, validate that they are
433  * correct, and parse any numeric properties (index, boolean, etc.) if they are
434 * specified as strings.
435 */
436 static nvlist_t *
437 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
438 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
439 {
440 nvpair_t *elem;
441 nvlist_t *retprops;
442 zpool_prop_t prop;
443 char *strval;
444 uint64_t intval;
445 char *slash, *check;
446 struct stat64 statbuf;
447 zpool_handle_t *zhp;
448
449 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
450 (void) no_memory(hdl);
451 return (NULL);
452 }
453
454 elem = NULL;
455 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
456 const char *propname = nvpair_name(elem);
457
458 prop = zpool_name_to_prop(propname);
459 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
460 int err;
461 char *fname = strchr(propname, '@') + 1;
462
463 err = zfeature_lookup_name(fname, NULL);
464 if (err != 0) {
465 ASSERT3U(err, ==, ENOENT);
466 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
467 "invalid feature '%s'"), fname);
468 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
469 goto error;
470 }
471
472 if (nvpair_type(elem) != DATA_TYPE_STRING) {
473 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
474 "'%s' must be a string"), propname);
475 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
476 goto error;
477 }
478
479 (void) nvpair_value_string(elem, &strval);
480 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
481 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
482 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483 "property '%s' can only be set to "
484 "'enabled' or 'disabled'"), propname);
485 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486 goto error;
487 }
488
489 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
490 (void) no_memory(hdl);
491 goto error;
492 }
493 continue;
494 }
495
496 /*
497 * Make sure this property is valid and applies to this type.
498 */
499 if (prop == ZPROP_INVAL) {
500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 "invalid property '%s'"), propname);
502 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
503 goto error;
504 }
505
506 if (zpool_prop_readonly(prop)) {
507 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
508 "is readonly"), propname);
509 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
510 goto error;
511 }
512
513 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
514 &strval, &intval, errbuf) != 0)
515 goto error;
516
517 /*
518 * Perform additional checking for specific properties.
519 */
520 switch (prop) {
521 case ZPOOL_PROP_VERSION:
522 if (intval < version ||
523 !SPA_VERSION_IS_SUPPORTED(intval)) {
524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 "property '%s' number %llu is invalid."),
526 propname, (u_longlong_t)intval);
527 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
528 goto error;
529 }
530 break;
531
532 case ZPOOL_PROP_ASHIFT:
533 if (!flags.create) {
534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 "property '%s' can only be set at "
536 "creation time"), propname);
537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
538 goto error;
539 }
540
541 if (intval != 0 &&
542 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
543 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
544 "invalid '%s=%d' property: only values "
545 "between %" PRId32 " and %" PRId32 " "
546 "are allowed.\n"),
547 propname, intval, ASHIFT_MIN, ASHIFT_MAX);
548 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
549 goto error;
550 }
551 break;
552
553 case ZPOOL_PROP_BOOTFS:
554 if (flags.create || flags.import) {
555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
556 "property '%s' cannot be set at creation "
557 "or import time"), propname);
558 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
559 goto error;
560 }
561
562 if (version < SPA_VERSION_BOOTFS) {
563 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
564 "pool must be upgraded to support "
565 "'%s' property"), propname);
566 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
567 goto error;
568 }
569
570 /*
571  * The bootfs property value has to be a dataset name, and
572  * the dataset has to be in the same pool it is set on.
573 */
574 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
575 strval)) {
576 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
577 "is an invalid name"), strval);
578 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
579 goto error;
580 }
581
582 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
583 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 "could not open pool '%s'"), poolname);
585 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
586 goto error;
587 }
588 zpool_close(zhp);
589 break;
590
591 case ZPOOL_PROP_ALTROOT:
592 if (!flags.create && !flags.import) {
593 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
594 "property '%s' can only be set during pool "
595 "creation or import"), propname);
596 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
597 goto error;
598 }
599
600 if (strval[0] != '/') {
601 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
602 "bad alternate root '%s'"), strval);
603 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
604 goto error;
605 }
606 break;
607
608 case ZPOOL_PROP_CACHEFILE:
609 if (strval[0] == '\0')
610 break;
611
612 if (strcmp(strval, "none") == 0)
613 break;
614
615 if (strval[0] != '/') {
616 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
617 "property '%s' must be empty, an "
618 "absolute path, or 'none'"), propname);
619 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
620 goto error;
621 }
622
623 slash = strrchr(strval, '/');
624
625 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
626 strcmp(slash, "/..") == 0) {
627 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
628 "'%s' is not a valid file"), strval);
629 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
630 goto error;
631 }
632
633 *slash = '\0';
634
635 if (strval[0] != '\0' &&
636 (stat64(strval, &statbuf) != 0 ||
637 !S_ISDIR(statbuf.st_mode))) {
638 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
639 "'%s' is not a valid directory"),
640 strval);
641 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
642 goto error;
643 }
644
645 *slash = '/';
646 break;
647
648 case ZPOOL_PROP_COMMENT:
649 for (check = strval; *check != '\0'; check++) {
650 if (!isprint(*check)) {
651 zfs_error_aux(hdl,
652 dgettext(TEXT_DOMAIN,
653 "comment may only have printable "
654 "characters"));
655 (void) zfs_error(hdl, EZFS_BADPROP,
656 errbuf);
657 goto error;
658 }
659 }
660 if (strlen(strval) > ZPROP_MAX_COMMENT) {
661 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
662 "comment must not exceed %d characters"),
663 ZPROP_MAX_COMMENT);
664 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
665 goto error;
666 }
667 break;
668 case ZPOOL_PROP_READONLY:
669 if (!flags.import) {
670 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
671 "property '%s' can only be set at "
672 "import time"), propname);
673 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
674 goto error;
675 }
676 break;
677 case ZPOOL_PROP_TNAME:
678 if (!flags.create) {
679 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
680 "property '%s' can only be set at "
681 "creation time"), propname);
682 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
683 goto error;
684 }
685 break;
686
687 default:
688 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
689 "property '%s'(%d) not defined"), propname, prop);
690 break;
691 }
692 }
693
694 return (retprops);
695 error:
696 nvlist_free(retprops);
697 return (NULL);
698 }
699
700 /*
701 * Set zpool property : propname=propval.
702 */
703 int
704 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
705 {
706 zfs_cmd_t zc = {"\0"};
707 int ret = -1;
708 char errbuf[1024];
709 nvlist_t *nvl = NULL;
710 nvlist_t *realprops;
711 uint64_t version;
712 prop_flags_t flags = { 0 };
713
714 (void) snprintf(errbuf, sizeof (errbuf),
715 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
716 zhp->zpool_name);
717
718 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
719 return (no_memory(zhp->zpool_hdl));
720
721 if (nvlist_add_string(nvl, propname, propval) != 0) {
722 nvlist_free(nvl);
723 return (no_memory(zhp->zpool_hdl));
724 }
725
726 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
727 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
728 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
729 nvlist_free(nvl);
730 return (-1);
731 }
732
733 nvlist_free(nvl);
734 nvl = realprops;
735
736 /*
737 * Execute the corresponding ioctl() to set this property.
738 */
739 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
740
741 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
742 nvlist_free(nvl);
743 return (-1);
744 }
745
746 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
747
748 zcmd_free_nvlists(&zc);
749 nvlist_free(nvl);
750
751 if (ret)
752 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
753 else
754 (void) zpool_props_refresh(zhp);
755
756 return (ret);
757 }
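
/*
 * Editorial usage sketch (assumed caller code, not compiled): setting a
 * property by name. On failure zpool_set_prop() has already reported the
 * error through the libzfs handle.
 */
#if 0
static void
example_set_comment(zpool_handle_t *zhp)
{
	if (zpool_set_prop(zhp, "comment", "backup pool") == 0)
		(void) printf("comment updated\n");
}
#endif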
758
759 int
760 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
761 {
762 libzfs_handle_t *hdl = zhp->zpool_hdl;
763 zprop_list_t *entry;
764 char buf[ZFS_MAXPROPLEN];
765 nvlist_t *features = NULL;
766 nvpair_t *nvp;
767 zprop_list_t **last;
768 boolean_t firstexpand = (NULL == *plp);
769 int i;
770
771 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
772 return (-1);
773
774 last = plp;
775 while (*last != NULL)
776 last = &(*last)->pl_next;
777
778 if ((*plp)->pl_all)
779 features = zpool_get_features(zhp);
780
781 if ((*plp)->pl_all && firstexpand) {
782 for (i = 0; i < SPA_FEATURES; i++) {
783 zprop_list_t *entry = zfs_alloc(hdl,
784 sizeof (zprop_list_t));
785 entry->pl_prop = ZPROP_INVAL;
786 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
787 spa_feature_table[i].fi_uname);
788 entry->pl_width = strlen(entry->pl_user_prop);
789 entry->pl_all = B_TRUE;
790
791 *last = entry;
792 last = &entry->pl_next;
793 }
794 }
795
796 /* add any unsupported features */
797 for (nvp = nvlist_next_nvpair(features, NULL);
798 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
799 char *propname;
800 boolean_t found;
801 zprop_list_t *entry;
802
803 if (zfeature_is_supported(nvpair_name(nvp)))
804 continue;
805
806 propname = zfs_asprintf(hdl, "unsupported@%s",
807 nvpair_name(nvp));
808
809 /*
810  * Before adding the property to the list, make sure that no
811 * other pool already added the same property.
812 */
813 found = B_FALSE;
814 entry = *plp;
815 while (entry != NULL) {
816 if (entry->pl_user_prop != NULL &&
817 strcmp(propname, entry->pl_user_prop) == 0) {
818 found = B_TRUE;
819 break;
820 }
821 entry = entry->pl_next;
822 }
823 if (found) {
824 free(propname);
825 continue;
826 }
827
828 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
829 entry->pl_prop = ZPROP_INVAL;
830 entry->pl_user_prop = propname;
831 entry->pl_width = strlen(entry->pl_user_prop);
832 entry->pl_all = B_TRUE;
833
834 *last = entry;
835 last = &entry->pl_next;
836 }
837
838 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
839
840 if (entry->pl_fixed)
841 continue;
842
843 if (entry->pl_prop != ZPROP_INVAL &&
844 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
845 NULL, B_FALSE) == 0) {
846 if (strlen(buf) > entry->pl_width)
847 entry->pl_width = strlen(buf);
848 }
849 }
850
851 return (0);
852 }
853
854 /*
855 * Get the state for the given feature on the given ZFS pool.
856 */
857 int
858 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
859 size_t len)
860 {
861 uint64_t refcount;
862 boolean_t found = B_FALSE;
863 nvlist_t *features = zpool_get_features(zhp);
864 boolean_t supported;
865 const char *feature = strchr(propname, '@') + 1;
866
867 supported = zpool_prop_feature(propname);
868 ASSERT(supported || zpool_prop_unsupported(propname));
869
870 /*
871 * Convert from feature name to feature guid. This conversion is
872 * unnecessary for unsupported@... properties because they already
873 * use guids.
874 */
875 if (supported) {
876 int ret;
877 spa_feature_t fid;
878
879 ret = zfeature_lookup_name(feature, &fid);
880 if (ret != 0) {
881 (void) strlcpy(buf, "-", len);
882 return (ENOTSUP);
883 }
884 feature = spa_feature_table[fid].fi_guid;
885 }
886
887 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
888 found = B_TRUE;
889
890 if (supported) {
891 if (!found) {
892 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
893 } else {
894 if (refcount == 0)
895 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
896 else
897 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
898 }
899 } else {
900 if (found) {
901 if (refcount == 0) {
902 (void) strlcpy(buf, ZFS_UNSUPPORTED_INACTIVE, len);
903 } else {
904 (void) strlcpy(buf, ZFS_UNSUPPORTED_READONLY, len);
905 }
906 } else {
907 (void) strlcpy(buf, "-", len);
908 return (ENOTSUP);
909 }
910 }
911
912 return (0);
913 }
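
/*
 * Editorial usage sketch (not compiled): querying a feature's state.
 * "feature@async_destroy" is one example of a supported feature name;
 * the result is "disabled", "enabled", or "active".
 */
#if 0
static void
example_feature_state(zpool_handle_t *zhp)
{
	char state[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy",
	    state, sizeof (state)) == 0)
		(void) printf("async_destroy: %s\n", state);
}
#endif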
914
915 /*
916 * Don't start the slice at the default block of 34; many storage
917  * devices will use a stripe width of 128k, while other vendors prefer a 1m
918  * alignment. Given 512B blocks, it is best to play it safe and ensure a 1m
919  * alignment. When the block size is larger by a power of 2,
920  * we will still be 1m aligned. Some devices are sensitive to the
921 * partition ending alignment as well.
922 */
923 #define NEW_START_BLOCK 2048
924 #define PARTITION_END_ALIGNMENT 2048
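
/*
 * Editorial note: with 512B sectors, 2048 sectors * 512B = 1,048,576B
 * = 1m, so both the start block and the end alignment above fall on 1m
 * boundaries; larger power-of-2 block sizes keep that property.
 */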
925
926 /*
927  * Validate the given pool name, optionally reporting an extended error
928  * message via 'hdl'.
929 */
930 boolean_t
931 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
932 {
933 namecheck_err_t why;
934 char what;
935 int ret;
936
937 ret = pool_namecheck(pool, &why, &what);
938
939 /*
940 * The rules for reserved pool names were extended at a later point.
941 * But we need to support users with existing pools that may now be
942 * invalid. So we only check for this expanded set of names during a
943 * create (or import), and only in userland.
944 */
945 if (ret == 0 && !isopen &&
946 (strncmp(pool, "mirror", 6) == 0 ||
947 strncmp(pool, "raidz", 5) == 0 ||
948 strncmp(pool, "spare", 5) == 0 ||
949 strcmp(pool, "log") == 0)) {
950 if (hdl != NULL)
951 zfs_error_aux(hdl,
952 dgettext(TEXT_DOMAIN, "name is reserved"));
953 return (B_FALSE);
954 }
955
956
957 if (ret != 0) {
958 if (hdl != NULL) {
959 switch (why) {
960 case NAME_ERR_TOOLONG:
961 zfs_error_aux(hdl,
962 dgettext(TEXT_DOMAIN, "name is too long"));
963 break;
964
965 case NAME_ERR_INVALCHAR:
966 zfs_error_aux(hdl,
967 dgettext(TEXT_DOMAIN, "invalid character "
968 "'%c' in pool name"), what);
969 break;
970
971 case NAME_ERR_NOLETTER:
972 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
973 "name must begin with a letter"));
974 break;
975
976 case NAME_ERR_RESERVED:
977 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
978 "name is reserved"));
979 break;
980
981 case NAME_ERR_DISKLIKE:
982 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
983 "pool name is reserved"));
984 break;
985
986 case NAME_ERR_LEADING_SLASH:
987 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
988 "leading slash in name"));
989 break;
990
991 case NAME_ERR_EMPTY_COMPONENT:
992 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
993 "empty component in name"));
994 break;
995
996 case NAME_ERR_TRAILING_SLASH:
997 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
998 "trailing slash in name"));
999 break;
1000
1001 case NAME_ERR_MULTIPLE_DELIMITERS:
1002 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1003 "multiple '@' and/or '#' delimiters in "
1004 "name"));
1005 break;
1006
1007 case NAME_ERR_NO_AT:
1008 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1009 "permission set is missing '@'"));
1010 break;
1011
1012 default:
1013 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1014 "(%d) not defined"), why);
1015 break;
1016 }
1017 }
1018 return (B_FALSE);
1019 }
1020
1021 return (B_TRUE);
1022 }
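
/*
 * Editorial usage sketch (not compiled): "tank" passes validation, while
 * a create-time name such as "mirror0" is rejected as reserved by the
 * check above.
 */
#if 0
static void
example_check_name(libzfs_handle_t *hdl)
{
	if (!zpool_name_valid(hdl, B_FALSE, "mirror0"))
		(void) printf("rejected: name is reserved\n");
}
#endif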
1023
1024 /*
1025 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1026 * state.
1027 */
1028 zpool_handle_t *
1029 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1030 {
1031 zpool_handle_t *zhp;
1032 boolean_t missing;
1033
1034 /*
1035 * Make sure the pool name is valid.
1036 */
1037 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1038 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1039 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1040 pool);
1041 return (NULL);
1042 }
1043
1044 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1045 return (NULL);
1046
1047 zhp->zpool_hdl = hdl;
1048 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1049
1050 if (zpool_refresh_stats(zhp, &missing) != 0) {
1051 zpool_close(zhp);
1052 return (NULL);
1053 }
1054
1055 if (missing) {
1056 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1057 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1058 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1059 zpool_close(zhp);
1060 return (NULL);
1061 }
1062
1063 return (zhp);
1064 }
1065
1066 /*
1067 * Like the above, but silent on error. Used when iterating over pools (because
1068 * the configuration cache may be out of date).
1069 */
1070 int
1071 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1072 {
1073 zpool_handle_t *zhp;
1074 boolean_t missing;
1075
1076 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1077 return (-1);
1078
1079 zhp->zpool_hdl = hdl;
1080 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1081
1082 if (zpool_refresh_stats(zhp, &missing) != 0) {
1083 zpool_close(zhp);
1084 return (-1);
1085 }
1086
1087 if (missing) {
1088 zpool_close(zhp);
1089 *ret = NULL;
1090 return (0);
1091 }
1092
1093 *ret = zhp;
1094 return (0);
1095 }
1096
1097 /*
1098 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1099 * state.
1100 */
1101 zpool_handle_t *
1102 zpool_open(libzfs_handle_t *hdl, const char *pool)
1103 {
1104 zpool_handle_t *zhp;
1105
1106 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1107 return (NULL);
1108
1109 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1110 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1111 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1112 zpool_close(zhp);
1113 return (NULL);
1114 }
1115
1116 return (zhp);
1117 }
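
/*
 * Editorial usage sketch (not compiled): the typical open/use/close
 * lifecycle around zpool_open(). libzfs_init()/libzfs_fini() come from
 * the wider libzfs API; "tank" is a placeholder pool name.
 */
#if 0
static void
example_open_pool(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;

	if ((hdl = libzfs_init()) == NULL)
		return;
	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
		(void) printf("opened %s\n", zpool_get_name(zhp));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
}
#endif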
1118
1119 /*
1120 * Close the handle. Simply frees the memory associated with the handle.
1121 */
1122 void
1123 zpool_close(zpool_handle_t *zhp)
1124 {
1125 nvlist_free(zhp->zpool_config);
1126 nvlist_free(zhp->zpool_old_config);
1127 nvlist_free(zhp->zpool_props);
1128 free(zhp);
1129 }
1130
1131 /*
1132 * Return the name of the pool.
1133 */
1134 const char *
1135 zpool_get_name(zpool_handle_t *zhp)
1136 {
1137 return (zhp->zpool_name);
1138 }
1139
1140
1141 /*
1142 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1143 */
1144 int
1145 zpool_get_state(zpool_handle_t *zhp)
1146 {
1147 return (zhp->zpool_state);
1148 }
1149
1150 /*
1151 * Create the named pool, using the provided vdev list. It is assumed
1152 * that the consumer has already validated the contents of the nvlist, so we
1153 * don't have to worry about error semantics.
1154 */
1155 int
1156 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1157 nvlist_t *props, nvlist_t *fsprops)
1158 {
1159 zfs_cmd_t zc = {"\0"};
1160 nvlist_t *zc_fsprops = NULL;
1161 nvlist_t *zc_props = NULL;
1162 char msg[1024];
1163 int ret = -1;
1164
1165 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1166 "cannot create '%s'"), pool);
1167
1168 if (!zpool_name_valid(hdl, B_FALSE, pool))
1169 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1170
1171 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1172 return (-1);
1173
1174 if (props) {
1175 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1176
1177 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1178 SPA_VERSION_1, flags, msg)) == NULL) {
1179 goto create_failed;
1180 }
1181 }
1182
1183 if (fsprops) {
1184 uint64_t zoned;
1185 char *zonestr;
1186
1187 zoned = ((nvlist_lookup_string(fsprops,
1188 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1189 strcmp(zonestr, "on") == 0);
1190
1191 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1192 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1193 goto create_failed;
1194 }
1195 if (!zc_props &&
1196 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1197 goto create_failed;
1198 }
1199 if (nvlist_add_nvlist(zc_props,
1200 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1201 goto create_failed;
1202 }
1203 }
1204
1205 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1206 goto create_failed;
1207
1208 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1209
1210 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1211
1212 zcmd_free_nvlists(&zc);
1213 nvlist_free(zc_props);
1214 nvlist_free(zc_fsprops);
1215
1216 switch (errno) {
1217 case EBUSY:
1218 /*
1219 * This can happen if the user has specified the same
1220 * device multiple times. We can't reliably detect this
1221 * until we try to add it and see we already have a
1222  * label. This can also happen if the device is
1223 * part of an active md or lvm device.
1224 */
1225 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1226 "one or more vdevs refer to the same device, or "
1227 "one of\nthe devices is part of an active md or "
1228 "lvm device"));
1229 return (zfs_error(hdl, EZFS_BADDEV, msg));
1230
1231 case ERANGE:
1232 /*
1233 * This happens if the record size is smaller or larger
1234 * than the allowed size range, or not a power of 2.
1235 *
1236 * NOTE: although zfs_valid_proplist is called earlier,
1237 * this case may have slipped through since the
1238 * pool does not exist yet and it is therefore
1239 * impossible to read properties e.g. max blocksize
1240 * from the pool.
1241 */
1242 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1243 "record size invalid"));
1244 return (zfs_error(hdl, EZFS_BADPROP, msg));
1245
1246 case EOVERFLOW:
1247 /*
1248 * This occurs when one of the devices is below
1249 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1250 * device was the problem device since there's no
1251 * reliable way to determine device size from userland.
1252 */
1253 {
1254 char buf[64];
1255
1256 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1257 sizeof (buf));
1258
1259 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 "one or more devices is less than the "
1261 "minimum size (%s)"), buf);
1262 }
1263 return (zfs_error(hdl, EZFS_BADDEV, msg));
1264
1265 case ENOSPC:
1266 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1267 "one or more devices is out of space"));
1268 return (zfs_error(hdl, EZFS_BADDEV, msg));
1269
1270 case ENOTBLK:
1271 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1272 "cache device must be a disk or disk slice"));
1273 return (zfs_error(hdl, EZFS_BADDEV, msg));
1274
1275 default:
1276 return (zpool_standard_error(hdl, errno, msg));
1277 }
1278 }
1279
1280 create_failed:
1281 zcmd_free_nvlists(&zc);
1282 nvlist_free(zc_props);
1283 nvlist_free(zc_fsprops);
1284 return (ret);
1285 }
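
/*
 * Editorial usage sketch (not compiled): creating a pool. The vdev tree
 * 'nvroot' is assumed to have been assembled by the caller (the zpool
 * command builds one from its command line); only the optional property
 * list is shown here.
 */
#if 0
static int
example_create(libzfs_handle_t *hdl, nvlist_t *nvroot)
{
	nvlist_t *props = NULL;
	int err;

	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_COMMENT), "example") == 0);

	err = zpool_create(hdl, "tank", nvroot, props, NULL);
	nvlist_free(props);
	return (err);
}
#endif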
1286
1287 /*
1288 * Destroy the given pool. It is up to the caller to ensure that there are no
1289 * datasets left in the pool.
1290 */
1291 int
1292 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1293 {
1294 zfs_cmd_t zc = {"\0"};
1295 zfs_handle_t *zfp = NULL;
1296 libzfs_handle_t *hdl = zhp->zpool_hdl;
1297 char msg[1024];
1298
1299 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1300 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1301 return (-1);
1302
1303 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1304 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1305
1306 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1307 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1308 "cannot destroy '%s'"), zhp->zpool_name);
1309
1310 if (errno == EROFS) {
1311 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1312 "one or more devices is read only"));
1313 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1314 } else {
1315 (void) zpool_standard_error(hdl, errno, msg);
1316 }
1317
1318 if (zfp)
1319 zfs_close(zfp);
1320 return (-1);
1321 }
1322
1323 if (zfp) {
1324 remove_mountpoint(zfp);
1325 zfs_close(zfp);
1326 }
1327
1328 return (0);
1329 }
1330
1331 /*
1332 * Add the given vdevs to the pool. The caller must have already performed the
1333 * necessary verification to ensure that the vdev specification is well-formed.
1334 */
1335 int
1336 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1337 {
1338 zfs_cmd_t zc = {"\0"};
1339 int ret;
1340 libzfs_handle_t *hdl = zhp->zpool_hdl;
1341 char msg[1024];
1342 nvlist_t **spares, **l2cache;
1343 uint_t nspares, nl2cache;
1344
1345 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1346 "cannot add to '%s'"), zhp->zpool_name);
1347
1348 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1349 SPA_VERSION_SPARES &&
1350 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1351 &spares, &nspares) == 0) {
1352 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1353 "upgraded to add hot spares"));
1354 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1355 }
1356
1357 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1358 SPA_VERSION_L2CACHE &&
1359 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1360 &l2cache, &nl2cache) == 0) {
1361 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1362 "upgraded to add cache devices"));
1363 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1364 }
1365
1366 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1367 return (-1);
1368 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1369
1370 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1371 switch (errno) {
1372 case EBUSY:
1373 /*
1374 * This can happen if the user has specified the same
1375 * device multiple times. We can't reliably detect this
1376 * until we try to add it and see we already have a
1377 * label.
1378 */
1379 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1380 "one or more vdevs refer to the same device"));
1381 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1382 break;
1383
1384 case EOVERFLOW:
1385 /*
1386  * This occurs when one of the devices is below
1387 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1388 * device was the problem device since there's no
1389 * reliable way to determine device size from userland.
1390 */
1391 {
1392 char buf[64];
1393
1394 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1395 sizeof (buf));
1396
1397 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1398 "device is less than the minimum "
1399 "size (%s)"), buf);
1400 }
1401 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1402 break;
1403
1404 case ENOTSUP:
1405 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1406 "pool must be upgraded to add these vdevs"));
1407 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1408 break;
1409
1410 case ENOTBLK:
1411 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1412 "cache device must be a disk or disk slice"));
1413 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1414 break;
1415
1416 default:
1417 (void) zpool_standard_error(hdl, errno, msg);
1418 }
1419
1420 ret = -1;
1421 } else {
1422 ret = 0;
1423 }
1424
1425 zcmd_free_nvlists(&zc);
1426
1427 return (ret);
1428 }
1429
1430 /*
1431 * Exports the pool from the system. The caller must ensure that there are no
1432 * mounted datasets in the pool.
1433 */
1434 static int
1435 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1436 const char *log_str)
1437 {
1438 zfs_cmd_t zc = {"\0"};
1439 char msg[1024];
1440
1441 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1442 "cannot export '%s'"), zhp->zpool_name);
1443
1444 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1445 zc.zc_cookie = force;
1446 zc.zc_guid = hardforce;
1447 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1448
1449 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1450 switch (errno) {
1451 case EXDEV:
1452 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1453 "use '-f' to override the following errors:\n"
1454 "'%s' has an active shared spare which could be"
1455 " used by other pools once '%s' is exported."),
1456 zhp->zpool_name, zhp->zpool_name);
1457 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1458 msg));
1459 default:
1460 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1461 msg));
1462 }
1463 }
1464
1465 return (0);
1466 }
1467
1468 int
1469 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1470 {
1471 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1472 }
1473
1474 int
1475 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1476 {
1477 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1478 }
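
/*
 * Editorial usage sketch (not compiled): a plain export; pass B_TRUE to
 * force, or call zpool_export_force() to also hard-force. The log string
 * is recorded in the pool history.
 */
#if 0
static int
example_export(zpool_handle_t *zhp)
{
	return (zpool_export(zhp, B_FALSE, "zpool export tank"));
}
#endif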
1479
1480 static void
1481 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1482 nvlist_t *config)
1483 {
1484 nvlist_t *nv = NULL;
1485 uint64_t rewindto;
1486 int64_t loss = -1;
1487 struct tm t;
1488 char timestr[128];
1489
1490 if (!hdl->libzfs_printerr || config == NULL)
1491 return;
1492
1493 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1494 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1495 return;
1496 }
1497
1498 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1499 return;
1500 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1501
1502 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1503 strftime(timestr, 128, "%c", &t) != 0) {
1504 if (dryrun) {
1505 (void) printf(dgettext(TEXT_DOMAIN,
1506 "Would be able to return %s "
1507 "to its state as of %s.\n"),
1508 name, timestr);
1509 } else {
1510 (void) printf(dgettext(TEXT_DOMAIN,
1511 "Pool %s returned to its state as of %s.\n"),
1512 name, timestr);
1513 }
1514 if (loss > 120) {
1515 (void) printf(dgettext(TEXT_DOMAIN,
1516 "%s approximately %lld "),
1517 dryrun ? "Would discard" : "Discarded",
1518 ((longlong_t)loss + 30) / 60);
1519 (void) printf(dgettext(TEXT_DOMAIN,
1520 "minutes of transactions.\n"));
1521 } else if (loss > 0) {
1522 (void) printf(dgettext(TEXT_DOMAIN,
1523 "%s approximately %lld "),
1524 dryrun ? "Would discard" : "Discarded",
1525 (longlong_t)loss);
1526 (void) printf(dgettext(TEXT_DOMAIN,
1527 "seconds of transactions.\n"));
1528 }
1529 }
1530 }
1531
1532 void
1533 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1534 nvlist_t *config)
1535 {
1536 nvlist_t *nv = NULL;
1537 int64_t loss = -1;
1538 uint64_t edata = UINT64_MAX;
1539 uint64_t rewindto;
1540 struct tm t;
1541 char timestr[128];
1542
1543 if (!hdl->libzfs_printerr)
1544 return;
1545
1546 if (reason >= 0)
1547 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1548 else
1549 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1550
1551 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME is missing */
1552 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1553 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1554 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1555 goto no_info;
1556
1557 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1558 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1559 &edata);
1560
1561 (void) printf(dgettext(TEXT_DOMAIN,
1562 "Recovery is possible, but will result in some data loss.\n"));
1563
1564 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1565 strftime(timestr, 128, "%c", &t) != 0) {
1566 (void) printf(dgettext(TEXT_DOMAIN,
1567 "\tReturning the pool to its state as of %s\n"
1568 "\tshould correct the problem. "),
1569 timestr);
1570 } else {
1571 (void) printf(dgettext(TEXT_DOMAIN,
1572 "\tReverting the pool to an earlier state "
1573 "should correct the problem.\n\t"));
1574 }
1575
1576 if (loss > 120) {
1577 (void) printf(dgettext(TEXT_DOMAIN,
1578 "Approximately %lld minutes of data\n"
1579 "\tmust be discarded, irreversibly. "),
1580 ((longlong_t)loss + 30) / 60);
1581 } else if (loss > 0) {
1582 (void) printf(dgettext(TEXT_DOMAIN,
1583 "Approximately %lld seconds of data\n"
1584 "\tmust be discarded, irreversibly. "),
1585 (longlong_t)loss);
1586 }
1587 if (edata != 0 && edata != UINT64_MAX) {
1588 if (edata == 1) {
1589 (void) printf(dgettext(TEXT_DOMAIN,
1590 "After rewind, at least\n"
1591 "\tone persistent user-data error will remain. "));
1592 } else {
1593 (void) printf(dgettext(TEXT_DOMAIN,
1594 "After rewind, several\n"
1595 "\tpersistent user-data errors will remain. "));
1596 }
1597 }
1598 (void) printf(dgettext(TEXT_DOMAIN,
1599 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1600 reason >= 0 ? "clear" : "import", name);
1601
1602 (void) printf(dgettext(TEXT_DOMAIN,
1603 "A scrub of the pool\n"
1604 "\tis strongly recommended after recovery.\n"));
1605 return;
1606
1607 no_info:
1608 (void) printf(dgettext(TEXT_DOMAIN,
1609 "Destroy and re-create the pool from\n\ta backup source.\n"));
1610 }
1611
1612 /*
1613 * zpool_import() is a contracted interface. Should be kept the same
1614 * if possible.
1615 *
1616 * Applications should use zpool_import_props() to import a pool with
1617  * new property values to be set.
1618 */
1619 int
1620 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1621 char *altroot)
1622 {
1623 nvlist_t *props = NULL;
1624 int ret;
1625
1626 if (altroot != NULL) {
1627 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1628 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1629 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1630 newname));
1631 }
1632
1633 if (nvlist_add_string(props,
1634 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1635 nvlist_add_string(props,
1636 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1637 nvlist_free(props);
1638 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1639 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1640 newname));
1641 }
1642 }
1643
1644 ret = zpool_import_props(hdl, config, newname, props,
1645 ZFS_IMPORT_NORMAL);
1646 nvlist_free(props);
1647 return (ret);
1648 }
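
/*
 * Editorial usage sketch (not compiled): importing a pool under its
 * original name. The 'config' nvlist is assumed to come from pool
 * discovery (see zpool_find_import() in libzfs_import.c).
 */
#if 0
static int
example_import(libzfs_handle_t *hdl, nvlist_t *config)
{
	/* NULL newname keeps the original name; NULL altroot sets none. */
	return (zpool_import(hdl, config, NULL, NULL));
}
#endif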
1649
1650 static void
1651 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1652 int indent)
1653 {
1654 nvlist_t **child;
1655 uint_t c, children;
1656 char *vname;
1657 uint64_t is_log = 0;
1658
1659 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1660 &is_log);
1661
1662 if (name != NULL)
1663 (void) printf("\t%*s%s%s\n", indent, "", name,
1664 is_log ? " [log]" : "");
1665
1666 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1667 &child, &children) != 0)
1668 return;
1669
1670 for (c = 0; c < children; c++) {
1671 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1672 print_vdev_tree(hdl, vname, child[c], indent + 2);
1673 free(vname);
1674 }
1675 }
1676
1677 void
1678 zpool_print_unsup_feat(nvlist_t *config)
1679 {
1680 nvlist_t *nvinfo, *unsup_feat;
1681 nvpair_t *nvp;
1682
1683 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1684 0);
1685 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1686 &unsup_feat) == 0);
1687
1688 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1689 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1690 char *desc;
1691
1692 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1693 verify(nvpair_value_string(nvp, &desc) == 0);
1694
1695 if (strlen(desc) > 0)
1696 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1697 else
1698 (void) printf("\t%s\n", nvpair_name(nvp));
1699 }
1700 }
1701
1702 /*
1703 * Import the given pool using the known configuration and a list of
1704 * properties to be set. The configuration should have come from
1705  * zpool_find_import(). The 'newname' parameter controls whether the pool
1706 * is imported with a different name.
1707 */
1708 int
1709 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1710 nvlist_t *props, int flags)
1711 {
1712 zfs_cmd_t zc = {"\0"};
1713 zpool_rewind_policy_t policy;
1714 nvlist_t *nv = NULL;
1715 nvlist_t *nvinfo = NULL;
1716 nvlist_t *missing = NULL;
1717 char *thename;
1718 char *origname;
1719 int ret;
1720 int error = 0;
1721 char errbuf[1024];
1722
1723 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1724 &origname) == 0);
1725
1726 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1727 "cannot import pool '%s'"), origname);
1728
1729 if (newname != NULL) {
1730 if (!zpool_name_valid(hdl, B_FALSE, newname))
1731 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1732 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1733 newname));
1734 thename = (char *)newname;
1735 } else {
1736 thename = origname;
1737 }
1738
1739 if (props != NULL) {
1740 uint64_t version;
1741 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1742
1743 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1744 &version) == 0);
1745
1746 if ((props = zpool_valid_proplist(hdl, origname,
1747 props, version, flags, errbuf)) == NULL)
1748 return (-1);
1749 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1750 nvlist_free(props);
1751 return (-1);
1752 }
1753 nvlist_free(props);
1754 }
1755
1756 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1757
1758 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1759 &zc.zc_guid) == 0);
1760
1761 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1762 zcmd_free_nvlists(&zc);
1763 return (-1);
1764 }
1765 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1766 zcmd_free_nvlists(&zc);
1767 return (-1);
1768 }
1769
1770 zc.zc_cookie = flags;
1771 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1772 errno == ENOMEM) {
1773 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1774 zcmd_free_nvlists(&zc);
1775 return (-1);
1776 }
1777 }
1778 if (ret != 0)
1779 error = errno;
1780
1781 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1782
1783 zcmd_free_nvlists(&zc);
1784
1785 zpool_get_rewind_policy(config, &policy);
1786
1787 if (error) {
1788 char desc[1024];
1789
1790 /*
1791 * Dry-run failed, but we print out what success
1792  * looks like if we found a best txg.
1793 */
1794 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1795 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1796 B_TRUE, nv);
1797 nvlist_free(nv);
1798 return (-1);
1799 }
1800
1801 if (newname == NULL)
1802 (void) snprintf(desc, sizeof (desc),
1803 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1804 thename);
1805 else
1806 (void) snprintf(desc, sizeof (desc),
1807 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1808 origname, thename);
1809
1810 switch (error) {
1811 case ENOTSUP:
1812 if (nv != NULL && nvlist_lookup_nvlist(nv,
1813 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1814 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1815 (void) printf(dgettext(TEXT_DOMAIN, "This "
1816 "pool uses the following feature(s) not "
1817 "supported by this system:\n"));
1818 zpool_print_unsup_feat(nv);
1819 if (nvlist_exists(nvinfo,
1820 ZPOOL_CONFIG_CAN_RDONLY)) {
1821 (void) printf(dgettext(TEXT_DOMAIN,
1822 "All unsupported features are only "
1823 "required for writing to the pool."
1824 "\nThe pool can be imported using "
1825 "'-o readonly=on'.\n"));
1826 }
1827 }
1828 /*
1829 * Unsupported version.
1830 */
1831 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1832 break;
1833
1834 case EINVAL:
1835 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1836 break;
1837
1838 case EROFS:
1839 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1840 "one or more devices is read only"));
1841 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1842 break;
1843
1844 case ENXIO:
1845 if (nv && nvlist_lookup_nvlist(nv,
1846 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1847 nvlist_lookup_nvlist(nvinfo,
1848 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1849 (void) printf(dgettext(TEXT_DOMAIN,
1850 "The devices below are missing, use "
1851 "'-m' to import the pool anyway:\n"));
1852 print_vdev_tree(hdl, NULL, missing, 2);
1853 (void) printf("\n");
1854 }
1855 (void) zpool_standard_error(hdl, error, desc);
1856 break;
1857
1858 case EEXIST:
1859 (void) zpool_standard_error(hdl, error, desc);
1860 break;
1861
1862 case EBUSY:
1863 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1864 "one or more devices are already in use\n"));
1865 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1866 break;
1867 case ENAMETOOLONG:
1868 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1869 "new name of at least one dataset is longer than "
1870 "the maximum allowable length"));
1871 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1872 break;
1873 default:
1874 (void) zpool_standard_error(hdl, error, desc);
1875 zpool_explain_recover(hdl,
1876 newname ? origname : thename, -error, nv);
1877 break;
1878 }
1879
1880 nvlist_free(nv);
1881 ret = -1;
1882 } else {
1883 zpool_handle_t *zhp;
1884
1885 /*
1886 * This should never fail, but play it safe anyway.
1887 */
1888 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1889 ret = -1;
1890 else if (zhp != NULL)
1891 zpool_close(zhp);
1892 if (policy.zrp_request &
1893 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1894 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1895 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1896 }
1897 nvlist_free(nv);
1898 return (0);
1899 }
1900
1901 return (ret);
1902 }
1903
1904 /*
1905 * Scan the pool.
1906 */
1907 int
1908 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1909 {
1910 zfs_cmd_t zc = {"\0"};
1911 char msg[1024];
1912 libzfs_handle_t *hdl = zhp->zpool_hdl;
1913
1914 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1915 zc.zc_cookie = func;
1916
1917 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1918 (errno == ENOENT && func != POOL_SCAN_NONE))
1919 return (0);
1920
1921 if (func == POOL_SCAN_SCRUB) {
1922 (void) snprintf(msg, sizeof (msg),
1923 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1924 } else if (func == POOL_SCAN_NONE) {
1925 (void) snprintf(msg, sizeof (msg),
1926 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1927 zc.zc_name);
1928 } else {
1929 assert(!"unexpected result");
1930 }
1931
1932 if (errno == EBUSY) {
1933 nvlist_t *nvroot;
1934 pool_scan_stat_t *ps = NULL;
1935 uint_t psc;
1936
1937 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1938 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1939 (void) nvlist_lookup_uint64_array(nvroot,
1940 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1941 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1942 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1943 else
1944 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1945 } else if (errno == ENOENT) {
1946 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1947 } else {
1948 return (zpool_standard_error(hdl, errno, msg));
1949 }
1950 }
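
/*
 * Editorial usage sketch (not compiled): starting and cancelling a scrub
 * with zpool_scan(); errors are reported through the libzfs handle.
 */
#if 0
static void
example_scrub(zpool_handle_t *zhp)
{
	if (zpool_scan(zhp, POOL_SCAN_SCRUB) == 0)
		(void) printf("scrub started\n");
	(void) zpool_scan(zhp, POOL_SCAN_NONE);	/* cancel it */
}
#endif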
1951
1952 /*
1953  * Find a vdev that matches the specified search criteria. We use the
1954  * nvpair name to determine how we should look for the device.
1955  * 'avail_spare' is set to B_TRUE if the provided guid refers to an AVAIL
1956  * spare; B_FALSE if it is an INUSE spare.
1957 */
1958 static nvlist_t *
1959 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1960 boolean_t *l2cache, boolean_t *log)
1961 {
1962 uint_t c, children;
1963 nvlist_t **child;
1964 nvlist_t *ret;
1965 uint64_t is_log;
1966 char *srchkey;
1967 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1968
1969 /* Nothing to look for */
1970 if (search == NULL || pair == NULL)
1971 return (NULL);
1972
1973 /* Obtain the key we will use to search */
1974 srchkey = nvpair_name(pair);
1975
1976 switch (nvpair_type(pair)) {
1977 case DATA_TYPE_UINT64:
1978 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1979 uint64_t srchval, theguid;
1980
1981 verify(nvpair_value_uint64(pair, &srchval) == 0);
1982 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1983 &theguid) == 0);
1984 if (theguid == srchval)
1985 return (nv);
1986 }
1987 break;
1988
1989 case DATA_TYPE_STRING: {
1990 char *srchval, *val;
1991
1992 verify(nvpair_value_string(pair, &srchval) == 0);
1993 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1994 break;
1995
1996 /*
1997 * Search for the requested value. Special cases:
1998 *
1999 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2000 * "-part1", or "p1". The suffix is hidden from the user,
2001 * but included in the string, so this matches around it.
2002  * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2003 * is used to check all possible expanded paths.
2004 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2005 *
2006 * Otherwise, all other searches are simple string compares.
2007 */
2008 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2009 uint64_t wholedisk = 0;
2010
2011 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2012 &wholedisk);
2013 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2014 return (nv);
2015
2016 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2017 char *type, *idx, *end, *p;
2018 uint64_t id, vdev_id;
2019
2020 /*
2021 * Determine our vdev type, keeping in mind
2022 * that the srchval is composed of a type and
2023  * vdev id pair (e.g. mirror-4).
2024 */
2025 if ((type = strdup(srchval)) == NULL)
2026 return (NULL);
2027
2028 if ((p = strrchr(type, '-')) == NULL) {
2029 free(type);
2030 break;
2031 }
2032 idx = p + 1;
2033 *p = '\0';
2034
2035 /*
2036 * If the types don't match then keep looking.
2037 */
2038 if (strncmp(val, type, strlen(val)) != 0) {
2039 free(type);
2040 break;
2041 }
2042
2043 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2044 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2045 strncmp(type, VDEV_TYPE_MIRROR,
2046 strlen(VDEV_TYPE_MIRROR)) == 0);
2047 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2048 &id) == 0);
2049
2050 errno = 0;
2051 vdev_id = strtoull(idx, &end, 10);
2052
2053 free(type);
2054 if (errno != 0)
2055 return (NULL);
2056
2057 /*
2058 * Now verify that we have the correct vdev id.
2059 */
2060 if (vdev_id == id)
2061 return (nv);
2062 }
2063
2064 /*
2065 * Common case
2066 */
2067 if (strcmp(srchval, val) == 0)
2068 return (nv);
2069 break;
2070 }
2071
2072 default:
2073 break;
2074 }
2075
2076 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2077 &child, &children) != 0)
2078 return (NULL);
2079
2080 for (c = 0; c < children; c++) {
2081 if ((ret = vdev_to_nvlist_iter(child[c], search,
2082 avail_spare, l2cache, NULL)) != NULL) {
2083 /*
2084 * The 'is_log' value is only set for the top-level
2085 * vdev, not the leaf vdevs. So we always look up the
2086 * log device from the root of the vdev tree (where
2087 * 'log' is non-NULL).
2088 */
2089 if (log != NULL &&
2090 nvlist_lookup_uint64(child[c],
2091 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2092 is_log) {
2093 *log = B_TRUE;
2094 }
2095 return (ret);
2096 }
2097 }
2098
2099 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2100 &child, &children) == 0) {
2101 for (c = 0; c < children; c++) {
2102 if ((ret = vdev_to_nvlist_iter(child[c], search,
2103 avail_spare, l2cache, NULL)) != NULL) {
2104 *avail_spare = B_TRUE;
2105 return (ret);
2106 }
2107 }
2108 }
2109
2110 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2111 &child, &children) == 0) {
2112 for (c = 0; c < children; c++) {
2113 if ((ret = vdev_to_nvlist_iter(child[c], search,
2114 avail_spare, l2cache, NULL)) != NULL) {
2115 *l2cache = B_TRUE;
2116 return (ret);
2117 }
2118 }
2119 }
2120
2121 return (NULL);
2122 }
2123
2124 /*
2125 * Given a physical path (minus the "/devices" prefix), find the
2126 * associated vdev.
2127 */
2128 nvlist_t *
2129 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2130 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2131 {
2132 nvlist_t *search, *nvroot, *ret;
2133
2134 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2135 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2136
2137 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2138 &nvroot) == 0);
2139
2140 *avail_spare = B_FALSE;
2141 *l2cache = B_FALSE;
2142 if (log != NULL)
2143 *log = B_FALSE;
2144 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2145 nvlist_free(search);
2146
2147 return (ret);
2148 }
2149
2150 /*
2151 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2152 */
2153 boolean_t
2154 zpool_vdev_is_interior(const char *name)
2155 {
2156 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2157 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2158 return (B_TRUE);
2159 return (B_FALSE);
2160 }
2161
2162 nvlist_t *
2163 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2164 boolean_t *l2cache, boolean_t *log)
2165 {
2166 char *end;
2167 nvlist_t *nvroot, *search, *ret;
2168 uint64_t guid;
2169
2170 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2171
2172 guid = strtoull(path, &end, 0);
2173 if (guid != 0 && *end == '\0') {
2174 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2175 } else if (zpool_vdev_is_interior(path)) {
2176 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2177 } else {
2178 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2179 }
2180
2181 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2182 &nvroot) == 0);
2183
2184 *avail_spare = B_FALSE;
2185 *l2cache = B_FALSE;
2186 if (log != NULL)
2187 *log = B_FALSE;
2188 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2189 nvlist_free(search);
2190
2191 return (ret);
2192 }
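
/*
 * Usage sketch (illustrative, excluded from the build): zpool_find_vdev()
 * accepts a vdev GUID, a top-level vdev name such as "mirror-0", or a
 * device path (full or short). The device names below are placeholders.
 */
#if 0
static void
example_find_vdev(zpool_handle_t *zhp)
{
	boolean_t avail_spare, l2cache, islog;
	nvlist_t *tgt;

	/* Look up a disk by its short device name. */
	tgt = zpool_find_vdev(zhp, "sda", &avail_spare, &l2cache, &islog);
	if (tgt != NULL && !avail_spare && !l2cache)
		(void) printf("found vdev%s\n", islog ? " (log)" : "");

	/* Or look up a top-level vdev by its <type>-<id> name. */
	tgt = zpool_find_vdev(zhp, "mirror-0", &avail_spare, &l2cache, NULL);
}
#endif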
2193
2194 static int
2195 vdev_online(nvlist_t *nv)
2196 {
2197 uint64_t ival;
2198
2199 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2200 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2201 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2202 return (0);
2203
2204 return (1);
2205 }
2206
2207 /*
2208 * Helper function for zpool_get_physpaths().
2209 */
2210 static int
2211 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2212 size_t *bytes_written)
2213 {
2214 size_t bytes_left, pos, rsz;
2215 char *tmppath;
2216 const char *format;
2217
2218 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2219 &tmppath) != 0)
2220 return (EZFS_NODEVICE);
2221
2222 pos = *bytes_written;
2223 bytes_left = physpath_size - pos;
2224 format = (pos == 0) ? "%s" : " %s";
2225
2226 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2227 *bytes_written += rsz;
2228
2229 if (rsz >= bytes_left) {
2230 /* if physpath was not copied properly, clear it */
2231 if (bytes_left != 0) {
2232 physpath[pos] = 0;
2233 }
2234 return (EZFS_NOSPC);
2235 }
2236 return (0);
2237 }
2238
2239 static int
2240 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2241 size_t *rsz, boolean_t is_spare)
2242 {
2243 char *type;
2244 int ret;
2245
2246 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2247 return (EZFS_INVALCONFIG);
2248
2249 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2250 /*
2251 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2252 * For a spare vdev, we only want to boot from the active
2253 * spare device.
2254 */
2255 if (is_spare) {
2256 uint64_t spare = 0;
2257 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2258 &spare);
2259 if (!spare)
2260 return (EZFS_INVALCONFIG);
2261 }
2262
2263 if (vdev_online(nv)) {
2264 if ((ret = vdev_get_one_physpath(nv, physpath,
2265 phypath_size, rsz)) != 0)
2266 return (ret);
2267 }
2268 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2269 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2270 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2271 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2272 nvlist_t **child;
2273 uint_t count;
2274 int i, ret;
2275
2276 if (nvlist_lookup_nvlist_array(nv,
2277 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2278 return (EZFS_INVALCONFIG);
2279
2280 for (i = 0; i < count; i++) {
2281 ret = vdev_get_physpaths(child[i], physpath,
2282 phypath_size, rsz, is_spare);
2283 if (ret == EZFS_NOSPC)
2284 return (ret);
2285 }
2286 }
2287
2288 return (EZFS_POOL_INVALARG);
2289 }
2290
2291 /*
2292 * Get phys_path for a root pool config.
2293 * Return 0 on success; non-zero on failure.
2294 */
2295 static int
2296 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2297 {
2298 size_t rsz;
2299 nvlist_t *vdev_root;
2300 nvlist_t **child;
2301 uint_t count;
2302 char *type;
2303
2304 rsz = 0;
2305
2306 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2307 &vdev_root) != 0)
2308 return (EZFS_INVALCONFIG);
2309
2310 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2311 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2312 &child, &count) != 0)
2313 return (EZFS_INVALCONFIG);
2314
2315 /*
2316 * root pool can only have a single top-level vdev.
2317 */
2318 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2319 return (EZFS_POOL_INVALARG);
2320
2321 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2322 B_FALSE);
2323
2324 /* No online devices */
2325 if (rsz == 0)
2326 return (EZFS_NODEVICE);
2327
2328 return (0);
2329 }
2330
2331 /*
2332 * Get phys_path for a root pool.
2333 * Return 0 on success; non-zero on failure.
2334 */
2335 int
2336 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2337 {
2338 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2339 phypath_size));
2340 }
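
/*
 * Usage sketch (illustrative, excluded from the build): fetching the
 * physical path(s) of a bootable root pool. On success 'physpath' holds
 * a space-separated list, one entry per online disk in the single
 * top-level vdev.
 */
#if 0
static void
example_get_physpath(zpool_handle_t *zhp)
{
	char physpath[MAXPATHLEN];

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("boot path(s): %s\n", physpath);
}
#endif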
2341
2342 /*
2343 * If the device has been dynamically expanded then we need to relabel
2344 * the disk to use the new unallocated space.
2345 */
2346 static int
2347 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2348 {
2349 int fd, error;
2350
2351 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2352 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2353 "relabel '%s': unable to open device: %d"), path, errno);
2354 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2355 }
2356
2357 /*
2358 * It's possible that we might encounter an error if the device
2359 * does not have any unallocated space left. If so, we simply
2360 * ignore that error and continue on.
2361 *
2362 * Also, we don't call efi_rescan() - that would just return EBUSY.
2363 * The module will do it for us in vdev_disk_open().
2364 */
2365 error = efi_use_whole_disk(fd);
2366
2367 /* Flush the buffers to disk and invalidate the page cache. */
2368 (void) fsync(fd);
2369 (void) ioctl(fd, BLKFLSBUF);
2370
2371 (void) close(fd);
2372 if (error && error != VT_ENOSPC) {
2373 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2374 "relabel '%s': unable to read disk capacity"), path);
2375 return (zfs_error(hdl, EZFS_NOCAP, msg));
2376 }
2377
2378 return (0);
2379 }
2380
2381 /*
2382 * Bring the specified vdev online. The 'flags' parameter is a set of the
2383 * ZFS_ONLINE_* flags.
2384 */
2385 int
2386 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2387 vdev_state_t *newstate)
2388 {
2389 zfs_cmd_t zc = {"\0"};
2390 char msg[1024];
2391 nvlist_t *tgt;
2392 boolean_t avail_spare, l2cache, islog;
2393 libzfs_handle_t *hdl = zhp->zpool_hdl;
2394 int error;
2395
2396 if (flags & ZFS_ONLINE_EXPAND) {
2397 (void) snprintf(msg, sizeof (msg),
2398 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2399 } else {
2400 (void) snprintf(msg, sizeof (msg),
2401 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2402 }
2403
2404 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2405 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2406 &islog)) == NULL)
2407 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2408
2409 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2410
2411 if (avail_spare)
2412 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2413
2414 if (flags & ZFS_ONLINE_EXPAND ||
2415 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2416 uint64_t wholedisk = 0;
2417
2418 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2419 &wholedisk);
2420
2421 /*
2422 * XXX - L2ARC 1.0 devices can't support expansion.
2423 */
2424 if (l2cache) {
2425 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2426 "cannot expand cache devices"));
2427 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2428 }
2429
2430 if (wholedisk) {
2431 const char *fullpath = path;
2432 char buf[MAXPATHLEN];
2433
2434 if (path[0] != '/') {
2435 error = zfs_resolve_shortname(path, buf,
2436 sizeof (buf));
2437 if (error != 0)
2438 return (zfs_error(hdl, EZFS_NODEVICE,
2439 msg));
2440
2441 fullpath = buf;
2442 }
2443
2444 error = zpool_relabel_disk(hdl, fullpath, msg);
2445 if (error != 0)
2446 return (error);
2447 }
2448 }
2449
2450 zc.zc_cookie = VDEV_STATE_ONLINE;
2451 zc.zc_obj = flags;
2452
2453 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2454 if (errno == EINVAL) {
2455 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2456 "from this pool into a new one. Use '%s' "
2457 "instead"), "zpool detach");
2458 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2459 }
2460 return (zpool_standard_error(hdl, errno, msg));
2461 }
2462
2463 *newstate = zc.zc_cookie;
2464 return (0);
2465 }
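
/*
 * Usage sketch (illustrative, excluded from the build): onlining a device
 * and, in the same call, asking ZFS to expand it onto any newly available
 * capacity. The device name "sdb" is a placeholder for a pool member.
 */
#if 0
static int
example_online_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;
	int error;

	error = zpool_vdev_online(zhp, "sdb", ZFS_ONLINE_EXPAND, &newstate);
	if (error == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) fprintf(stderr, "onlined, but device is not healthy\n");

	return (error);
}
#endif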
2466
2467 /*
2468 * Take the specified vdev offline
2469 */
2470 int
2471 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2472 {
2473 zfs_cmd_t zc = {"\0"};
2474 char msg[1024];
2475 nvlist_t *tgt;
2476 boolean_t avail_spare, l2cache;
2477 libzfs_handle_t *hdl = zhp->zpool_hdl;
2478
2479 (void) snprintf(msg, sizeof (msg),
2480 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2481
2482 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2483 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2484 NULL)) == NULL)
2485 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2486
2487 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2488
2489 if (avail_spare)
2490 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2491
2492 zc.zc_cookie = VDEV_STATE_OFFLINE;
2493 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2494
2495 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2496 return (0);
2497
2498 switch (errno) {
2499 case EBUSY:
2500
2501 /*
2502 * There are no other replicas of this device.
2503 */
2504 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2505
2506 case EEXIST:
2507 /*
2508 * The log device has unplayed logs
2509 */
2510 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2511
2512 default:
2513 return (zpool_standard_error(hdl, errno, msg));
2514 }
2515 }
2516
2517 /*
2518 * Mark the given vdev faulted.
2519 */
2520 int
2521 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2522 {
2523 zfs_cmd_t zc = {"\0"};
2524 char msg[1024];
2525 libzfs_handle_t *hdl = zhp->zpool_hdl;
2526
2527 (void) snprintf(msg, sizeof (msg),
2528 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2529
2530 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2531 zc.zc_guid = guid;
2532 zc.zc_cookie = VDEV_STATE_FAULTED;
2533 zc.zc_obj = aux;
2534
2535 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2536 return (0);
2537
2538 switch (errno) {
2539 case EBUSY:
2540
2541 /*
2542 * There are no other replicas of this device.
2543 */
2544 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2545
2546 default:
2547 return (zpool_standard_error(hdl, errno, msg));
2548 }
2549
2550 }
2551
2552 /*
2553 * Mark the given vdev degraded.
2554 */
2555 int
2556 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2557 {
2558 zfs_cmd_t zc = {"\0"};
2559 char msg[1024];
2560 libzfs_handle_t *hdl = zhp->zpool_hdl;
2561
2562 (void) snprintf(msg, sizeof (msg),
2563 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2564
2565 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2566 zc.zc_guid = guid;
2567 zc.zc_cookie = VDEV_STATE_DEGRADED;
2568 zc.zc_obj = aux;
2569
2570 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2571 return (0);
2572
2573 return (zpool_standard_error(hdl, errno, msg));
2574 }
2575
2576 /*
2577 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2578 * a hot spare.
2579 */
2580 static boolean_t
2581 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2582 {
2583 nvlist_t **child;
2584 uint_t c, children;
2585 char *type;
2586
2587 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2588 &children) == 0) {
2589 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2590 &type) == 0);
2591
2592 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2593 children == 2 && child[which] == tgt)
2594 return (B_TRUE);
2595
2596 for (c = 0; c < children; c++)
2597 if (is_replacing_spare(child[c], tgt, which))
2598 return (B_TRUE);
2599 }
2600
2601 return (B_FALSE);
2602 }
2603
2604 /*
2605 * Attach new_disk (fully described by nvroot) to old_disk.
2606 * If 'replacing' is specified, the new disk will replace the old one.
2607 */
2608 int
2609 zpool_vdev_attach(zpool_handle_t *zhp,
2610 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2611 {
2612 zfs_cmd_t zc = {"\0"};
2613 char msg[1024];
2614 int ret;
2615 nvlist_t *tgt;
2616 boolean_t avail_spare, l2cache, islog;
2617 uint64_t val;
2618 char *newname;
2619 nvlist_t **child;
2620 uint_t children;
2621 nvlist_t *config_root;
2622 libzfs_handle_t *hdl = zhp->zpool_hdl;
2623 boolean_t rootpool = zpool_is_bootable(zhp);
2624
2625 if (replacing)
2626 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2627 "cannot replace %s with %s"), old_disk, new_disk);
2628 else
2629 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2630 "cannot attach %s to %s"), new_disk, old_disk);
2631
2632 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2633 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2634 &islog)) == 0)
2635 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2636
2637 if (avail_spare)
2638 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2639
2640 if (l2cache)
2641 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2642
2643 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2644 zc.zc_cookie = replacing;
2645
2646 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2647 &child, &children) != 0 || children != 1) {
2648 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2649 "new device must be a single disk"));
2650 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2651 }
2652
2653 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2654 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2655
2656 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2657 return (-1);
2658
2659 /*
2660 * If the target is a hot spare that has been swapped in, we can only
2661 * replace it with another hot spare.
2662 */
2663 if (replacing &&
2664 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2665 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2666 NULL) == NULL || !avail_spare) &&
2667 is_replacing_spare(config_root, tgt, 1)) {
2668 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2669 "can only be replaced by another hot spare"));
2670 free(newname);
2671 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2672 }
2673
2674 free(newname);
2675
2676 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2677 return (-1);
2678
2679 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2680
2681 zcmd_free_nvlists(&zc);
2682
2683 if (ret == 0) {
2684 if (rootpool) {
2685 /*
2686 * XXX need a better way to prevent user from
2687 * booting up a half-baked vdev.
2688 */
2689 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2690 "sure to wait until resilver is done "
2691 "before rebooting.\n"));
2692 }
2693 return (0);
2694 }
2695
2696 switch (errno) {
2697 case ENOTSUP:
2698 /*
2699 * Can't attach to or replace this type of vdev.
2700 */
2701 if (replacing) {
2702 uint64_t version = zpool_get_prop_int(zhp,
2703 ZPOOL_PROP_VERSION, NULL);
2704
2705 if (islog)
2706 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2707 "cannot replace a log with a spare"));
2708 else if (version >= SPA_VERSION_MULTI_REPLACE)
2709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2710 "already in replacing/spare config; wait "
2711 "for completion or use 'zpool detach'"));
2712 else
2713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2714 "cannot replace a replacing device"));
2715 } else {
2716 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2717 "can only attach to mirrors and top-level "
2718 "disks"));
2719 }
2720 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2721 break;
2722
2723 case EINVAL:
2724 /*
2725 * The new device must be a single disk.
2726 */
2727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2728 "new device must be a single disk"));
2729 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2730 break;
2731
2732 case EBUSY:
2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2734 new_disk);
2735 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2736 break;
2737
2738 case EOVERFLOW:
2739 /*
2740 * The new device is too small.
2741 */
2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2743 "device is too small"));
2744 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2745 break;
2746
2747 case EDOM:
2748 /*
2749 * The new device has a different optimal sector size.
2750 */
2751 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2752 "new device has a different optimal sector size; use the "
2753 "option '-o ashift=N' to override the optimal size"));
2754 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2755 break;
2756
2757 case ENAMETOOLONG:
2758 /*
2759 * The resulting top-level vdev spec won't fit in the label.
2760 */
2761 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2762 break;
2763
2764 default:
2765 (void) zpool_standard_error(hdl, errno, msg);
2766 }
2767
2768 return (-1);
2769 }
2770
2771 /*
2772 * Detach the specified device.
2773 */
2774 int
2775 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2776 {
2777 zfs_cmd_t zc = {"\0"};
2778 char msg[1024];
2779 nvlist_t *tgt;
2780 boolean_t avail_spare, l2cache;
2781 libzfs_handle_t *hdl = zhp->zpool_hdl;
2782
2783 (void) snprintf(msg, sizeof (msg),
2784 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2785
2786 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2787 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2788 NULL)) == 0)
2789 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2790
2791 if (avail_spare)
2792 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2793
2794 if (l2cache)
2795 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2796
2797 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2798
2799 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2800 return (0);
2801
2802 switch (errno) {
2803
2804 case ENOTSUP:
2805 /*
2806 * Can't detach from this type of vdev.
2807 */
2808 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2809 "applicable to mirror and replacing vdevs"));
2810 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2811 break;
2812
2813 case EBUSY:
2814 /*
2815 * There are no other replicas of this device.
2816 */
2817 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2818 break;
2819
2820 default:
2821 (void) zpool_standard_error(hdl, errno, msg);
2822 }
2823
2824 return (-1);
2825 }
2826
2827 /*
2828 * Find a mirror vdev in the source nvlist.
2829 *
2830 * The mchild array contains a list of disks in one of the top-level mirrors
2831 * of the source pool. The schild array contains a list of disks that the
2832 * user specified on the command line. We loop over the mchild array to
2833 * see if any entry in the schild array matches.
2834 *
2835 * If a disk in the mchild array is found in the schild array, we return
2836 * the index of that entry. Otherwise we return -1.
2837 */
2838 static int
2839 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2840 nvlist_t **schild, uint_t schildren)
2841 {
2842 uint_t mc;
2843
2844 for (mc = 0; mc < mchildren; mc++) {
2845 uint_t sc;
2846 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2847 mchild[mc], 0);
2848
2849 for (sc = 0; sc < schildren; sc++) {
2850 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2851 schild[sc], 0);
2852 boolean_t result = (strcmp(mpath, spath) == 0);
2853
2854 free(spath);
2855 if (result) {
2856 free(mpath);
2857 return (mc);
2858 }
2859 }
2860
2861 free(mpath);
2862 }
2863
2864 return (-1);
2865 }
2866
2867 /*
2868 * Split a mirror pool. If newroot points to NULL, a new nvlist
2869 * is generated and it is the responsibility of the caller to free it.
2870 */
2871 int
2872 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2873 nvlist_t *props, splitflags_t flags)
2874 {
2875 zfs_cmd_t zc = {"\0"};
2876 char msg[1024];
2877 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2878 nvlist_t **varray = NULL, *zc_props = NULL;
2879 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2880 libzfs_handle_t *hdl = zhp->zpool_hdl;
2881 uint64_t vers;
2882 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2883 int retval = 0;
2884
2885 (void) snprintf(msg, sizeof (msg),
2886 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2887
2888 if (!zpool_name_valid(hdl, B_FALSE, newname))
2889 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2890
2891 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2892 (void) fprintf(stderr, gettext("Internal error: unable to "
2893 "retrieve pool configuration\n"));
2894 return (-1);
2895 }
2896
2897 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2898 == 0);
2899 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2900
2901 if (props) {
2902 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2903 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2904 props, vers, flags, msg)) == NULL)
2905 return (-1);
2906 }
2907
2908 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2909 &children) != 0) {
2910 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2911 "Source pool is missing vdev tree"));
2912 nvlist_free(zc_props);
2913 return (-1);
2914 }
2915
2916 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2917 vcount = 0;
2918
2919 if (*newroot == NULL ||
2920 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2921 &newchild, &newchildren) != 0)
2922 newchildren = 0;
2923
2924 for (c = 0; c < children; c++) {
2925 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2926 char *type;
2927 nvlist_t **mchild, *vdev;
2928 uint_t mchildren;
2929 int entry;
2930
2931 /*
2932 * Unlike cache & spares, slogs are stored in the
2933 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2934 */
2935 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2936 &is_log);
2937 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2938 &is_hole);
2939 if (is_log || is_hole) {
2940 /*
2941 * Create a hole vdev and put it in the config.
2942 */
2943 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2944 goto out;
2945 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2946 VDEV_TYPE_HOLE) != 0)
2947 goto out;
2948 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2949 1) != 0)
2950 goto out;
2951 if (lastlog == 0)
2952 lastlog = vcount;
2953 varray[vcount++] = vdev;
2954 continue;
2955 }
2956 lastlog = 0;
2957 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2958 == 0);
2959 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2961 "Source pool must be composed only of mirrors\n"));
2962 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2963 goto out;
2964 }
2965
2966 verify(nvlist_lookup_nvlist_array(child[c],
2967 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2968
2969 /* find or add an entry for this top-level vdev */
2970 if (newchildren > 0 &&
2971 (entry = find_vdev_entry(zhp, mchild, mchildren,
2972 newchild, newchildren)) >= 0) {
2973 /* We found a disk that the user specified. */
2974 vdev = mchild[entry];
2975 ++found;
2976 } else {
2977 /* User didn't specify a disk for this vdev. */
2978 vdev = mchild[mchildren - 1];
2979 }
2980
2981 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2982 goto out;
2983 }
2984
2985 /* did we find every disk the user specified? */
2986 if (found != newchildren) {
2987 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2988 "include at most one disk from each mirror"));
2989 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2990 goto out;
2991 }
2992
2993 /* Prepare the nvlist for populating. */
2994 if (*newroot == NULL) {
2995 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2996 goto out;
2997 freelist = B_TRUE;
2998 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2999 VDEV_TYPE_ROOT) != 0)
3000 goto out;
3001 } else {
3002 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3003 }
3004
3005 /* Add all the children we found */
3006 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3007 lastlog == 0 ? vcount : lastlog) != 0)
3008 goto out;
3009
3010 /*
3011 * If we're just doing a dry run, exit now with success.
3012 */
3013 if (flags.dryrun) {
3014 memory_err = B_FALSE;
3015 freelist = B_FALSE;
3016 goto out;
3017 }
3018
3019 /* now build up the config list & call the ioctl */
3020 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3021 goto out;
3022
3023 if (nvlist_add_nvlist(newconfig,
3024 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3025 nvlist_add_string(newconfig,
3026 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3027 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3028 goto out;
3029
3030 /*
3031 * The new pool is automatically part of the namespace unless we
3032 * explicitly export it.
3033 */
3034 if (!flags.import)
3035 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3036 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3037 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3038 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3039 goto out;
3040 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3041 goto out;
3042
3043 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3044 retval = zpool_standard_error(hdl, errno, msg);
3045 goto out;
3046 }
3047
3048 freelist = B_FALSE;
3049 memory_err = B_FALSE;
3050
3051 out:
3052 if (varray != NULL) {
3053 int v;
3054
3055 for (v = 0; v < vcount; v++)
3056 nvlist_free(varray[v]);
3057 free(varray);
3058 }
3059 zcmd_free_nvlists(&zc);
3060 nvlist_free(zc_props);
3061 nvlist_free(newconfig);
3062 if (freelist) {
3063 nvlist_free(*newroot);
3064 *newroot = NULL;
3065 }
3066
3067 if (retval != 0)
3068 return (retval);
3069
3070 if (memory_err)
3071 return (no_memory(hdl));
3072
3073 return (0);
3074 }
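
/*
 * Usage sketch (illustrative, excluded from the build): a dry-run split.
 * With flags.dryrun set, *newroot receives the vdev layout the new pool
 * would be given and no ioctl is issued; the caller frees the nvlist.
 * "newpool" is a placeholder name.
 */
#if 0
static void
example_split_dryrun(zpool_handle_t *zhp)
{
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;

	flags.dryrun = 1;
	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
		nvlist_print(stdout, newroot);	/* from libnvpair */
		nvlist_free(newroot);
	}
}
#endif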
3075
3076 /*
3077 * Remove the given device. Currently, this is supported only for hot spares,
3078 * cache, and log devices.
3079 */
3080 int
3081 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3082 {
3083 zfs_cmd_t zc = {"\0"};
3084 char msg[1024];
3085 nvlist_t *tgt;
3086 boolean_t avail_spare, l2cache, islog;
3087 libzfs_handle_t *hdl = zhp->zpool_hdl;
3088 uint64_t version;
3089
3090 (void) snprintf(msg, sizeof (msg),
3091 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3092
3093 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3094 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3095 &islog)) == 0)
3096 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3097 /*
3098 * XXX - this should just go away.
3099 */
3100 if (!avail_spare && !l2cache && !islog) {
3101 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3102 "only inactive hot spares, cache, "
3103 "or log devices can be removed"));
3104 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3105 }
3106
3107 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3108 if (islog && version < SPA_VERSION_HOLES) {
3109 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3110 "pool must be upgrade to support log removal"));
3111 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3112 }
3113
3114 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3115
3116 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3117 return (0);
3118
3119 return (zpool_standard_error(hdl, errno, msg));
3120 }
3121
3122 /*
3123 * Clear the errors for the pool, or the particular device if specified.
3124 */
3125 int
3126 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3127 {
3128 zfs_cmd_t zc = {"\0"};
3129 char msg[1024];
3130 nvlist_t *tgt;
3131 zpool_rewind_policy_t policy;
3132 boolean_t avail_spare, l2cache;
3133 libzfs_handle_t *hdl = zhp->zpool_hdl;
3134 nvlist_t *nvi = NULL;
3135 int error;
3136
3137 if (path)
3138 (void) snprintf(msg, sizeof (msg),
3139 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3140 path);
3141 else
3142 (void) snprintf(msg, sizeof (msg),
3143 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3144 zhp->zpool_name);
3145
3146 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3147 if (path) {
3148 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3149 &l2cache, NULL)) == 0)
3150 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3151
3152 /*
3153 * Don't allow error clearing for hot spares. Do allow
3154 * error clearing for l2cache devices.
3155 */
3156 if (avail_spare)
3157 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3158
3159 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3160 &zc.zc_guid) == 0);
3161 }
3162
3163 zpool_get_rewind_policy(rewindnvl, &policy);
3164 zc.zc_cookie = policy.zrp_request;
3165
3166 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3167 return (-1);
3168
3169 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3170 return (-1);
3171
3172 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3173 errno == ENOMEM) {
3174 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3175 zcmd_free_nvlists(&zc);
3176 return (-1);
3177 }
3178 }
3179
3180 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3181 errno != EPERM && errno != EACCES)) {
3182 if (policy.zrp_request &
3183 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3184 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3185 zpool_rewind_exclaim(hdl, zc.zc_name,
3186 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3187 nvi);
3188 nvlist_free(nvi);
3189 }
3190 zcmd_free_nvlists(&zc);
3191 return (0);
3192 }
3193
3194 zcmd_free_nvlists(&zc);
3195 return (zpool_standard_error(hdl, errno, msg));
3196 }
3197
3198 /*
3199 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3200 */
3201 int
3202 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3203 {
3204 zfs_cmd_t zc = {"\0"};
3205 char msg[1024];
3206 libzfs_handle_t *hdl = zhp->zpool_hdl;
3207
3208 (void) snprintf(msg, sizeof (msg),
3209 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3210 (u_longlong_t)guid);
3211
3212 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3213 zc.zc_guid = guid;
3214 zc.zc_cookie = ZPOOL_NO_REWIND;
3215
3216 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3217 return (0);
3218
3219 return (zpool_standard_error(hdl, errno, msg));
3220 }
3221
3222 /*
3223 * Change the GUID for a pool.
3224 */
3225 int
3226 zpool_reguid(zpool_handle_t *zhp)
3227 {
3228 char msg[1024];
3229 libzfs_handle_t *hdl = zhp->zpool_hdl;
3230 zfs_cmd_t zc = {"\0"};
3231
3232 (void) snprintf(msg, sizeof (msg),
3233 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3234
3235 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3236 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3237 return (0);
3238
3239 return (zpool_standard_error(hdl, errno, msg));
3240 }
3241
3242 /*
3243 * Reopen the pool.
3244 */
3245 int
3246 zpool_reopen(zpool_handle_t *zhp)
3247 {
3248 zfs_cmd_t zc = {"\0"};
3249 char msg[1024];
3250 libzfs_handle_t *hdl = zhp->zpool_hdl;
3251
3252 (void) snprintf(msg, sizeof (msg),
3253 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3254 zhp->zpool_name);
3255
3256 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3257 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3258 return (0);
3259 return (zpool_standard_error(hdl, errno, msg));
3260 }
3261
3262 #if defined(__sun__) || defined(__sun)
3263 /*
3264 * Convert from a devid string to a path.
3265 */
3266 static char *
3267 devid_to_path(char *devid_str)
3268 {
3269 ddi_devid_t devid;
3270 char *minor;
3271 char *path;
3272 devid_nmlist_t *list = NULL;
3273 int ret;
3274
3275 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3276 return (NULL);
3277
3278 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3279
3280 devid_str_free(minor);
3281 devid_free(devid);
3282
3283 if (ret != 0)
3284 return (NULL);
3285
3286 /*
3287 * In case the strdup() fails, we will just return NULL below.
3288 */
3289 path = strdup(list[0].devname);
3290
3291 devid_free_nmlist(list);
3292
3293 return (path);
3294 }
3295
3296 /*
3297 * Convert from a path to a devid string.
3298 */
3299 static char *
3300 path_to_devid(const char *path)
3301 {
3302 int fd;
3303 ddi_devid_t devid;
3304 char *minor, *ret;
3305
3306 if ((fd = open(path, O_RDONLY)) < 0)
3307 return (NULL);
3308
3309 minor = NULL;
3310 ret = NULL;
3311 if (devid_get(fd, &devid) == 0) {
3312 if (devid_get_minor_name(fd, &minor) == 0)
3313 ret = devid_str_encode(devid, minor);
3314 if (minor != NULL)
3315 devid_str_free(minor);
3316 devid_free(devid);
3317 }
3318 (void) close(fd);
3319
3320 return (ret);
3321 }
3322
3323 /*
3324 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3325 * ignore any failure here, since a common case is for an unprivileged user to
3326 * type 'zpool status', and we'll display the correct information anyway.
3327 */
3328 static void
3329 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3330 {
3331 zfs_cmd_t zc = {"\0"};
3332
3333 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3334 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3335 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3336 &zc.zc_guid) == 0);
3337
3338 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3339 }
3340 #endif /* sun */
3341
3342 /*
3343 * Remove partition suffix from a vdev path. Partition suffixes may take three
3344 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3345 * case only occurs when the suffix is preceded by a digit, e.g. "md0p0". The
3346 * third case only occurs when preceded by a string matching the regular
3347 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3348 *
3349 * The caller must free the returned string.
3350 */
3351 char *
3352 zfs_strip_partition(char *path)
3353 {
3354 char *tmp = strdup(path);
3355 char *part = NULL, *d = NULL;
3356 if (!tmp)
3357 return (NULL);
3358
3359 if ((part = strstr(tmp, "-part")) && part != tmp) {
3360 d = part + 5;
3361 } else if ((part = strrchr(tmp, 'p')) &&
3362 part > tmp + 1 && isdigit(*(part-1))) {
3363 d = part + 1;
3364 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3365 tmp[1] == 'd') {
3366 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3367 } else if (strncmp("xvd", tmp, 3) == 0) {
3368 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3369 }
3370 if (part && d && *d != '\0') {
3371 for (; isdigit(*d); d++) { }
3372 if (*d == '\0')
3373 *part = '\0';
3374 }
3375
3376 return (tmp);
3377 }
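
/*
 * Behavior sketch (illustrative, excluded from the build) covering the
 * three suffix forms handled above; every call returns a newly allocated
 * string that the caller must free. The device names are placeholders.
 */
#if 0
static void
example_strip_partition(void)
{
	char *s;

	s = zfs_strip_partition("sda1");		/* -> "sda" */
	free(s);
	s = zfs_strip_partition("md126p3");		/* -> "md126" */
	free(s);
	s = zfs_strip_partition("ata-ST4000-part1");	/* -> "ata-ST4000" */
	free(s);
}
#endif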
3378
3379 /*
3380 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3381 *
3382 * path: /dev/sda1
3383 * returns: /dev/sda
3384 *
3385 * Returned string must be freed.
3386 */
3387 char *
3388 zfs_strip_partition_path(char *path)
3389 {
3390 char *newpath = strdup(path);
3391 char *sd_offset;
3392 char *new_sd;
3393
3394 if (!newpath)
3395 return (NULL);
3396
3397 /* Point to "sda1" part of "/dev/sda1" */
3398 sd_offset = strrchr(newpath, '/') + 1;
3399
3400 /* Get our new name "sda" */
3401 new_sd = zfs_strip_partition(sd_offset);
3402 if (!new_sd) {
3403 free(newpath);
3404 return (NULL);
3405 }
3406
3407 /* Paste the "sda" where "sda1" was */
3408 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3409
3410 /* Free temporary "sda" */
3411 free(new_sd);
3412
3413 return (newpath);
3414 }
3415
3416 #define PATH_BUF_LEN 64
3417
3418 /*
3419 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3420 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3421 * We also check if this is a whole disk, in which case we strip off the
3422 * trailing 's0' slice name.
3423 *
3424 * This routine is also responsible for identifying when disks have been
3425 * reconfigured in a new location. The kernel will have opened the device by
3426 * devid, but the path will still refer to the old location. To catch this, we
3427 * first do a path -> devid translation (which is fast for the common case). If
3428 * the devid matches, we're done. If not, we do a reverse devid -> path
3429 * translation and issue the appropriate ioctl() to update the path of the vdev.
3430 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3431 * of these checks.
3432 */
3433 char *
3434 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3435 int name_flags)
3436 {
3437 char *path, *type, *env;
3438 uint64_t value;
3439 char buf[PATH_BUF_LEN];
3440 char tmpbuf[PATH_BUF_LEN];
3441
3442 env = getenv("ZPOOL_VDEV_NAME_PATH");
3443 if (env && (strtoul(env, NULL, 0) > 0 ||
3444 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3445 name_flags |= VDEV_NAME_PATH;
3446
3447 env = getenv("ZPOOL_VDEV_NAME_GUID");
3448 if (env && (strtoul(env, NULL, 0) > 0 ||
3449 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3450 name_flags |= VDEV_NAME_GUID;
3451
3452 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3453 if (env && (strtoul(env, NULL, 0) > 0 ||
3454 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3455 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3456
3457 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3458 name_flags & VDEV_NAME_GUID) {
3459 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3460 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3461 path = buf;
3462 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3463 #if defined(__sun__) || defined(__sun)
3464 /*
3465 * Live VDEV path updates to a kernel VDEV during a
3466 * zpool_vdev_name lookup are not supported on Linux.
3467 */
3468 char *devid;
3469 vdev_stat_t *vs;
3470 uint_t vsc;
3471
3472 /*
3473 * If the device is dead (faulted, offline, etc) then don't
3474 * bother opening it. Otherwise we may be forcing the user to
3475 * open a misbehaving device, which can have undesirable
3476 * effects.
3477 */
3478 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3479 (uint64_t **)&vs, &vsc) != 0 ||
3480 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3481 zhp != NULL &&
3482 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3483 /*
3484 * Determine if the current path is correct.
3485 */
3486 char *newdevid = path_to_devid(path);
3487
3488 if (newdevid == NULL ||
3489 strcmp(devid, newdevid) != 0) {
3490 char *newpath;
3491
3492 if ((newpath = devid_to_path(devid)) != NULL) {
3493 /*
3494 * Update the path appropriately.
3495 */
3496 set_path(zhp, nv, newpath);
3497 if (nvlist_add_string(nv,
3498 ZPOOL_CONFIG_PATH, newpath) == 0)
3499 verify(nvlist_lookup_string(nv,
3500 ZPOOL_CONFIG_PATH,
3501 &path) == 0);
3502 free(newpath);
3503 }
3504 }
3505
3506 if (newdevid)
3507 devid_str_free(newdevid);
3508 }
3509 #endif /* sun */
3510
3511 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3512 char *rp = realpath(path, NULL);
3513 if (rp) {
3514 strlcpy(buf, rp, sizeof (buf));
3515 path = buf;
3516 free(rp);
3517 }
3518 }
3519
3520 /*
3521 * For a block device only use the name.
3522 */
3523 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3524 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3525 !(name_flags & VDEV_NAME_PATH)) {
3526 path = strrchr(path, '/');
3527 path++;
3528 }
3529
3530 /*
3531 * Remove the partition from the path if this is a whole disk.
3532 */
3533 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3534 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3535 return (zfs_strip_partition(path));
3536 }
3537 } else {
3538 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3539
3540 /*
3541 * If it's a raidz device, we need to stick in the parity level.
3542 */
3543 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3544 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3545 &value) == 0);
3546 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3547 (u_longlong_t)value);
3548 path = buf;
3549 }
3550
3551 /*
3552 * We identify each top-level vdev by using a <type-id>
3553 * naming convention.
3554 */
3555 if (name_flags & VDEV_NAME_TYPE_ID) {
3556 uint64_t id;
3557 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3558 &id) == 0);
3559 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3560 path, (u_longlong_t)id);
3561 path = tmpbuf;
3562 }
3563 }
3564
3565 return (zfs_strdup(hdl, path));
3566 }
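
/*
 * Usage sketch (illustrative, excluded from the build): the
 * ZPOOL_VDEV_NAME_* environment variables honored above let users change
 * naming without code changes (e.g. ZPOOL_VDEV_NAME_GUID=1 makes 'zpool
 * status' print GUIDs); callers get the same effect via name_flags.
 */
#if 0
static void
example_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *name;

	/* Full path, following symlinks, instead of the short name. */
	name = zpool_vdev_name(hdl, zhp, nv,
	    VDEV_NAME_PATH | VDEV_NAME_FOLLOW_LINKS);
	(void) printf("%s\n", name);
	free(name);
}
#endif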
3567
3568 static int
3569 zbookmark_mem_compare(const void *a, const void *b)
3570 {
3571 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3572 }
3573
3574 /*
3575 * Retrieve the persistent error log, uniquify the members, and return to the
3576 * caller.
3577 */
3578 int
3579 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3580 {
3581 zfs_cmd_t zc = {"\0"};
3582 libzfs_handle_t *hdl = zhp->zpool_hdl;
3583 uint64_t count;
3584 zbookmark_phys_t *zb = NULL;
3585 int i;
3586
3587 /*
3588 * Retrieve the raw error list from the kernel. If the number of errors
3589 * has increased, allocate more space and continue until we get the
3590 * entire list.
3591 */
3592 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3593 &count) == 0);
3594 if (count == 0)
3595 return (0);
3596 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3597 count * sizeof (zbookmark_phys_t));
3598 zc.zc_nvlist_dst_size = count;
3599 (void) strcpy(zc.zc_name, zhp->zpool_name);
3600 for (;;) {
3601 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3602 &zc) != 0) {
3603 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3604 if (errno == ENOMEM) {
3605 void *dst;
3606
3607 count = zc.zc_nvlist_dst_size;
3608 dst = zfs_alloc(zhp->zpool_hdl, count *
3609 sizeof (zbookmark_phys_t));
3610 zc.zc_nvlist_dst = (uintptr_t)dst;
3611 } else {
3612 return (zpool_standard_error_fmt(hdl, errno,
3613 dgettext(TEXT_DOMAIN, "errors: List of "
3614 "errors unavailable")));
3615 }
3616 } else {
3617 break;
3618 }
3619 }
3620
3621 /*
3622 * Sort the resulting bookmarks. This is a little confusing due to the
3623 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3624 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3625 * _not_ copied as part of the process. So we point the start of our
3626 * array appropriately and decrement the total number of elements.
3627 */
3628 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3629 zc.zc_nvlist_dst_size;
3630 count -= zc.zc_nvlist_dst_size;
3631
3632 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3633
3634 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3635
3636 /*
3637 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3638 */
3639 for (i = 0; i < count; i++) {
3640 nvlist_t *nv;
3641
3642 /* ignoring zb_blkid and zb_level for now */
3643 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3644 zb[i-1].zb_object == zb[i].zb_object)
3645 continue;
3646
3647 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3648 goto nomem;
3649 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3650 zb[i].zb_objset) != 0) {
3651 nvlist_free(nv);
3652 goto nomem;
3653 }
3654 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3655 zb[i].zb_object) != 0) {
3656 nvlist_free(nv);
3657 goto nomem;
3658 }
3659 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3660 nvlist_free(nv);
3661 goto nomem;
3662 }
3663 nvlist_free(nv);
3664 }
3665
3666 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3667 return (0);
3668
3669 nomem:
3670 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3671 return (no_memory(zhp->zpool_hdl));
3672 }
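
/*
 * Usage sketch (illustrative, excluded from the build): walking the
 * uniquified error list. Each element is an nvlist named "ejk" carrying
 * the dataset and object numbers, which zpool_obj_to_path() (below) can
 * turn into a pathname, mirroring what 'zpool status -v' prints.
 */
#if 0
static void
example_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;
		char path[MAXPATHLEN];

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, path, sizeof (path));
		(void) printf("%s\n", path);
	}
	nvlist_free(nverrlist);
}
#endif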
3673
3674 /*
3675 * Upgrade a ZFS pool to the latest on-disk version.
3676 */
3677 int
3678 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3679 {
3680 zfs_cmd_t zc = {"\0"};
3681 libzfs_handle_t *hdl = zhp->zpool_hdl;
3682
3683 (void) strcpy(zc.zc_name, zhp->zpool_name);
3684 zc.zc_cookie = new_version;
3685
3686 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3687 return (zpool_standard_error_fmt(hdl, errno,
3688 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3689 zhp->zpool_name));
3690 return (0);
3691 }
3692
3693 void
3694 zfs_save_arguments(int argc, char **argv, char *string, int len)
3695 {
3696 int i;
3697
3698 (void) strlcpy(string, basename(argv[0]), len);
3699 for (i = 1; i < argc; i++) {
3700 (void) strlcat(string, " ", len);
3701 (void) strlcat(string, argv[i], len);
3702 }
3703 }
3704
3705 int
3706 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3707 {
3708 zfs_cmd_t zc = {"\0"};
3709 nvlist_t *args;
3710 int err;
3711
3712 args = fnvlist_alloc();
3713 fnvlist_add_string(args, "message", message);
3714 err = zcmd_write_src_nvlist(hdl, &zc, args);
3715 if (err == 0)
3716 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3717 nvlist_free(args);
3718 zcmd_free_nvlists(&zc);
3719 return (err);
3720 }
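
/*
 * Usage sketch (illustrative, excluded from the build): recording a
 * free-form message in the pool history; it later appears in
 * 'zpool history -i' output. The message text is a placeholder.
 */
#if 0
static void
example_log_history(libzfs_handle_t *hdl)
{
	(void) zpool_log_history(hdl, "myapp: maintenance window started");
}
#endif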
3721
3722 /*
3723 * Perform ioctl to get some command history of a pool.
3724 *
3725 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3726 * logical offset of the history buffer to start reading from.
3727 *
3728 * Upon return, 'off' is the next logical offset to read from and
3729 * 'len' is the actual amount of bytes read into 'buf'.
3730 */
3731 static int
3732 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3733 {
3734 zfs_cmd_t zc = {"\0"};
3735 libzfs_handle_t *hdl = zhp->zpool_hdl;
3736
3737 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3738
3739 zc.zc_history = (uint64_t)(uintptr_t)buf;
3740 zc.zc_history_len = *len;
3741 zc.zc_history_offset = *off;
3742
3743 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3744 switch (errno) {
3745 case EPERM:
3746 return (zfs_error_fmt(hdl, EZFS_PERM,
3747 dgettext(TEXT_DOMAIN,
3748 "cannot show history for pool '%s'"),
3749 zhp->zpool_name));
3750 case ENOENT:
3751 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3752 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3753 "'%s'"), zhp->zpool_name));
3754 case ENOTSUP:
3755 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3756 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3757 "'%s', pool must be upgraded"), zhp->zpool_name));
3758 default:
3759 return (zpool_standard_error_fmt(hdl, errno,
3760 dgettext(TEXT_DOMAIN,
3761 "cannot get history for '%s'"), zhp->zpool_name));
3762 }
3763 }
3764
3765 *len = zc.zc_history_len;
3766 *off = zc.zc_history_offset;
3767
3768 return (0);
3769 }
3770
3771 /*
3772 * Process the buffer of nvlists, unpacking and storing each nvlist record
3773 * into 'records'. 'leftover' is set to the number of bytes that weren't
3774 * processed as there wasn't a complete record.
3775 */
3776 int
3777 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3778 nvlist_t ***records, uint_t *numrecords)
3779 {
3780 uint64_t reclen;
3781 nvlist_t *nv;
3782 int i;
3783 void *tmp;
3784
3785 while (bytes_read > sizeof (reclen)) {
3786
3787 /* get length of packed record (stored as little endian) */
3788 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3789 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3790
3791 if (bytes_read < sizeof (reclen) + reclen)
3792 break;
3793
3794 /* unpack record */
3795 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3796 return (ENOMEM);
3797 bytes_read -= sizeof (reclen) + reclen;
3798 buf += sizeof (reclen) + reclen;
3799
3800 /* add record to nvlist array */
3801 (*numrecords)++;
3802 if (ISP2(*numrecords + 1)) {
3803 tmp = realloc(*records,
3804 *numrecords * 2 * sizeof (nvlist_t *));
3805 if (tmp == NULL) {
3806 nvlist_free(nv);
3807 (*numrecords)--;
3808 return (ENOMEM);
3809 }
3810 *records = tmp;
3811 }
3812 (*records)[*numrecords - 1] = nv;
3813 }
3814
3815 *leftover = bytes_read;
3816 return (0);
3817 }
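
/*
 * Buffer layout sketch (illustrative only) for the loop above: the
 * history buffer is a sequence of packed nvlists, each preceded by its
 * length as a 64-bit little-endian integer:
 *
 *	+----------------+------------------+----------------+------
 *	| reclen (8, LE) | packed nvlist    | reclen (8, LE) | ...
 *	+----------------+------------------+----------------+------
 *
 * A trailing partial record is reported via 'leftover' so the caller can
 * re-read it on the next pass, as zpool_get_history() does below.
 */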
3818
3819 /*
3820 * Retrieve the command history of a pool.
3821 */
3822 int
3823 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3824 {
3825 char *buf;
3826 int buflen = 128 * 1024;
3827 uint64_t off = 0;
3828 nvlist_t **records = NULL;
3829 uint_t numrecords = 0;
3830 int err, i;
3831
3832 buf = malloc(buflen);
3833 if (buf == NULL)
3834 return (ENOMEM);
3835 do {
3836 uint64_t bytes_read = buflen;
3837 uint64_t leftover;
3838
3839 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3840 break;
3841
3842 /* if nothing else was read in, we're at EOF, just return */
3843 if (!bytes_read)
3844 break;
3845
3846 if ((err = zpool_history_unpack(buf, bytes_read,
3847 &leftover, &records, &numrecords)) != 0)
3848 break;
3849 off -= leftover;
3850 if (leftover == bytes_read) {
3851 /*
3852 * No progress made because the buffer is not big enough
3853 * to hold this record; resize and retry.
3854 */
3855 buflen *= 2;
3856 free(buf);
3857 buf = malloc(buflen);
3858 if (buf == NULL)
3859 return (ENOMEM);
3860 }
3861
3862 /* CONSTCOND */
3863 } while (1);
3864
3865 free(buf);
3866
3867 if (!err) {
3868 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3869 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3870 records, numrecords) == 0);
3871 }
3872 for (i = 0; i < numrecords; i++)
3873 nvlist_free(records[i]);
3874 free(records);
3875
3876 return (err);
3877 }
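
/*
 * Usage sketch (illustrative, excluded from the build): printing the
 * command text of each history record. ZPOOL_HIST_CMD is present for
 * records logged from the command line; internal events carry other keys.
 */
#if 0
static void
example_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis = NULL;
	nvlist_t **records;
	uint_t numrecords, i;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
}
#endif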
3878
3879 /*
3880 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3881 * If there is a new event available 'nvp' will contain a newly allocated
3882 * nvlist and 'dropped' will be set to the number of missed events since
3883 * the last call to this function. When 'nvp' is set to NULL it indicates
3884 * no new events are available. In either case the function returns 0 and
3885 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3886 * function will return a non-zero value. When the function is called in
3887 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3888 * it will not return until a new event is available.
3889 */
3890 int
3891 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3892 int *dropped, unsigned flags, int zevent_fd)
3893 {
3894 zfs_cmd_t zc = {"\0"};
3895 int error = 0;
3896
3897 *nvp = NULL;
3898 *dropped = 0;
3899 zc.zc_cleanup_fd = zevent_fd;
3900
3901 if (flags & ZEVENT_NONBLOCK)
3902 zc.zc_guid = ZEVENT_NONBLOCK;
3903
3904 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3905 return (-1);
3906
3907 retry:
3908 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3909 switch (errno) {
3910 case ESHUTDOWN:
3911 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3912 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3913 goto out;
3914 case ENOENT:
3915 /* Blocking error case should not occur */
3916 if (!(flags & ZEVENT_NONBLOCK))
3917 error = zpool_standard_error_fmt(hdl, errno,
3918 dgettext(TEXT_DOMAIN, "cannot get event"));
3919
3920 goto out;
3921 case ENOMEM:
3922 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3923 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3924 dgettext(TEXT_DOMAIN, "cannot get event"));
3925 goto out;
3926 } else {
3927 goto retry;
3928 }
3929 default:
3930 error = zpool_standard_error_fmt(hdl, errno,
3931 dgettext(TEXT_DOMAIN, "cannot get event"));
3932 goto out;
3933 }
3934 }
3935
3936 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3937 if (error != 0)
3938 goto out;
3939
3940 *dropped = (int)zc.zc_cookie;
3941 out:
3942 zcmd_free_nvlists(&zc);
3943
3944 return (error);
3945 }
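
/*
 * Usage sketch (illustrative, excluded from the build): a non-blocking
 * drain of all pending events. Assumes 'zevent_fd' is a descriptor
 * opened on /dev/zfs, as consumers such as zed arrange.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl, int zevent_fd)
{
	nvlist_t *nvp;
	int dropped;

	for (;;) {
		if (zpool_events_next(hdl, &nvp, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0)
			break;
		if (nvp == NULL)	/* no more events queued */
			break;
		if (dropped > 0)
			(void) fprintf(stderr, "missed %d events\n", dropped);
		nvlist_print(stdout, nvp);	/* from libnvpair */
		nvlist_free(nvp);
	}
}
#endif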
3946
3947 /*
3948 * Clear all events.
3949 */
3950 int
3951 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3952 {
3953 zfs_cmd_t zc = {"\0"};
3954 char msg[1024];
3955
3956 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3957 "cannot clear events"));
3958
3959 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3960 return (zpool_standard_error_fmt(hdl, errno, msg));
3961
3962 if (count != NULL)
3963 *count = (int)zc.zc_cookie; /* # of events cleared */
3964
3965 return (0);
3966 }
3967
3968 /*
3969 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3970 * the passed zevent_fd file handle. On success zero is returned,
3971 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3972 */
3973 int
3974 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3975 {
3976 zfs_cmd_t zc = {"\0"};
3977 int error = 0;
3978
3979 zc.zc_guid = eid;
3980 zc.zc_cleanup_fd = zevent_fd;
3981
3982 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3983 switch (errno) {
3984 case ENOENT:
3985 error = zfs_error_fmt(hdl, EZFS_NOENT,
3986 dgettext(TEXT_DOMAIN, "cannot get event"));
3987 break;
3988
3989 case ENOMEM:
3990 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3991 dgettext(TEXT_DOMAIN, "cannot get event"));
3992 break;
3993
3994 default:
3995 error = zpool_standard_error_fmt(hdl, errno,
3996 dgettext(TEXT_DOMAIN, "cannot get event"));
3997 break;
3998 }
3999 }
4000
4001 return (error);
4002 }
4003
4004 void
4005 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4006 char *pathname, size_t len)
4007 {
4008 zfs_cmd_t zc = {"\0"};
4009 boolean_t mounted = B_FALSE;
4010 char *mntpnt = NULL;
4011 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4012
4013 if (dsobj == 0) {
4014 /* special case for the MOS */
4015 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4016 (longlong_t)obj);
4017 return;
4018 }
4019
4020 /* get the dataset's name */
4021 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4022 zc.zc_obj = dsobj;
4023 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4024 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4025 /* just write out a path of two object numbers */
4026 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4027 (longlong_t)dsobj, (longlong_t)obj);
4028 return;
4029 }
4030 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4031
4032 /* find out if the dataset is mounted */
4033 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4034
4035 /* get the corrupted object's path */
4036 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4037 zc.zc_obj = obj;
4038 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4039 &zc) == 0) {
4040 if (mounted) {
4041 (void) snprintf(pathname, len, "%s%s", mntpnt,
4042 zc.zc_value);
4043 } else {
4044 (void) snprintf(pathname, len, "%s:%s",
4045 dsname, zc.zc_value);
4046 }
4047 } else {
4048 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4049 (longlong_t)obj);
4050 }
4051 free(mntpnt);
4052 }

/*
 * Read the EFI label from the config; if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a
 * non-NULL diskaddr argument then we set it to the starting address
 * of the EFI partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

/*
 * Verify that the disk has a usable, uncorrupted primary EFI label.
 */
static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Generate a unique partition name for the ZFS member.  Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members.  The partition names are
 * of the form zfs-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
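
/*
 * For example (illustrative), a call such as:
 *
 *	char name[EFI_PART_NAME_LEN];
 *	zpool_label_name(name, sizeof (name));
 *
 * might produce "zfs-4f2e9a7c51b80d36", for which udev then creates the
 * /dev/disk/by-partlabel/zfs-4f2e9a7c51b80d36 symlink.
 */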

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	rval = efi_write(fd, vtoc);

	/* Flush the buffers to disk and invalidate the page cache. */
	(void) fsync(fd);
	(void) ioctl(fd, BLKFLSBUF);

	if (rval == 0)
		rval = efi_rescan(fd);

	/*
	 * Some block drivers (like pcata) may not support EFI GPT labels.
	 * Print out a helpful error message directing the user to manually
	 * label the disk and give a specific slice.
	 */
	if (rval != 0) {
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal that the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged. Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}
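
/*
 * Example (illustrative sketch): label a whole disk before adding it to
 * a pool.  The name is the short device name, without the /dev prefix.
 *
 *	if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */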

/*
 * Allocate and return the underlying device name for a device mapper device.
 * If a device mapper device maps to multiple devices, return the first device.
 *
 * For example, dm_name = "/dev/dm-0" could return "/dev/sda".  Symlinks to a
 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
 *
 * Returns device name, or NULL on error or no match.  If dm_name is not a DM
 * device then return NULL.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
dm_get_underlying_path(char *dm_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char *realp;
	char *tmp = NULL;
	char *path = NULL;
	char *dev_str;
	int size;

	if (dm_name == NULL)
		return (NULL);

	/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
	realp = realpath(dm_name, NULL);
	if (realp == NULL)
		return (NULL);

	/*
	 * If they preface 'dev' with a path (like "/dev") then strip it off.
	 * We just want the 'dm-N' part.
	 */
	tmp = strrchr(realp, '/');
	if (tmp != NULL)
		dev_str = tmp + 1;	/* +1 since we want the chr after '/' */
	else
		dev_str = realp;	/* no '/', use the whole string */

	size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
	if (size == -1 || !tmp) {
		tmp = NULL;	/* contents are undefined on failure */
		goto end;
	}

	dp = opendir(tmp);
	if (dp == NULL)
		goto end;

	/* Return first sd* entry in /sys/block/dm-N/slaves/ */
	while ((ep = readdir(dp))) {
		if (ep->d_type != DT_DIR) {	/* skip "." and ".." dirs */
			size = asprintf(&path, "/dev/%s", ep->d_name);
			if (size == -1)
				path = NULL;	/* undefined on failure */
			break;
		}
	}

end:
	if (dp != NULL)
		closedir(dp);
	free(tmp);
	free(realp);
	return (path);
}
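
/*
 * Example (illustrative sketch): map a DM node back to its first
 * underlying device; the result is malloc'd and must be freed.
 *
 *	char *dev = dm_get_underlying_path("/dev/dm-0");
 *	if (dev != NULL) {
 *		(void) printf("backed by %s\n", dev);
 *		free(dev);
 *	}
 */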

/*
 * Return 1 if device is a device mapper or multipath device.
 * Return 0 if not.
 */
int
zfs_dev_is_dm(char *dev_name)
{
	char *tmp;

	tmp = dm_get_underlying_path(dev_name);
	if (tmp == NULL)
		return (0);

	free(tmp);
	return (1);
}

/*
 * By "whole disk" we mean an entire physical disk (something we can
 * label, toggle the write cache on, etc.) as opposed to the full
 * capacity of a pseudo-device such as lofi or did.  We act as if we
 * are labeling the disk, which should be a pretty good test of whether
 * it's a viable device or not.  Returns 1 if it is and 0 if it isn't.
 */
int
zfs_dev_is_whole_disk(char *dev_name)
{
	struct dk_gpt *label;
	int fd;

	if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
		return (0);

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
		(void) close(fd);
		return (0);
	}

	efi_free(label);
	(void) close(fd);

	return (1);
}
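
/*
 * Example (illustrative sketch): classify a device before deciding how
 * to label it.
 *
 *	if (zfs_dev_is_dm(path))
 *		(void) printf("%s is a DM/multipath device\n", path);
 *	else if (zfs_dev_is_whole_disk(path))
 *		(void) printf("%s is a labelable whole disk\n", path);
 */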

/*
 * Lookup the underlying device for a device name
 *
 * Often you'll have a symlink to a device, a partition device,
 * or a multipath device, and want to look up the underlying device.
 * This function returns the underlying device name.  If the device
 * name is already the underlying device, then just return the same
 * name.  If the device is a DM device with multiple underlying devices
 * then return the first one.
 *
 * For example:
 *
 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
 * dev_name:	/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
 * returns:	/dev/sda
 *
 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
 * dev_name:	/dev/mapper/mpatha
 * returns:	/dev/sda (first device)
 *
 * 3. /dev/sda (already the underlying device)
 * dev_name:	/dev/sda
 * returns:	/dev/sda
 *
 * 4. /dev/dm-3 (mapped to /dev/sda)
 * dev_name:	/dev/dm-3
 * returns:	/dev/sda
 *
 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
 * dev_name:	/dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
 * returns:	/dev/sdb
 *
 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
 * dev_name:	/dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
 * returns:	/dev/sda
 *
 * Returns underlying device name, or NULL on error or no match.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
zfs_get_underlying_path(char *dev_name)
{
	char *name = NULL;
	char *tmp;

	if (dev_name == NULL)
		return (NULL);

	tmp = dm_get_underlying_path(dev_name);

	/* dev_name not a DM device, so just un-symlinkize it */
	if (tmp == NULL)
		tmp = realpath(dev_name, NULL);

	if (tmp != NULL) {
		name = zfs_strip_partition_path(tmp);
		free(tmp);
	}

	return (name);
}
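
/*
 * Example (illustrative sketch): resolve a by-id symlink to its base
 * device.  The returned string is malloc'd and must be freed.
 *
 *	char *base = zfs_get_underlying_path(
 *	    "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001");
 *	if (base != NULL) {
 *		(void) printf("underlying device: %s\n", base);
 *		free(base);
 *	}
 */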

/*
 * Given a dev name like "sda", return the full enclosure sysfs path to
 * the disk.  You can also pass in the name with "/dev" prepended
 * to it (like /dev/sda).
 *
 * For example, disk "sda" in enclosure slot 1:
 * dev:		"sda"
 * returns:	"/sys/class/enclosure/1:0:3:0/Slot 1"
 *
 * 'dev' must be a non-devicemapper device.
 *
 * Returned string must be freed.
 */
char *
zfs_get_enclosure_sysfs_path(char *dev_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char buf[MAXPATHLEN];
	char *tmp1 = NULL;
	char *tmp2 = NULL;
	char *tmp3 = NULL;
	char *path = NULL;
	ssize_t size;
	int tmpsize;

	if (dev_name == NULL)
		return (NULL);

	/* If they preface 'dev' with a path (like "/dev") then strip it off */
	tmp1 = strrchr(dev_name, '/');
	if (tmp1 != NULL)
		dev_name = tmp1 + 1;	/* +1 since we want the chr after '/' */

	tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
	if (tmpsize == -1 || tmp1 == NULL) {
		tmp1 = NULL;	/* contents are undefined on failure */
		goto end;
	}

	dp = opendir(tmp1);
	if (dp == NULL)
		goto end;	/* 'tmp1' is freed below */

	/*
	 * Look through all sysfs entries in /sys/block/<dev>/device for
	 * the enclosure symlink.
	 */
	while ((ep = readdir(dp))) {
		/* Ignore everything that's not our enclosure_device link */
		if (strstr(ep->d_name, "enclosure_device") == NULL)
			continue;

		if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1) {
			tmp2 = NULL;	/* contents are undefined on failure */
			break;
		}

		size = readlink(tmp2, buf, sizeof (buf));

		/* Did readlink fail or crop the link name? */
		if (size == -1 || (size_t)size >= sizeof (buf)) {
			free(tmp2);
			tmp2 = NULL;	/* To make free() at the end a NOP */
			break;
		}

		/*
		 * We got a valid link.  readlink() doesn't terminate strings
		 * so we have to do it.
		 */
		buf[size] = '\0';

		/*
		 * Our link will look like:
		 *
		 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
		 *
		 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
		 */
		tmp3 = strstr(buf, "enclosure");
		if (tmp3 == NULL)
			break;

		if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
			/* If asprintf() fails, 'path' is undefined */
			path = NULL;
			break;
		}

		/* Found the enclosure link; stop scanning. */
		break;
	}

end:
	free(tmp2);
	free(tmp1);

	if (dp != NULL)
		closedir(dp);

	return (path);
}
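
/*
 * Example (illustrative sketch): find the enclosure slot for a disk,
 * e.g. so its fault LED can be driven through sysfs.
 *
 *	char *enc = zfs_get_enclosure_sysfs_path("sda");
 *	if (enc != NULL) {
 *		(void) printf("enclosure slot: %s\n", enc);
 *		free(enc);
 *	}
 */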