/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 */

#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <dlfcn.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
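
/*
 * Usage sketch (illustrative, not part of the library): reading a numeric
 * property through zpool_get_prop_int().  This is the same pattern used
 * later in this file, e.g. by zpool_add() to test SPA_VERSION_SPARES.
 * Assumes 'zhp' is a handle obtained from zpool_open().
 *
 *	uint64_t version;
 *	zprop_source_t src;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	if (version < SPA_VERSION_FEATURES)
 *		(void) printf("legacy pool version %llu\n",
 *		    (u_longlong_t)version);
 */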

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		    == 0);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
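
/*
 * Usage sketch (illustrative): fetching a formatted property value with
 * zpool_get_prop().  With literal == B_FALSE sizes are humanized by
 * zfs_nicenum()/zfs_nicebytes(); with B_TRUE the raw number is printed.
 * Assumes a valid 'zhp'.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */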

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
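
/*
 * Worked examples (illustrative) for bootfs_name_valid() with an assumed
 * pool named "tank":
 *
 *	bootfs_name_valid("tank", "")           -> B_TRUE  (clears bootfs)
 *	bootfs_name_valid("tank", "tank")       -> B_TRUE
 *	bootfs_name_valid("tank", "tank/boot")  -> B_TRUE
 *	bootfs_name_valid("tank", "tankers")    -> B_FALSE (prefix only)
 *	bootfs_name_valid("tank", "other/boot") -> B_FALSE (different pool)
 */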

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid, "
				    "only values between %" PRId32 " and "
				    "%" PRId32 " are allowed."),
				    propname, (u_longlong_t)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to reside in the same pool the
			 * property is being set on.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
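
/*
 * Usage sketch (illustrative): both the property name and value are
 * passed as strings; zpool_valid_proplist() parses and validates them.
 * 'hdl' is the consumer's libzfs handle.
 *
 *	if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */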

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
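
/*
 * Usage sketch (illustrative): querying a feature state.  The buffer
 * receives "disabled", "enabled" or "active" for supported features,
 * and "inactive" or "readonly" for unsupported@ ones.
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */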

/*
 * Validate the given pool name, optionally setting an extended error
 * message on 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
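
/*
 * Examples (illustrative) of the reserved-prefix check above: names such
 * as "mirror", "mirror2", "raidz1", "draid2", "spare1" and "log" are
 * rejected at create/import time even though pool_namecheck() accepts
 * them, while ordinary names such as "tank" pass.
 */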

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
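
/*
 * Typical handle lifecycle for library consumers (illustrative sketch;
 * "tank" is an assumed pool name):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *
 *	if (hdl == NULL)
 *		return;
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("state: %s\n", zpool_get_state_str(zhp));
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */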

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			char *bias;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name into the provided buffer.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}
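
/*
 * Worked example (illustrative): a dRAID vdev with double parity, 4 data
 * disks per redundancy group, 10 children and 1 distributed spare is
 * rendered by zpool_draid_name() as "draid2:4d:10c:1s".
 */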

/*
 * Return B_TRUE if the provided name is a dRAID spare name.
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}
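
/*
 * Worked example (illustrative): "draid1-0-2" names the third (index 2)
 * distributed spare of the first (index 0) dRAID1 top-level vdev, so
 * zpool_is_draid_spare("draid1-0-2") returns B_TRUE, while an ordinary
 * device name such as "sda" returns B_FALSE.
 */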

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}

		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, msg);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}
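
/*
 * Usage sketch (illustrative, error handling elided): building a minimal
 * single-disk nvroot and creating a pool.  This mirrors the nvlist layout
 * zpool(8) constructs before calling zpool_create(); "/dev/sdb" and
 * "tank" are assumed example values.
 *
 *	nvlist_t *disk = fnvlist_alloc();
 *	nvlist_t *root = fnvlist_alloc();
 *
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *
 *	(void) zpool_create(hdl, "tank", root, NULL, NULL);
 *
 *	fnvlist_free(disk);
 *	fnvlist_free(root);
 */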

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}
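
/*
 * Usage sketch (illustrative): the checkpoint round trip from a consumer.
 * Rewinding to a checkpoint happens at import time ('zpool import
 * --rewind-to-checkpoint'), not through this API.
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		// ...perform the risky administrative change...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}
 */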

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
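
/*
 * Usage sketch (illustrative): 'log_str' is recorded in the pool history
 * (via zc_history above), so callers typically pass the user-visible
 * command line:
 *
 *	(void) zpool_export(zhp, B_FALSE, "zpool export tank");
 */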

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
1855 int
1856 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1857 nvlist_t *props, int flags)
1858 {
1859 zfs_cmd_t zc = {"\0"};
1860 zpool_load_policy_t policy;
1861 nvlist_t *nv = NULL;
1862 nvlist_t *nvinfo = NULL;
1863 nvlist_t *missing = NULL;
1864 char *thename;
1865 char *origname;
1866 int ret;
1867 int error = 0;
1868 char errbuf[1024];
1869
1870 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1871 &origname) == 0);
1872
1873 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1874 "cannot import pool '%s'"), origname);
1875
1876 if (newname != NULL) {
1877 if (!zpool_name_valid(hdl, B_FALSE, newname))
1878 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1879 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1880 newname));
1881 thename = (char *)newname;
1882 } else {
1883 thename = origname;
1884 }
1885
1886 if (props != NULL) {
1887 uint64_t version;
1888 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1889
1890 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1891 &version) == 0);
1892
1893 if ((props = zpool_valid_proplist(hdl, origname,
1894 props, version, flags, errbuf)) == NULL)
1895 return (-1);
1896 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1897 nvlist_free(props);
1898 return (-1);
1899 }
1900 nvlist_free(props);
1901 }
1902
1903 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1904
1905 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1906 &zc.zc_guid) == 0);
1907
1908 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1909 zcmd_free_nvlists(&zc);
1910 return (-1);
1911 }
1912 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1913 zcmd_free_nvlists(&zc);
1914 return (-1);
1915 }
1916
1917 zc.zc_cookie = flags;
1918 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1919 errno == ENOMEM) {
1920 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1921 zcmd_free_nvlists(&zc);
1922 return (-1);
1923 }
1924 }
1925 if (ret != 0)
1926 error = errno;
1927
1928 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1929
1930 zcmd_free_nvlists(&zc);
1931
1932 zpool_get_load_policy(config, &policy);
1933
1934 if (error) {
1935 char desc[1024];
1936 char aux[256];
1937
1938 /*
1939 * Dry-run failed, but we print out what success
1940 * looks like if we found a best txg
1941 */
1942 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
1943 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1944 B_TRUE, nv);
1945 nvlist_free(nv);
1946 return (-1);
1947 }
1948
1949 if (newname == NULL)
1950 (void) snprintf(desc, sizeof (desc),
1951 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1952 thename);
1953 else
1954 (void) snprintf(desc, sizeof (desc),
1955 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1956 origname, thename);
1957
1958 switch (error) {
1959 case ENOTSUP:
1960 if (nv != NULL && nvlist_lookup_nvlist(nv,
1961 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1962 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1963 (void) printf(dgettext(TEXT_DOMAIN, "This "
1964 "pool uses the following feature(s) not "
1965 "supported by this system:\n"));
1966 zpool_print_unsup_feat(nv);
1967 if (nvlist_exists(nvinfo,
1968 ZPOOL_CONFIG_CAN_RDONLY)) {
1969 (void) printf(dgettext(TEXT_DOMAIN,
1970 "All unsupported features are only "
1971 "required for writing to the pool."
1972 "\nThe pool can be imported using "
1973 "'-o readonly=on'.\n"));
1974 }
1975 }
1976 /*
1977 * Unsupported version.
1978 */
1979 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1980 break;
1981
1982 case EREMOTEIO:
1983 if (nv != NULL && nvlist_lookup_nvlist(nv,
1984 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
1985 char *hostname = "<unknown>";
1986 uint64_t hostid = 0;
1987 mmp_state_t mmp_state;
1988
1989 mmp_state = fnvlist_lookup_uint64(nvinfo,
1990 ZPOOL_CONFIG_MMP_STATE);
1991
1992 if (nvlist_exists(nvinfo,
1993 ZPOOL_CONFIG_MMP_HOSTNAME))
1994 hostname = fnvlist_lookup_string(nvinfo,
1995 ZPOOL_CONFIG_MMP_HOSTNAME);
1996
1997 if (nvlist_exists(nvinfo,
1998 ZPOOL_CONFIG_MMP_HOSTID))
1999 hostid = fnvlist_lookup_uint64(nvinfo,
2000 ZPOOL_CONFIG_MMP_HOSTID);
2001
2002 if (mmp_state == MMP_STATE_ACTIVE) {
2003 (void) snprintf(aux, sizeof (aux),
2004 dgettext(TEXT_DOMAIN, "pool is imp"
2005 "orted on host '%s' (hostid=%lx).\n"
2006 "Export the pool on the other "
2007 "system, then run 'zpool import'."),
2008 hostname, (unsigned long) hostid);
2009 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
2010 (void) snprintf(aux, sizeof (aux),
2011 dgettext(TEXT_DOMAIN, "pool has "
2012 "the multihost property on and "
2013 "the\nsystem's hostid is not set. "
2014 "Set a unique system hostid with "
2015 "the zgenhostid(8) command.\n"));
2016 }
2017
2018 (void) zfs_error_aux(hdl, aux);
2019 }
2020 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2021 break;
2022
2023 case EINVAL:
2024 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2025 break;
2026
2027 case EROFS:
2028 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2029 "one or more devices is read only"));
2030 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2031 break;
2032
2033 case ENXIO:
2034 if (nv && nvlist_lookup_nvlist(nv,
2035 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2036 nvlist_lookup_nvlist(nvinfo,
2037 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2038 (void) printf(dgettext(TEXT_DOMAIN,
2039 "The devices below are missing or "
2040 "corrupted, use '-m' to import the pool "
2041 "anyway:\n"));
2042 print_vdev_tree(hdl, NULL, missing, 2);
2043 (void) printf("\n");
2044 }
2045 (void) zpool_standard_error(hdl, error, desc);
2046 break;
2047
2048 case EEXIST:
2049 (void) zpool_standard_error(hdl, error, desc);
2050 break;
2051
2052 case EBUSY:
2053 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2054 "one or more devices are already in use\n"));
2055 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2056 break;
2057 case ENAMETOOLONG:
2058 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2059 "new name of at least one dataset is longer than "
2060 "the maximum allowable length"));
2061 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2062 break;
2063 default:
2064 (void) zpool_standard_error(hdl, error, desc);
2065 zpool_explain_recover(hdl,
2066 newname ? origname : thename, -error, nv);
2067 break;
2068 }
2069
2070 nvlist_free(nv);
2071 ret = -1;
2072 } else {
2073 zpool_handle_t *zhp;
2074
2075 /*
2076 * This should never fail, but play it safe anyway.
2077 */
2078 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2079 ret = -1;
2080 else if (zhp != NULL)
2081 zpool_close(zhp);
2082 if (policy.zlp_rewind &
2083 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2084 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2085 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2086 }
2087 nvlist_free(nv);
2088 return (0);
2089 }
2090
2091 return (ret);
2092 }
2093
2094 /*
2095 * Translate vdev names to guids. If a vdev_path is determined to be
2096 * unsuitable, then a vd_errlist is allocated and the vdev path and errno
2097 * are added to it.
2098 */
2099 static int
2100 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2101 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2102 {
2103 nvlist_t *errlist = NULL;
2104 int error = 0;
2105
2106 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2107 elem = nvlist_next_nvpair(vds, elem)) {
2108 boolean_t spare, cache;
2109
2110 char *vd_path = nvpair_name(elem);
2111 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2112 NULL);
2113
2114 if ((tgt == NULL) || cache || spare) {
2115 if (errlist == NULL) {
2116 errlist = fnvlist_alloc();
2117 error = EINVAL;
2118 }
2119
2120 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2121 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2122 fnvlist_add_int64(errlist, vd_path, err);
2123 continue;
2124 }
2125
2126 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2127 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2128
2129 char msg[MAXNAMELEN];
2130 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2131 fnvlist_add_string(guids_to_paths, msg, vd_path);
2132 }
2133
2134 if (error != 0) {
2135 verify(errlist != NULL);
2136 if (vd_errlist != NULL)
2137 *vd_errlist = errlist;
2138 else
2139 fnvlist_free(errlist);
2140 }
2141
2142 return (error);
2143 }
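
/*
 * Illustrative sketch (editorial note, not part of the library): for a
 * request naming "/dev/sda" and "/dev/sdb", a successful translation
 * leaves the caller-supplied nvlists shaped roughly as follows, with
 * hypothetical guids:
 *
 *	vdev_guids:     { "/dev/sda" -> 12345, "/dev/sdb" -> 67890 }
 *	guids_to_paths: { "12345" -> "/dev/sda", "67890" -> "/dev/sdb" }
 *
 * A path that is missing, a spare, or an l2cache device instead lands in
 * *vd_errlist with an EZFS_* code and the function returns EINVAL.
 */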
2144
2145 static int
2146 xlate_init_err(int err)
2147 {
2148 switch (err) {
2149 case ENODEV:
2150 return (EZFS_NODEVICE);
2151 case EINVAL:
2152 case EROFS:
2153 return (EZFS_BADDEV);
2154 case EBUSY:
2155 return (EZFS_INITIALIZING);
2156 case ESRCH:
2157 return (EZFS_NO_INITIALIZE);
2158 }
2159 return (err);
2160 }
2161
2162 /*
2163 * Begin, suspend, or cancel the initialization (initializing of all free
2164 * blocks) for the given vdevs in the given pool.
2165 */
2166 static int
2167 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2168 nvlist_t *vds, boolean_t wait)
2169 {
2170 int err;
2171
2172 nvlist_t *vdev_guids = fnvlist_alloc();
2173 nvlist_t *guids_to_paths = fnvlist_alloc();
2174 nvlist_t *vd_errlist = NULL;
2175 nvlist_t *errlist;
2176 nvpair_t *elem;
2177
2178 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2179 guids_to_paths, &vd_errlist);
2180
2181 if (err != 0) {
2182 verify(vd_errlist != NULL);
2183 goto list_errors;
2184 }
2185
2186 err = lzc_initialize(zhp->zpool_name, cmd_type,
2187 vdev_guids, &errlist);
2188
2189 if (err != 0) {
2190 if (errlist != NULL) {
2191 vd_errlist = fnvlist_lookup_nvlist(errlist,
2192 ZPOOL_INITIALIZE_VDEVS);
2193 goto list_errors;
2194 }
2195 (void) zpool_standard_error(zhp->zpool_hdl, err,
2196 dgettext(TEXT_DOMAIN, "operation failed"));
2197 goto out;
2198 }
2199
2200 if (wait) {
2201 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2202 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2203
2204 uint64_t guid = fnvpair_value_uint64(elem);
2205
2206 err = lzc_wait_tag(zhp->zpool_name,
2207 ZPOOL_WAIT_INITIALIZE, guid, NULL);
2208 if (err != 0) {
2209 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2210 err, dgettext(TEXT_DOMAIN, "error "
2211 "waiting for '%s' to initialize"),
2212 nvpair_name(elem));
2213
2214 goto out;
2215 }
2216 }
2217 }
2218 goto out;
2219
2220 list_errors:
2221 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2222 elem = nvlist_next_nvpair(vd_errlist, elem)) {
2223 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2224 char *path;
2225
2226 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2227 &path) != 0)
2228 path = nvpair_name(elem);
2229
2230 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2231 "cannot initialize '%s'", path);
2232 }
2233
2234 out:
2235 fnvlist_free(vdev_guids);
2236 fnvlist_free(guids_to_paths);
2237
2238 if (vd_errlist != NULL)
2239 fnvlist_free(vd_errlist);
2240
2241 return (err == 0 ? 0 : -1);
2242 }
2243
2244 int
2245 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2246 nvlist_t *vds)
2247 {
2248 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2249 }
2250
2251 int
2252 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2253 nvlist_t *vds)
2254 {
2255 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2256 }
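
/*
 * Usage sketch (editorial example; the pool handle 'zhp', device path, and
 * error handling are assumptions): start initializing one disk and block
 * until the operation completes.
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/sda");
 *	if (zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds) != 0)
 *		(void) fprintf(stderr, "initialize failed\n");
 *	fnvlist_free(vds);
 */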
2257
2258 static int
2259 xlate_trim_err(int err)
2260 {
2261 switch (err) {
2262 case ENODEV:
2263 return (EZFS_NODEVICE);
2264 case EINVAL:
2265 case EROFS:
2266 return (EZFS_BADDEV);
2267 case EBUSY:
2268 return (EZFS_TRIMMING);
2269 case ESRCH:
2270 return (EZFS_NO_TRIM);
2271 case EOPNOTSUPP:
2272 return (EZFS_TRIM_NOTSUP);
2273 }
2274 return (err);
2275 }
2276
2277 static int
2278 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2279 {
2280 int err;
2281 nvpair_t *elem;
2282
2283 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2284 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2285
2286 uint64_t guid = fnvpair_value_uint64(elem);
2287
2288 err = lzc_wait_tag(zhp->zpool_name,
2289 ZPOOL_WAIT_TRIM, guid, NULL);
2290 if (err != 0) {
2291 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2292 err, dgettext(TEXT_DOMAIN, "error "
2293 "waiting to trim '%s'"), nvpair_name(elem));
2294
2295 return (err);
2296 }
2297 }
2298 return (0);
2299 }
2300
2301 /*
2302 * Check errlist and report any errors, omitting ones which should be
2303 * suppressed. Returns B_TRUE if any errors were reported.
2304 */
2305 static boolean_t
2306 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2307 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2308 {
2309 nvpair_t *elem;
2310 boolean_t reported_errs = B_FALSE;
2311 int num_vds = 0;
2312 int num_suppressed_errs = 0;
2313
2314 for (elem = nvlist_next_nvpair(vds, NULL);
2315 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2316 num_vds++;
2317 }
2318
2319 for (elem = nvlist_next_nvpair(errlist, NULL);
2320 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2321 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2322 char *path;
2323
2324 /*
2325 * If only the pool was specified and it was not a secure
2326 * trim, then suppress warnings for individual vdevs which
2327 * do not support trimming.
2328 */
2329 if (vd_error == EZFS_TRIM_NOTSUP &&
2330 trim_flags->fullpool &&
2331 !trim_flags->secure) {
2332 num_suppressed_errs++;
2333 continue;
2334 }
2335
2336 reported_errs = B_TRUE;
2337 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2338 &path) != 0)
2339 path = nvpair_name(elem);
2340
2341 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2342 "cannot trim '%s'", path);
2343 }
2344
2345 if (num_suppressed_errs == num_vds) {
2346 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2347 "no devices in pool support trim operations"));
2348 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2349 dgettext(TEXT_DOMAIN, "cannot trim")));
2350 reported_errs = B_TRUE;
2351 }
2352
2353 return (reported_errs);
2354 }
2355
2356 /*
2357 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2358 * the given vdevs in the given pool.
2359 */
2360 int
2361 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2362 trimflags_t *trim_flags)
2363 {
2364 int err;
2365 int retval = 0;
2366
2367 nvlist_t *vdev_guids = fnvlist_alloc();
2368 nvlist_t *guids_to_paths = fnvlist_alloc();
2369 nvlist_t *errlist = NULL;
2370
2371 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2372 guids_to_paths, &errlist);
2373 if (err != 0) {
2374 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
2375 retval = -1;
2376 goto out;
2377 }
2378
2379 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2380 trim_flags->secure, vdev_guids, &errlist);
2381 if (err != 0) {
2382 nvlist_t *vd_errlist;
2383 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2384 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
2385 if (check_trim_errs(zhp, trim_flags, guids_to_paths,
2386 vds, vd_errlist)) {
2387 retval = -1;
2388 goto out;
2389 }
2390 } else {
2391 char msg[1024];
2392
2393 (void) snprintf(msg, sizeof (msg),
2394 dgettext(TEXT_DOMAIN, "operation failed"));
2395 zpool_standard_error(zhp->zpool_hdl, err, msg);
2396 retval = -1;
2397 goto out;
2398 }
2399 }
2400
2401
2402 if (trim_flags->wait)
2403 retval = zpool_trim_wait(zhp, vdev_guids);
2404
2405 out:
2406 if (errlist != NULL)
2407 fnvlist_free(errlist);
2408 fnvlist_free(vdev_guids);
2409 fnvlist_free(guids_to_paths);
2410 return (retval);
2411 }
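
/*
 * Usage sketch (editorial example; the device path and flag values are
 * illustrative assumptions): a secure TRIM of one device that blocks
 * until completion.
 *
 *	trimflags_t flags = { .fullpool = B_FALSE, .secure = B_TRUE,
 *	    .wait = B_TRUE, .rate = 0 };
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/sda");
 *	if (zpool_trim(zhp, POOL_TRIM_START, vds, &flags) != 0)
 *		(void) fprintf(stderr, "trim failed\n");
 *	fnvlist_free(vds);
 */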
2412
2413 /*
2414 * Scan the pool.
2415 */
2416 int
2417 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
2418 {
2419 zfs_cmd_t zc = {"\0"};
2420 char msg[1024];
2421 int err;
2422 libzfs_handle_t *hdl = zhp->zpool_hdl;
2423
2424 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2425 zc.zc_cookie = func;
2426 zc.zc_flags = cmd;
2427
2428 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2429 return (0);
2430
2431 err = errno;
2432
2433 /* ECANCELED on a scrub means we resumed a paused scrub */
2434 if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
2435 cmd == POOL_SCRUB_NORMAL)
2436 return (0);
2437
2438 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
2439 return (0);
2440
2441 if (func == POOL_SCAN_SCRUB) {
2442 if (cmd == POOL_SCRUB_PAUSE) {
2443 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2444 "cannot pause scrubbing %s"), zc.zc_name);
2445 } else {
2446 assert(cmd == POOL_SCRUB_NORMAL);
2447 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2448 "cannot scrub %s"), zc.zc_name);
2449 }
2450 } else if (func == POOL_SCAN_RESILVER) {
2451 assert(cmd == POOL_SCRUB_NORMAL);
2452 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2453 "cannot restart resilver on %s"), zc.zc_name);
2454 } else if (func == POOL_SCAN_NONE) {
2455 (void) snprintf(msg, sizeof (msg),
2456 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
2457 zc.zc_name);
2458 } else {
2459 assert(!"unexpected result");
2460 }
2461
2462 if (err == EBUSY) {
2463 nvlist_t *nvroot;
2464 pool_scan_stat_t *ps = NULL;
2465 uint_t psc;
2466
2467 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2468 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2469 (void) nvlist_lookup_uint64_array(nvroot,
2470 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2471 if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
2472 ps->pss_state == DSS_SCANNING) {
2473 if (cmd == POOL_SCRUB_PAUSE)
2474 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
2475 else
2476 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
2477 } else {
2478 return (zfs_error(hdl, EZFS_RESILVERING, msg));
2479 }
2480 } else if (err == ENOENT) {
2481 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
2482 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2483 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
2484 } else {
2485 return (zpool_standard_error(hdl, err, msg));
2486 }
2487 }
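
/*
 * Usage sketch (editorial note): the common scrub operations all map onto
 * zpool_scan(), assuming a valid handle 'zhp':
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 *
 * The first starts (or resumes) a scrub, the second pauses it, and the
 * third cancels it.
 */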
2488
2489 /*
2490 * Find a vdev that matches the specified search criteria. We use the
2491 * nvpair name to determine how we should look for the device.
2492 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2493 * spare, but FALSE if it's an INUSE spare.
2494 */
2495 static nvlist_t *
2496 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2497 boolean_t *l2cache, boolean_t *log)
2498 {
2499 uint_t c, children;
2500 nvlist_t **child;
2501 nvlist_t *ret;
2502 uint64_t is_log;
2503 char *srchkey;
2504 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2505
2506 /* Nothing to look for */
2507 if (search == NULL || pair == NULL)
2508 return (NULL);
2509
2510 /* Obtain the key we will use to search */
2511 srchkey = nvpair_name(pair);
2512
2513 switch (nvpair_type(pair)) {
2514 case DATA_TYPE_UINT64:
2515 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2516 uint64_t srchval, theguid;
2517
2518 verify(nvpair_value_uint64(pair, &srchval) == 0);
2519 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2520 &theguid) == 0);
2521 if (theguid == srchval)
2522 return (nv);
2523 }
2524 break;
2525
2526 case DATA_TYPE_STRING: {
2527 char *srchval, *val;
2528
2529 verify(nvpair_value_string(pair, &srchval) == 0);
2530 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2531 break;
2532
2533 /*
2534 * Search for the requested value. Special cases:
2535 *
2536 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2537 * "-part1" or "p1". The suffix is hidden from the user,
2538 * but included in the string, so this matches around it.
2539 * - ZPOOL_CONFIG_PATH for short names, where zfs_strcmp_shortname()
2540 * is used to check all possible expanded paths.
2541 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2542 *
2543 * Otherwise, all other searches are simple string compares.
2544 */
2545 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2546 uint64_t wholedisk = 0;
2547
2548 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2549 &wholedisk);
2550 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2551 return (nv);
2552
2553 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2554 char *type, *idx, *end, *p;
2555 uint64_t id, vdev_id;
2556
2557 /*
2558 * Determine our vdev type, keeping in mind
2559 * that the srchval is composed of a type and
2560 * vdev id pair (i.e. mirror-4).
2561 */
2562 if ((type = strdup(srchval)) == NULL)
2563 return (NULL);
2564
2565 if ((p = strrchr(type, '-')) == NULL) {
2566 free(type);
2567 break;
2568 }
2569 idx = p + 1;
2570 *p = '\0';
2571
2572 /*
2573 * If the types don't match then keep looking.
2574 */
2575 if (strncmp(val, type, strlen(val)) != 0) {
2576 free(type);
2577 break;
2578 }
2579
2580 verify(zpool_vdev_is_interior(type));
2581 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2582 &id) == 0);
2583
2584 errno = 0;
2585 vdev_id = strtoull(idx, &end, 10);
2586
2587 free(type);
2588 if (errno != 0)
2589 return (NULL);
2590
2591 /*
2592 * Now verify that we have the correct vdev id.
2593 */
2594 if (vdev_id == id)
2595 return (nv);
2596 }
2597
2598 /*
2599 * Common case
2600 */
2601 if (strcmp(srchval, val) == 0)
2602 return (nv);
2603 break;
2604 }
2605
2606 default:
2607 break;
2608 }
2609
2610 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2611 &child, &children) != 0)
2612 return (NULL);
2613
2614 for (c = 0; c < children; c++) {
2615 if ((ret = vdev_to_nvlist_iter(child[c], search,
2616 avail_spare, l2cache, NULL)) != NULL) {
2617 /*
2618 * The 'is_log' value is only set for the top-level
2619 * vdev, not the leaf vdevs. So we always look up the
2620 * log device from the root of the vdev tree (where
2621 * 'log' is non-NULL).
2622 */
2623 if (log != NULL &&
2624 nvlist_lookup_uint64(child[c],
2625 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2626 is_log) {
2627 *log = B_TRUE;
2628 }
2629 return (ret);
2630 }
2631 }
2632
2633 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2634 &child, &children) == 0) {
2635 for (c = 0; c < children; c++) {
2636 if ((ret = vdev_to_nvlist_iter(child[c], search,
2637 avail_spare, l2cache, NULL)) != NULL) {
2638 *avail_spare = B_TRUE;
2639 return (ret);
2640 }
2641 }
2642 }
2643
2644 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2645 &child, &children) == 0) {
2646 for (c = 0; c < children; c++) {
2647 if ((ret = vdev_to_nvlist_iter(child[c], search,
2648 avail_spare, l2cache, NULL)) != NULL) {
2649 *l2cache = B_TRUE;
2650 return (ret);
2651 }
2652 }
2653 }
2654
2655 return (NULL);
2656 }
2657
2658 /*
2659 * Given a physical path or guid, find the associated vdev.
2660 */
2661 nvlist_t *
2662 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2663 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2664 {
2665 nvlist_t *search, *nvroot, *ret;
2666 uint64_t guid;
2667 char *end;
2668
2669 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2670
2671 guid = strtoull(ppath, &end, 0);
2672 if (guid != 0 && *end == '\0') {
2673 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2674 } else {
2675 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
2676 ppath) == 0);
2677 }
2678
2679 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2680 &nvroot) == 0);
2681
2682 *avail_spare = B_FALSE;
2683 *l2cache = B_FALSE;
2684 if (log != NULL)
2685 *log = B_FALSE;
2686 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2687 nvlist_free(search);
2688
2689 return (ret);
2690 }
2691
2692 /*
2693 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2694 */
2695 static boolean_t
2696 zpool_vdev_is_interior(const char *name)
2697 {
2698 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2699 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2700 strncmp(name,
2701 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2702 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2703 return (B_TRUE);
2704
2705 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
2706 !zpool_is_draid_spare(name))
2707 return (B_TRUE);
2708
2709 return (B_FALSE);
2710 }
2711
2712 nvlist_t *
2713 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2714 boolean_t *l2cache, boolean_t *log)
2715 {
2716 char *end;
2717 nvlist_t *nvroot, *search, *ret;
2718 uint64_t guid;
2719
2720 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2721
2722 guid = strtoull(path, &end, 0);
2723 if (guid != 0 && *end == '\0') {
2724 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2725 } else if (zpool_vdev_is_interior(path)) {
2726 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2727 } else {
2728 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2729 }
2730
2731 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2732 &nvroot) == 0);
2733
2734 *avail_spare = B_FALSE;
2735 *l2cache = B_FALSE;
2736 if (log != NULL)
2737 *log = B_FALSE;
2738 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2739 nvlist_free(search);
2740
2741 return (ret);
2742 }
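
/*
 * Usage sketch (editorial example; the path is an assumption): look up a
 * leaf vdev by path and report which class it belongs to.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "/dev/sda", &spare,
 *	    &l2cache, &log);
 *	if (tgt == NULL)
 *		(void) fprintf(stderr, "no such device\n");
 *	else if (spare)
 *		(void) printf("device is an available hot spare\n");
 */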
2743
2744 static int
2745 vdev_is_online(nvlist_t *nv)
2746 {
2747 uint64_t ival;
2748
2749 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2750 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2751 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2752 return (0);
2753
2754 return (1);
2755 }
2756
2757 /*
2758 * Helper function for vdev_get_physpaths().
2759 */
2760 static int
2761 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2762 size_t *bytes_written)
2763 {
2764 size_t bytes_left, pos, rsz;
2765 char *tmppath;
2766 const char *format;
2767
2768 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2769 &tmppath) != 0)
2770 return (EZFS_NODEVICE);
2771
2772 pos = *bytes_written;
2773 bytes_left = physpath_size - pos;
2774 format = (pos == 0) ? "%s" : " %s";
2775
2776 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2777 *bytes_written += rsz;
2778
2779 if (rsz >= bytes_left) {
2780 /* if physpath was not copied properly, clear it */
2781 if (bytes_left != 0) {
2782 physpath[pos] = 0;
2783 }
2784 return (EZFS_NOSPC);
2785 }
2786 return (0);
2787 }
2788
2789 static int
2790 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2791 size_t *rsz, boolean_t is_spare)
2792 {
2793 char *type;
2794 int ret;
2795
2796 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2797 return (EZFS_INVALCONFIG);
2798
2799 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2800 /*
2801 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2802 * For a spare vdev, we only want to boot from the active
2803 * spare device.
2804 */
2805 if (is_spare) {
2806 uint64_t spare = 0;
2807 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2808 &spare);
2809 if (!spare)
2810 return (EZFS_INVALCONFIG);
2811 }
2812
2813 if (vdev_is_online(nv)) {
2814 if ((ret = vdev_get_one_physpath(nv, physpath,
2815 phypath_size, rsz)) != 0)
2816 return (ret);
2817 }
2818 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2819 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2820 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2821 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2822 nvlist_t **child;
2823 uint_t count;
2824 int i, ret;
2825
2826 if (nvlist_lookup_nvlist_array(nv,
2827 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2828 return (EZFS_INVALCONFIG);
2829
2830 for (i = 0; i < count; i++) {
2831 ret = vdev_get_physpaths(child[i], physpath,
2832 phypath_size, rsz, is_spare);
2833 if (ret == EZFS_NOSPC)
2834 return (ret);
2835 }
2836 }
2837
2838 return (EZFS_POOL_INVALARG);
2839 }
2840
2841 /*
2842 * Get phys_path for a root pool config.
2843 * Return 0 on success; non-zero on failure.
2844 */
2845 static int
2846 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2847 {
2848 size_t rsz;
2849 nvlist_t *vdev_root;
2850 nvlist_t **child;
2851 uint_t count;
2852 char *type;
2853
2854 rsz = 0;
2855
2856 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2857 &vdev_root) != 0)
2858 return (EZFS_INVALCONFIG);
2859
2860 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2861 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2862 &child, &count) != 0)
2863 return (EZFS_INVALCONFIG);
2864
2865 /*
2866 * A root pool can only have a single top-level vdev.
2867 */
2868 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2869 return (EZFS_POOL_INVALARG);
2870
2871 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2872 B_FALSE);
2873
2874 /* No online devices */
2875 if (rsz == 0)
2876 return (EZFS_NODEVICE);
2877
2878 return (0);
2879 }
2880
2881 /*
2882 * Get phys_path for a root pool.
2883 * Return 0 on success; non-zero on failure.
2884 */
2885 int
2886 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2887 {
2888 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2889 phypath_size));
2890 }
2891
2892 /*
2893 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2894 *
2895 * If is_spare, is_l2cache, or is_log is non-NULL, then store in it
2896 * whether the vdev is a spare, l2cache, or log device. If they're
2897 * NULL, they are ignored.
2898 */
2899 static uint64_t
2900 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2901 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2902 {
2903 uint64_t guid;
2904 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2905 nvlist_t *tgt;
2906
2907 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2908 &log)) == NULL)
2909 return (0);
2910
2911 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2912 if (is_spare != NULL)
2913 *is_spare = spare;
2914 if (is_l2cache != NULL)
2915 *is_l2cache = l2cache;
2916 if (is_log != NULL)
2917 *is_log = log;
2918
2919 return (guid);
2920 }
2921
2922 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2923 uint64_t
2924 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2925 {
2926 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2927 }
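
/*
 * Usage sketch (editorial example; the path and aux reason are
 * assumptions): resolve a path to a guid and hand it to one of the
 * guid-based interfaces below, e.g. zpool_vdev_fault().
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda");
 *	if (guid != 0)
 *		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_EXTERNAL);
 */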
2928
2929 /*
2930 * Bring the specified vdev online. The 'flags' parameter is a set of the
2931 * ZFS_ONLINE_* flags.
2932 */
2933 int
2934 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2935 vdev_state_t *newstate)
2936 {
2937 zfs_cmd_t zc = {"\0"};
2938 char msg[1024];
2939 char *pathname;
2940 nvlist_t *tgt;
2941 boolean_t avail_spare, l2cache, islog;
2942 libzfs_handle_t *hdl = zhp->zpool_hdl;
2943 int error;
2944
2945 if (flags & ZFS_ONLINE_EXPAND) {
2946 (void) snprintf(msg, sizeof (msg),
2947 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2948 } else {
2949 (void) snprintf(msg, sizeof (msg),
2950 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2951 }
2952
2953 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2954 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2955 &islog)) == NULL)
2956 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2957
2958 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2959
2960 if (avail_spare)
2961 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2962
2963 if ((flags & ZFS_ONLINE_EXPAND ||
2964 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2965 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2966 uint64_t wholedisk = 0;
2967
2968 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2969 &wholedisk);
2970
2971 /*
2972 * XXX - L2ARC 1.0 devices can't support expansion.
2973 */
2974 if (l2cache) {
2975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2976 "cannot expand cache devices"));
2977 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2978 }
2979
2980 if (wholedisk) {
2981 const char *fullpath = path;
2982 char buf[MAXPATHLEN];
2983
2984 if (path[0] != '/') {
2985 error = zfs_resolve_shortname(path, buf,
2986 sizeof (buf));
2987 if (error != 0)
2988 return (zfs_error(hdl, EZFS_NODEVICE,
2989 msg));
2990
2991 fullpath = buf;
2992 }
2993
2994 error = zpool_relabel_disk(hdl, fullpath, msg);
2995 if (error != 0)
2996 return (error);
2997 }
2998 }
2999
3000 zc.zc_cookie = VDEV_STATE_ONLINE;
3001 zc.zc_obj = flags;
3002
3003 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
3004 if (errno == EINVAL) {
3005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
3006 "from this pool into a new one. Use '%s' "
3007 "instead"), "zpool detach");
3008 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
3009 }
3010 return (zpool_standard_error(hdl, errno, msg));
3011 }
3012
3013 *newstate = zc.zc_cookie;
3014 return (0);
3015 }
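
/*
 * Usage sketch (editorial example; the path is an assumption): bring a
 * disk back online, asking for expansion, and inspect the state the
 * kernel reports back.
 *
 *	vdev_state_t newstate;
 *	if (zpool_vdev_online(zhp, "/dev/sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device onlined but not healthy\n");
 */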
3016
3017 /*
3018 * Take the specified vdev offline.
3019 */
3020 int
3021 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3022 {
3023 zfs_cmd_t zc = {"\0"};
3024 char msg[1024];
3025 nvlist_t *tgt;
3026 boolean_t avail_spare, l2cache;
3027 libzfs_handle_t *hdl = zhp->zpool_hdl;
3028
3029 (void) snprintf(msg, sizeof (msg),
3030 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3031
3032 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3033 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3034 NULL)) == NULL)
3035 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3036
3037 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3038
3039 if (avail_spare)
3040 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3041
3042 zc.zc_cookie = VDEV_STATE_OFFLINE;
3043 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3044
3045 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3046 return (0);
3047
3048 switch (errno) {
3049 case EBUSY:
3050
3051 /*
3052 * There are no other replicas of this device.
3053 */
3054 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3055
3056 case EEXIST:
3057 /*
3058 * The log device has unplayed logs.
3059 */
3060 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
3061
3062 default:
3063 return (zpool_standard_error(hdl, errno, msg));
3064 }
3065 }
3066
3067 /*
3068 * Mark the given vdev faulted.
3069 */
3070 int
3071 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3072 {
3073 zfs_cmd_t zc = {"\0"};
3074 char msg[1024];
3075 libzfs_handle_t *hdl = zhp->zpool_hdl;
3076
3077 (void) snprintf(msg, sizeof (msg),
3078 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3079
3080 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3081 zc.zc_guid = guid;
3082 zc.zc_cookie = VDEV_STATE_FAULTED;
3083 zc.zc_obj = aux;
3084
3085 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3086 return (0);
3087
3088 switch (errno) {
3089 case EBUSY:
3090
3091 /*
3092 * There are no other replicas of this device.
3093 */
3094 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3095
3096 default:
3097 return (zpool_standard_error(hdl, errno, msg));
3098 }
3099
3100 }
3101
3102 /*
3103 * Mark the given vdev degraded.
3104 */
3105 int
3106 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3107 {
3108 zfs_cmd_t zc = {"\0"};
3109 char msg[1024];
3110 libzfs_handle_t *hdl = zhp->zpool_hdl;
3111
3112 (void) snprintf(msg, sizeof (msg),
3113 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
3114
3115 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3116 zc.zc_guid = guid;
3117 zc.zc_cookie = VDEV_STATE_DEGRADED;
3118 zc.zc_obj = aux;
3119
3120 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3121 return (0);
3122
3123 return (zpool_standard_error(hdl, errno, msg));
3124 }
3125
3126 /*
3127 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3128 * a hot spare.
3129 */
3130 static boolean_t
3131 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3132 {
3133 nvlist_t **child;
3134 uint_t c, children;
3135 char *type;
3136
3137 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3138 &children) == 0) {
3139 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
3140 &type) == 0);
3141
3142 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3143 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3144 children == 2 && child[which] == tgt)
3145 return (B_TRUE);
3146
3147 for (c = 0; c < children; c++)
3148 if (is_replacing_spare(child[c], tgt, which))
3149 return (B_TRUE);
3150 }
3151
3152 return (B_FALSE);
3153 }
3154
3155 /*
3156 * Attach new_disk (fully described by nvroot) to old_disk.
3157 * If 'replacing' is specified, the new disk will replace the old one.
3158 */
3159 int
3160 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3161 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3162 {
3163 zfs_cmd_t zc = {"\0"};
3164 char msg[1024];
3165 int ret;
3166 nvlist_t *tgt;
3167 boolean_t avail_spare, l2cache, islog;
3168 uint64_t val;
3169 char *newname;
3170 nvlist_t **child;
3171 uint_t children;
3172 nvlist_t *config_root;
3173 libzfs_handle_t *hdl = zhp->zpool_hdl;
3174
3175 if (replacing)
3176 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3177 "cannot replace %s with %s"), old_disk, new_disk);
3178 else
3179 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3180 "cannot attach %s to %s"), new_disk, old_disk);
3181
3182 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3183 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3184 &islog)) == NULL)
3185 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3186
3187 if (avail_spare)
3188 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3189
3190 if (l2cache)
3191 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3192
3193 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3194 zc.zc_cookie = replacing;
3195 zc.zc_simple = rebuild;
3196
3197 if (rebuild &&
3198 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3199 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3200 "the loaded zfs module doesn't support device rebuilds"));
3201 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
3202 }
3203
3204 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3205 &child, &children) != 0 || children != 1) {
3206 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3207 "new device must be a single disk"));
3208 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
3209 }
3210
3211 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3212 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
3213
3214 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3215 return (-1);
3216
3217 /*
3218 * If the target is a hot spare that has been swapped in, we can only
3219 * replace it with another hot spare.
3220 */
3221 if (replacing &&
3222 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3223 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3224 NULL) == NULL || !avail_spare) &&
3225 is_replacing_spare(config_root, tgt, 1)) {
3226 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3227 "can only be replaced by another hot spare"));
3228 free(newname);
3229 return (zfs_error(hdl, EZFS_BADTARGET, msg));
3230 }
3231
3232 free(newname);
3233
3234 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
3235 return (-1);
3236
3237 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3238
3239 zcmd_free_nvlists(&zc);
3240
3241 if (ret == 0)
3242 return (0);
3243
3244 switch (errno) {
3245 case ENOTSUP:
3246 /*
3247 * Can't attach to or replace this type of vdev.
3248 */
3249 if (replacing) {
3250 uint64_t version = zpool_get_prop_int(zhp,
3251 ZPOOL_PROP_VERSION, NULL);
3252
3253 if (islog) {
3254 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3255 "cannot replace a log with a spare"));
3256 } else if (rebuild) {
3257 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3258 "only mirror and dRAID vdevs support "
3259 "sequential reconstruction"));
3260 } else if (zpool_is_draid_spare(new_disk)) {
3261 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3262 "dRAID spares can only replace child "
3263 "devices in their parent's dRAID vdev"));
3264 } else if (version >= SPA_VERSION_MULTI_REPLACE) {
3265 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3266 "already in replacing/spare config; wait "
3267 "for completion or use 'zpool detach'"));
3268 } else {
3269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3270 "cannot replace a replacing device"));
3271 }
3272 } else {
3273 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3274 "can only attach to mirrors and top-level "
3275 "disks"));
3276 }
3277 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3278 break;
3279
3280 case EINVAL:
3281 /*
3282 * The new device must be a single disk.
3283 */
3284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3285 "new device must be a single disk"));
3286 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3287 break;
3288
3289 case EBUSY:
3290 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
3291 "or device removal is in progress"),
3292 new_disk);
3293 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3294 break;
3295
3296 case EOVERFLOW:
3297 /*
3298 * The new device is too small.
3299 */
3300 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3301 "device is too small"));
3302 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3303 break;
3304
3305 case EDOM:
3306 /*
3307 * The new device has a different optimal sector size.
3308 */
3309 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3310 "new device has a different optimal sector size; use the "
3311 "option '-o ashift=N' to override the optimal size"));
3312 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3313 break;
3314
3315 case ENAMETOOLONG:
3316 /*
3317 * The resulting top-level vdev spec won't fit in the label.
3318 */
3319 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
3320 break;
3321
3322 default:
3323 (void) zpool_standard_error(hdl, errno, msg);
3324 }
3325
3326 return (-1);
3327 }
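
/*
 * Usage sketch (editorial note): 'nvroot' must describe exactly one new
 * disk; zpool(8) builds it from the command line (see make_root_vdev()
 * in cmd/zpool), so only the call shape is sketched here. Passing
 * replacing = B_TRUE and rebuild = B_FALSE requests a traditional
 * (healing) replacement of old_disk by new_disk.
 *
 *	if (zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot,
 *	    B_TRUE, B_FALSE) != 0)
 *		(void) fprintf(stderr, "replace failed\n");
 */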
3328
3329 /*
3330 * Detach the specified device.
3331 */
3332 int
3333 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3334 {
3335 zfs_cmd_t zc = {"\0"};
3336 char msg[1024];
3337 nvlist_t *tgt;
3338 boolean_t avail_spare, l2cache;
3339 libzfs_handle_t *hdl = zhp->zpool_hdl;
3340
3341 (void) snprintf(msg, sizeof (msg),
3342 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3343
3344 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3345 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3346 NULL)) == NULL)
3347 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3348
3349 if (avail_spare)
3350 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3351
3352 if (l2cache)
3353 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3354
3355 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3356
3357 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3358 return (0);
3359
3360 switch (errno) {
3361
3362 case ENOTSUP:
3363 /*
3364 * Can't detach from this type of vdev.
3365 */
3366 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3367 "applicable to mirror and replacing vdevs"));
3368 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3369 break;
3370
3371 case EBUSY:
3372 /*
3373 * There are no other replicas of this device.
3374 */
3375 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3376 break;
3377
3378 default:
3379 (void) zpool_standard_error(hdl, errno, msg);
3380 }
3381
3382 return (-1);
3383 }
3384
3385 /*
3386 * Find a mirror vdev in the source nvlist.
3387 *
3388 * The mchild array contains a list of disks in one of the top-level mirrors
3389 * of the source pool. The schild array contains a list of disks that the
3390 * user specified on the command line. We loop over the mchild array to
3391 * see if any entry in the schild array matches.
3392 *
3393 * If a disk in the mchild array is found in the schild array, we return
3394 * the index of that entry. Otherwise we return -1.
3395 */
3396 static int
3397 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3398 nvlist_t **schild, uint_t schildren)
3399 {
3400 uint_t mc;
3401
3402 for (mc = 0; mc < mchildren; mc++) {
3403 uint_t sc;
3404 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3405 mchild[mc], 0);
3406
3407 for (sc = 0; sc < schildren; sc++) {
3408 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3409 schild[sc], 0);
3410 boolean_t result = (strcmp(mpath, spath) == 0);
3411
3412 free(spath);
3413 if (result) {
3414 free(mpath);
3415 return (mc);
3416 }
3417 }
3418
3419 free(mpath);
3420 }
3421
3422 return (-1);
3423 }
3424
3425 /*
3426 * Split a mirror pool. If *newroot is NULL, then a new nvlist
3427 * is generated, and it is the caller's responsibility to free it.
3428 */
3429 int
3430 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3431 nvlist_t *props, splitflags_t flags)
3432 {
3433 zfs_cmd_t zc = {"\0"};
3434 char msg[1024];
3435 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3436 nvlist_t **varray = NULL, *zc_props = NULL;
3437 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3438 libzfs_handle_t *hdl = zhp->zpool_hdl;
3439 uint64_t vers, readonly = B_FALSE;
3440 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3441 int retval = 0;
3442
3443 (void) snprintf(msg, sizeof (msg),
3444 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3445
3446 if (!zpool_name_valid(hdl, B_FALSE, newname))
3447 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3448
3449 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3450 (void) fprintf(stderr, gettext("Internal error: unable to "
3451 "retrieve pool configuration\n"));
3452 return (-1);
3453 }
3454
3455 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3456 == 0);
3457 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3458
3459 if (props) {
3460 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3461 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3462 props, vers, flags, msg)) == NULL)
3463 return (-1);
3464 (void) nvlist_lookup_uint64(zc_props,
3465 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3466 if (readonly) {
3467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3468 "property %s can only be set at import time"),
3469 zpool_prop_to_name(ZPOOL_PROP_READONLY));
3470 return (-1);
3471 }
3472 }
3473
3474 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3475 &children) != 0) {
3476 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3477 "Source pool is missing vdev tree"));
3478 nvlist_free(zc_props);
3479 return (-1);
3480 }
3481
3482 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3483 vcount = 0;
3484
3485 if (*newroot == NULL ||
3486 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3487 &newchild, &newchildren) != 0)
3488 newchildren = 0;
3489
3490 for (c = 0; c < children; c++) {
3491 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3492 char *type;
3493 nvlist_t **mchild, *vdev;
3494 uint_t mchildren;
3495 int entry;
3496
3497 /*
3498 * Unlike cache & spares, slogs are stored in the
3499 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3500 */
3501 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3502 &is_log);
3503 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3504 &is_hole);
3505 if (is_log || is_hole) {
3506 /*
3507 * Create a hole vdev and put it in the config.
3508 */
3509 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3510 goto out;
3511 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3512 VDEV_TYPE_HOLE) != 0)
3513 goto out;
3514 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3515 1) != 0)
3516 goto out;
3517 if (lastlog == 0)
3518 lastlog = vcount;
3519 varray[vcount++] = vdev;
3520 continue;
3521 }
3522 lastlog = 0;
3523 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3524 == 0);
3525
3526 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
3527 vdev = child[c];
3528 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3529 goto out;
3530 continue;
3531 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3532 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3533 "Source pool must be composed only of mirrors\n"));
3534 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3535 goto out;
3536 }
3537
3538 verify(nvlist_lookup_nvlist_array(child[c],
3539 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3540
3541 /* find or add an entry for this top-level vdev */
3542 if (newchildren > 0 &&
3543 (entry = find_vdev_entry(zhp, mchild, mchildren,
3544 newchild, newchildren)) >= 0) {
3545 /* We found a disk that the user specified. */
3546 vdev = mchild[entry];
3547 ++found;
3548 } else {
3549 /* User didn't specify a disk for this vdev. */
3550 vdev = mchild[mchildren - 1];
3551 }
3552
3553 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3554 goto out;
3555 }
3556
3557 /* did we find every disk the user specified? */
3558 if (found != newchildren) {
3559 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3560 "include at most one disk from each mirror"));
3561 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3562 goto out;
3563 }
3564
3565 /* Prepare the nvlist for populating. */
3566 if (*newroot == NULL) {
3567 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3568 goto out;
3569 freelist = B_TRUE;
3570 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3571 VDEV_TYPE_ROOT) != 0)
3572 goto out;
3573 } else {
3574 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3575 }
3576
3577 /* Add all the children we found */
3578 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3579 lastlog == 0 ? vcount : lastlog) != 0)
3580 goto out;
3581
3582 /*
3583 * If we're just doing a dry run, exit now with success.
3584 */
3585 if (flags.dryrun) {
3586 memory_err = B_FALSE;
3587 freelist = B_FALSE;
3588 goto out;
3589 }
3590
3591 /* now build up the config list & call the ioctl */
3592 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3593 goto out;
3594
3595 if (nvlist_add_nvlist(newconfig,
3596 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3597 nvlist_add_string(newconfig,
3598 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3599 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3600 goto out;
3601
3602 /*
3603 * The new pool is automatically part of the namespace unless we
3604 * explicitly export it.
3605 */
3606 if (!flags.import)
3607 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3608 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3609 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3610 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3611 goto out;
3612 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3613 goto out;
3614
3615 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3616 retval = zpool_standard_error(hdl, errno, msg);
3617 goto out;
3618 }
3619
3620 freelist = B_FALSE;
3621 memory_err = B_FALSE;
3622
3623 out:
3624 if (varray != NULL) {
3625 int v;
3626
3627 for (v = 0; v < vcount; v++)
3628 nvlist_free(varray[v]);
3629 free(varray);
3630 }
3631 zcmd_free_nvlists(&zc);
3632 nvlist_free(zc_props);
3633 nvlist_free(newconfig);
3634 if (freelist) {
3635 nvlist_free(*newroot);
3636 *newroot = NULL;
3637 }
3638
3639 if (retval != 0)
3640 return (retval);
3641
3642 if (memory_err)
3643 return (no_memory(hdl));
3644
3645 return (0);
3646 }
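
/*
 * Usage sketch (editorial example; the pool name is an assumption): a
 * dry-run split that only computes the layout of the would-be pool. On
 * success the caller owns *newroot and must free it.
 *
 *	splitflags_t flags = { .dryrun = 1, .import = 0 };
 *	nvlist_t *newroot = NULL;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0 &&
 *	    newroot != NULL)
 *		nvlist_free(newroot);
 */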
3647
3648 /*
3649 * Remove the given device.
3650 */
3651 int
3652 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3653 {
3654 zfs_cmd_t zc = {"\0"};
3655 char msg[1024];
3656 nvlist_t *tgt;
3657 boolean_t avail_spare, l2cache, islog;
3658 libzfs_handle_t *hdl = zhp->zpool_hdl;
3659 uint64_t version;
3660
3661 (void) snprintf(msg, sizeof (msg),
3662 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3663
3664 if (zpool_is_draid_spare(path)) {
3665 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3666 "dRAID spares cannot be removed"));
3667 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3668 }
3669
3670 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3671 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3672 &islog)) == NULL)
3673 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3674
3675 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3676 if (islog && version < SPA_VERSION_HOLES) {
3677 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3678 "pool must be upgraded to support log removal"));
3679 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3680 }
3681
3682 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3683
3684 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3685 return (0);
3686
3687 switch (errno) {
3688
3689 case EINVAL:
3690 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3691 "invalid config; all top-level vdevs must "
3692 "have the same sector size and not be raidz."));
3693 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3694 break;
3695
3696 case EBUSY:
3697 if (islog) {
3698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3699 "Mount encrypted datasets to replay logs."));
3700 } else {
3701 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3702 "Pool busy; removal may already be in progress"));
3703 }
3704 (void) zfs_error(hdl, EZFS_BUSY, msg);
3705 break;
3706
3707 case EACCES:
3708 if (islog) {
3709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3710 "Mount encrypted datasets to replay logs."));
3711 (void) zfs_error(hdl, EZFS_BUSY, msg);
3712 } else {
3713 (void) zpool_standard_error(hdl, errno, msg);
3714 }
3715 break;
3716
3717 default:
3718 (void) zpool_standard_error(hdl, errno, msg);
3719 }
3720 return (-1);
3721 }
3722
3723 int
3724 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3725 {
3726 zfs_cmd_t zc;
3727 char msg[1024];
3728 libzfs_handle_t *hdl = zhp->zpool_hdl;
3729
3730 (void) snprintf(msg, sizeof (msg),
3731 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3732
3733 bzero(&zc, sizeof (zc));
3734 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3735 zc.zc_cookie = 1;
3736
3737 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3738 return (0);
3739
3740 return (zpool_standard_error(hdl, errno, msg));
3741 }
3742
3743 int
3744 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3745 uint64_t *sizep)
3746 {
3747 char msg[1024];
3748 nvlist_t *tgt;
3749 boolean_t avail_spare, l2cache, islog;
3750 libzfs_handle_t *hdl = zhp->zpool_hdl;
3751
3752 (void) snprintf(msg, sizeof (msg),
3753 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3754 path);
3755
3756 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3757 &islog)) == NULL)
3758 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3759
3760 if (avail_spare || l2cache || islog) {
3761 *sizep = 0;
3762 return (0);
3763 }
3764
3765 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3767 "indirect size not available"));
3768 return (zfs_error(hdl, EINVAL, msg));
3769 }
3770 return (0);
3771 }
3772
3773 /*
3774 * Clear the errors for the pool, or the particular device if specified.
3775 */
3776 int
3777 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3778 {
3779 zfs_cmd_t zc = {"\0"};
3780 char msg[1024];
3781 nvlist_t *tgt;
3782 zpool_load_policy_t policy;
3783 boolean_t avail_spare, l2cache;
3784 libzfs_handle_t *hdl = zhp->zpool_hdl;
3785 nvlist_t *nvi = NULL;
3786 int error;
3787
3788 if (path)
3789 (void) snprintf(msg, sizeof (msg),
3790 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3791 path);
3792 else
3793 (void) snprintf(msg, sizeof (msg),
3794 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3795 zhp->zpool_name);
3796
3797 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3798 if (path) {
3799 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3800 &l2cache, NULL)) == NULL)
3801 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3802
3803 /*
3804 * Don't allow error clearing for hot spares. Do allow
3805 * error clearing for l2cache devices.
3806 */
3807 if (avail_spare)
3808 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3809
3810 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3811 &zc.zc_guid) == 0);
3812 }
3813
3814 zpool_get_load_policy(rewindnvl, &policy);
3815 zc.zc_cookie = policy.zlp_rewind;
3816
3817 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3818 return (-1);
3819
3820 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3821 return (-1);
3822
3823 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3824 errno == ENOMEM) {
3825 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3826 zcmd_free_nvlists(&zc);
3827 return (-1);
3828 }
3829 }
3830
3831 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
3832 errno != EPERM && errno != EACCES)) {
3833 if (policy.zlp_rewind &
3834 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3835 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3836 zpool_rewind_exclaim(hdl, zc.zc_name,
3837 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
3838 nvi);
3839 nvlist_free(nvi);
3840 }
3841 zcmd_free_nvlists(&zc);
3842 return (0);
3843 }
3844
3845 zcmd_free_nvlists(&zc);
3846 return (zpool_standard_error(hdl, errno, msg));
3847 }
3848
3849 /*
3850 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3851 */
3852 int
3853 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3854 {
3855 zfs_cmd_t zc = {"\0"};
3856 char msg[1024];
3857 libzfs_handle_t *hdl = zhp->zpool_hdl;
3858
3859 (void) snprintf(msg, sizeof (msg),
3860 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3861 (u_longlong_t)guid);
3862
3863 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3864 zc.zc_guid = guid;
3865 zc.zc_cookie = ZPOOL_NO_REWIND;
3866
3867 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
3868 return (0);
3869
3870 return (zpool_standard_error(hdl, errno, msg));
3871 }
3872
3873 /*
3874 * Change the GUID for a pool.
3875 */
3876 int
3877 zpool_reguid(zpool_handle_t *zhp)
3878 {
3879 char msg[1024];
3880 libzfs_handle_t *hdl = zhp->zpool_hdl;
3881 zfs_cmd_t zc = {"\0"};
3882
3883 (void) snprintf(msg, sizeof (msg),
3884 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3885
3886 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3887 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3888 return (0);
3889
3890 return (zpool_standard_error(hdl, errno, msg));
3891 }
3892
3893 /*
3894 * Reopen the pool.
3895 */
3896 int
3897 zpool_reopen_one(zpool_handle_t *zhp, void *data)
3898 {
3899 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3900 const char *pool_name = zpool_get_name(zhp);
3901 boolean_t *scrub_restart = data;
3902 int error;
3903
3904 error = lzc_reopen(pool_name, *scrub_restart);
3905 if (error) {
3906 return (zpool_standard_error_fmt(hdl, error,
3907 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
3908 }
3909
3910 return (0);
3911 }
3912
3913 /* call into libzfs_core to execute the sync IOCTL per pool */
3914 int
3915 zpool_sync_one(zpool_handle_t *zhp, void *data)
3916 {
3917 int ret;
3918 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3919 const char *pool_name = zpool_get_name(zhp);
3920 boolean_t *force = data;
3921 nvlist_t *innvl = fnvlist_alloc();
3922
3923 fnvlist_add_boolean_value(innvl, "force", *force);
3924 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3925 nvlist_free(innvl);
3926 return (zpool_standard_error_fmt(hdl, ret,
3927 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3928 }
3929 nvlist_free(innvl);
3930
3931 return (0);
3932 }
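
/*
 * Usage sketch (editorial example; 'g_zfs' stands in for a handle from
 * libzfs_init()): force-sync every imported pool by driving this
 * callback with zpool_iter().
 *
 *	boolean_t force = B_TRUE;
 *	(void) zpool_iter(g_zfs, zpool_sync_one, &force);
 */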
3933
3934 #define PATH_BUF_LEN 64
3935
3936 /*
3937 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3938 * we use that, stripping off any leading directory components; if not, we
3939 * use the type. We also check if this is a whole disk, in which case we
3940 * strip off the trailing partition name.
3941 *
3942 * This routine is also responsible for identifying when disks have been
3943 * reconfigured in a new location. The kernel will have opened the device by
3944 * devid, but the path will still refer to the old location. To catch this, we
3945 * first do a path -> devid translation (which is fast for the common case). If
3946 * the devid matches, we're done. If not, we do a reverse devid -> path
3947 * translation and issue the appropriate ioctl() to update the path of the vdev.
3948 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3949 * of these checks.
3950 */
3951 char *
3952 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3953 int name_flags)
3954 {
3955 char *path, *type, *env;
3956 uint64_t value;
3957 char buf[PATH_BUF_LEN];
3958 char tmpbuf[PATH_BUF_LEN];
3959
3960 /*
3961 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
3962 * zpool name that will be displayed to the user.
3963 */
3964 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3965 if (zhp != NULL && strcmp(type, "root") == 0)
3966 return (zfs_strdup(hdl, zpool_get_name(zhp)));
3967
3968 env = getenv("ZPOOL_VDEV_NAME_PATH");
3969 if (env && (strtoul(env, NULL, 0) > 0 ||
3970 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3971 name_flags |= VDEV_NAME_PATH;
3972
3973 env = getenv("ZPOOL_VDEV_NAME_GUID");
3974 if (env && (strtoul(env, NULL, 0) > 0 ||
3975 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3976 name_flags |= VDEV_NAME_GUID;
3977
3978 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3979 if (env && (strtoul(env, NULL, 0) > 0 ||
3980 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3981 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3982
3983 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3984 name_flags & VDEV_NAME_GUID) {
3985 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3986 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3987 path = buf;
3988 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3989 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3990 char *rp = realpath(path, NULL);
3991 if (rp) {
3992 strlcpy(buf, rp, sizeof (buf));
3993 path = buf;
3994 free(rp);
3995 }
3996 }
3997
3998 /*
3999 * For a block device only use the name.
4000 */
4001 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
4002 !(name_flags & VDEV_NAME_PATH)) {
4003 path = zfs_strip_path(path);
4004 }
4005
4006 /*
4007 * Remove the partition from the path if this is a whole disk.
4008 */
4009 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
4010 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
4011 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
4012 return (zfs_strip_partition(path));
4013 }
4014 } else {
4015 path = type;
4016
4017 /*
4018 * If it's a raidz device, we need to stick in the parity level.
4019 */
4020 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
4021 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
4022 &value) == 0);
4023 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
4024 (u_longlong_t)value);
4025 path = buf;
4026 }
4027
4028 /*
4029 * If it's a dRAID device, we add parity, groups, and spares.
4030 */
4031 if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
4032 uint64_t ndata, nparity, nspares;
4033 nvlist_t **child;
4034 uint_t children;
4035
4036 verify(nvlist_lookup_nvlist_array(nv,
4037 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
4038 verify(nvlist_lookup_uint64(nv,
4039 ZPOOL_CONFIG_NPARITY, &nparity) == 0);
4040 verify(nvlist_lookup_uint64(nv,
4041 ZPOOL_CONFIG_DRAID_NDATA, &ndata) == 0);
4042 verify(nvlist_lookup_uint64(nv,
4043 ZPOOL_CONFIG_DRAID_NSPARES, &nspares) == 0);
4044
4045 path = zpool_draid_name(buf, sizeof (buf), ndata,
4046 nparity, nspares, children);
4047 }
4048
4049 /*
4050 * We identify each top-level vdev by using a <type-id>
4051 * naming convention.
4052 */
4053 if (name_flags & VDEV_NAME_TYPE_ID) {
4054 uint64_t id;
4055 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
4056 &id) == 0);
4057 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
4058 path, (u_longlong_t)id);
4059 path = tmpbuf;
4060 }
4061 }
4062
4063 return (zfs_strdup(hdl, path));
4064 }
4065
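/*
 * Editor's illustrative sketch (not part of the original source):
 * resolving a display name for a pool's root vdev. 'hdl' and 'zhp' are
 * assumed open handles; the returned string must be freed by the
 * caller.
 *
 *	nvlist_t *config = zpool_get_config(zhp, NULL);
 *	nvlist_t *nvroot;
 *	char *name;
 *
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	name = zpool_vdev_name(hdl, zhp, nvroot, VDEV_NAME_TYPE_ID);
 *	(for the root vdev this is simply the pool name)
 *	free(name);
 */
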
4066 static int
4067 zbookmark_mem_compare(const void *a, const void *b)
4068 {
4069 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
4070 }
4071
4072 /*
4073 * Retrieve the persistent error log, uniquify the members, and return to the
4074 * caller.
4075 */
4076 int
4077 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
4078 {
4079 zfs_cmd_t zc = {"\0"};
4080 libzfs_handle_t *hdl = zhp->zpool_hdl;
4081 uint64_t count;
4082 zbookmark_phys_t *zb = NULL;
4083 int i;
4084
4085 /*
4086 * Retrieve the raw error list from the kernel. If the number of errors
4087 * has increased, allocate more space and continue until we get the
4088 * entire list.
4089 */
4090 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
4091 &count) == 0);
4092 if (count == 0)
4093 return (0);
4094 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
4095 count * sizeof (zbookmark_phys_t));
4096 zc.zc_nvlist_dst_size = count;
4097 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4098 for (;;) {
4099 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4100 &zc) != 0) {
4101 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4102 if (errno == ENOMEM) {
4103 void *dst;
4104
4105 count = zc.zc_nvlist_dst_size;
4106 dst = zfs_alloc(zhp->zpool_hdl, count *
4107 sizeof (zbookmark_phys_t));
4108 zc.zc_nvlist_dst = (uintptr_t)dst;
4109 } else {
4110 return (zpool_standard_error_fmt(hdl, errno,
4111 dgettext(TEXT_DOMAIN, "errors: List of "
4112 "errors unavailable")));
4113 }
4114 } else {
4115 break;
4116 }
4117 }
4118
4119 /*
4120 * Sort the resulting bookmarks. This is a little confusing due to the
4121 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
4122 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4123 * _not_ copied as part of the process. So we adjust the start of our
4124 * array accordingly and decrement the total number of elements.
4125 */
4126 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
4127 zc.zc_nvlist_dst_size;
4128 count -= zc.zc_nvlist_dst_size;
4129
4130 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4131
4132 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4133
4134 /*
4135 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4136 */
4137 for (i = 0; i < count; i++) {
4138 nvlist_t *nv;
4139
4140 /* ignoring zb_blkid and zb_level for now */
4141 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4142 zb[i-1].zb_object == zb[i].zb_object)
4143 continue;
4144
4145 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4146 goto nomem;
4147 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4148 zb[i].zb_objset) != 0) {
4149 nvlist_free(nv);
4150 goto nomem;
4151 }
4152 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4153 zb[i].zb_object) != 0) {
4154 nvlist_free(nv);
4155 goto nomem;
4156 }
4157 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4158 nvlist_free(nv);
4159 goto nomem;
4160 }
4161 nvlist_free(nv);
4162 }
4163
4164 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4165 return (0);
4166
4167 nomem:
4168 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4169 return (no_memory(zhp->zpool_hdl));
4170 }
4171
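/*
 * Editor's illustrative sketch (not part of the original source):
 * consuming the uniquified error list built above. Because every pair
 * is added under the same name ("ejk"), callers iterate the pairs
 * rather than looking them up by name.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) != 0)
 *		return (-1);
 *	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *		nvlist_t *nv;
 *		verify(nvpair_value_nvlist(elem, &nv) == 0);
 *		(read ZPOOL_ERR_DATASET / ZPOOL_ERR_OBJECT from nv)
 *	}
 *	nvlist_free(nverrlist);
 */
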
4172 /*
4173 * Upgrade a ZFS pool to the latest on-disk version.
4174 */
4175 int
4176 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4177 {
4178 zfs_cmd_t zc = {"\0"};
4179 libzfs_handle_t *hdl = zhp->zpool_hdl;
4180
4181 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4182 zc.zc_cookie = new_version;
4183
4184 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4185 return (zpool_standard_error_fmt(hdl, errno,
4186 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4187 zhp->zpool_name));
4188 return (0);
4189 }
4190
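/*
 * Editor's illustrative sketch (not part of the original source):
 * upgrading a pool to the newest supported legacy version, as "zpool
 * upgrade" does; SPA_VERSION is the current on-disk version constant.
 *
 *	if (zpool_upgrade(zhp, SPA_VERSION) == 0)
 *		(void) printf("pool '%s' upgraded\n", zpool_get_name(zhp));
 */
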
4191 void
4192 zfs_save_arguments(int argc, char **argv, char *string, int len)
4193 {
4194 int i;
4195
4196 (void) strlcpy(string, basename(argv[0]), len);
4197 for (i = 1; i < argc; i++) {
4198 (void) strlcat(string, " ", len);
4199 (void) strlcat(string, argv[i], len);
4200 }
4201 }
4202
4203 int
4204 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4205 {
4206 zfs_cmd_t zc = {"\0"};
4207 nvlist_t *args;
4208 int err;
4209
4210 args = fnvlist_alloc();
4211 fnvlist_add_string(args, "message", message);
4212 err = zcmd_write_src_nvlist(hdl, &zc, args);
4213 if (err == 0)
4214 err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4215 nvlist_free(args);
4216 zcmd_free_nvlists(&zc);
4217 return (err);
4218 }
4219
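/*
 * Editor's illustrative sketch (not part of the original source): the
 * two helpers above are used together, once per command invocation, to
 * record the user's command line in the pool history ("zpool history").
 * 'hdl', 'argc' and 'argv' are assumed to be in scope.
 *
 *	char history[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history, sizeof (history));
 *	(void) zpool_log_history(hdl, history);
 */
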
4220 /*
4221 * Perform ioctl to get some command history of a pool.
4222 *
4223 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4224 * logical offset of the history buffer to start reading from.
4225 *
4226 * Upon return, 'off' is the next logical offset to read from and
4227 * 'len' is the actual number of bytes read into 'buf'.
4228 */
4229 static int
4230 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4231 {
4232 zfs_cmd_t zc = {"\0"};
4233 libzfs_handle_t *hdl = zhp->zpool_hdl;
4234
4235 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4236
4237 zc.zc_history = (uint64_t)(uintptr_t)buf;
4238 zc.zc_history_len = *len;
4239 zc.zc_history_offset = *off;
4240
4241 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4242 switch (errno) {
4243 case EPERM:
4244 return (zfs_error_fmt(hdl, EZFS_PERM,
4245 dgettext(TEXT_DOMAIN,
4246 "cannot show history for pool '%s'"),
4247 zhp->zpool_name));
4248 case ENOENT:
4249 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4250 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4251 "'%s'"), zhp->zpool_name));
4252 case ENOTSUP:
4253 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4254 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4255 "'%s', pool must be upgraded"), zhp->zpool_name));
4256 default:
4257 return (zpool_standard_error_fmt(hdl, errno,
4258 dgettext(TEXT_DOMAIN,
4259 "cannot get history for '%s'"), zhp->zpool_name));
4260 }
4261 }
4262
4263 *len = zc.zc_history_len;
4264 *off = zc.zc_history_offset;
4265
4266 return (0);
4267 }
4268
4269 /*
4270 * Retrieve the command history of a pool.
4271 */
4272 int
4273 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4274 boolean_t *eof)
4275 {
4276 char *buf;
4277 int buflen = 128 * 1024;
4278 nvlist_t **records = NULL;
4279 uint_t numrecords = 0;
4280 int err, i;
4281 uint64_t start = *off;
4282
4283 buf = malloc(buflen);
4284 if (buf == NULL)
4285 return (ENOMEM);
4286 /* process about 1MB at a time */
4287 while (*off - start < 1024 * 1024) {
4288 uint64_t bytes_read = buflen;
4289 uint64_t leftover;
4290
4291 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4292 break;
4293
4294 /* if nothing else was read in, we're at EOF; stop reading */
4295 if (!bytes_read) {
4296 *eof = B_TRUE;
4297 break;
4298 }
4299
4300 if ((err = zpool_history_unpack(buf, bytes_read,
4301 &leftover, &records, &numrecords)) != 0)
4302 break;
4303 *off -= leftover;
4304 if (leftover == bytes_read) {
4305 /*
4306 * no progress made, because buffer is not big enough
4307 * to hold this record; resize and retry.
4308 */
4309 buflen *= 2;
4310 free(buf);
4311 buf = malloc(buflen);
4312 if (buf == NULL)
4313 return (ENOMEM);
4314 }
4315 }
4316
4317 free(buf);
4318
4319 if (!err) {
4320 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4321 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4322 records, numrecords) == 0);
4323 }
4324 for (i = 0; i < numrecords; i++)
4325 nvlist_free(records[i]);
4326 free(records);
4327
4328 return (err);
4329 }
4330
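/*
 * Editor's illustrative sketch (not part of the original source):
 * paging through the full history, as "zpool history" does. Command
 * records carry ZPOOL_HIST_CMD; internal events carry
 * ZPOOL_HIST_INT_NAME. 'zhp' is assumed to be in scope.
 *
 *	nvlist_t *nvhis;
 *	uint64_t off = 0;
 *	boolean_t eof = B_FALSE;
 *
 *	while (!eof) {
 *		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
 *			break;
 *		(process the ZPOOL_HIST_RECORD nvlist array, then...)
 *		nvlist_free(nvhis);
 *	}
 */
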
4331 /*
4332 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4333 * If there is a new event available, 'nvp' will contain a newly allocated
4334 * nvlist and 'dropped' will be set to the number of missed events since
4335 * the last call to this function. When 'nvp' is set to NULL, it indicates
4336 * no new events are available. In either case the function returns 0 and
4337 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4338 * function will return a non-zero value. When the function is called in
4339 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4340 * it will not return until a new event is available.
4341 */
4342 int
4343 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4344 int *dropped, unsigned flags, int zevent_fd)
4345 {
4346 zfs_cmd_t zc = {"\0"};
4347 int error = 0;
4348
4349 *nvp = NULL;
4350 *dropped = 0;
4351 zc.zc_cleanup_fd = zevent_fd;
4352
4353 if (flags & ZEVENT_NONBLOCK)
4354 zc.zc_guid = ZEVENT_NONBLOCK;
4355
4356 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4357 return (-1);
4358
4359 retry:
4360 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4361 switch (errno) {
4362 case ESHUTDOWN:
4363 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4364 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4365 goto out;
4366 case ENOENT:
4367 /* ENOENT is only expected in non-blocking mode */
4368 if (!(flags & ZEVENT_NONBLOCK))
4369 error = zpool_standard_error_fmt(hdl, errno,
4370 dgettext(TEXT_DOMAIN, "cannot get event"));
4371
4372 goto out;
4373 case ENOMEM:
4374 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4375 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4376 dgettext(TEXT_DOMAIN, "cannot get event"));
4377 goto out;
4378 } else {
4379 goto retry;
4380 }
4381 default:
4382 error = zpool_standard_error_fmt(hdl, errno,
4383 dgettext(TEXT_DOMAIN, "cannot get event"));
4384 goto out;
4385 }
4386 }
4387
4388 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4389 if (error != 0)
4390 goto out;
4391
4392 *dropped = (int)zc.zc_cookie;
4393 out:
4394 zcmd_free_nvlists(&zc);
4395
4396 return (error);
4397 }
4398
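/*
 * Editor's illustrative sketch (not part of the original source): a
 * minimal non-blocking event loop. The zevent_fd is a private open of
 * ZFS_DEV, as "zpool events" and zed use; error handling is elided.
 *
 *	int fd = open(ZFS_DEV, O_RDWR);
 *	for (;;) {
 *		nvlist_t *nvl;
 *		int dropped;
 *
 *		if (zpool_events_next(hdl, &nvl, &dropped,
 *		    ZEVENT_NONBLOCK, fd) != 0 || nvl == NULL)
 *			break;
 *		if (dropped > 0)
 *			(void) printf("dropped %d events\n", dropped);
 *		nvlist_free(nvl);
 *	}
 *	(void) close(fd);
 */
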
4399 /*
4400 * Clear all events.
4401 */
4402 int
4403 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4404 {
4405 zfs_cmd_t zc = {"\0"};
4406 char msg[1024];
4407
4408 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4409 "cannot clear events"));
4410
4411 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4412 return (zpool_standard_error_fmt(hdl, errno, msg));
4413
4414 if (count != NULL)
4415 *count = (int)zc.zc_cookie; /* # of events cleared */
4416
4417 return (0);
4418 }
4419
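/*
 * Editor's illustrative sketch (not part of the original source):
 * draining the event log, as "zpool events -c" does.
 *
 *	int count;
 *
 *	if (zpool_events_clear(hdl, &count) == 0)
 *		(void) printf("cleared %d events\n", count);
 */
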
4420 /*
4421 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4422 * the passed zevent_fd file handle. On success zero is returned,
4423 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4424 */
4425 int
4426 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4427 {
4428 zfs_cmd_t zc = {"\0"};
4429 int error = 0;
4430
4431 zc.zc_guid = eid;
4432 zc.zc_cleanup_fd = zevent_fd;
4433
4434 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4435 switch (errno) {
4436 case ENOENT:
4437 error = zfs_error_fmt(hdl, EZFS_NOENT,
4438 dgettext(TEXT_DOMAIN, "cannot get event"));
4439 break;
4440
4441 case ENOMEM:
4442 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4443 dgettext(TEXT_DOMAIN, "cannot get event"));
4444 break;
4445
4446 default:
4447 error = zpool_standard_error_fmt(hdl, errno,
4448 dgettext(TEXT_DOMAIN, "cannot get event"));
4449 break;
4450 }
4451 }
4452
4453 return (error);
4454 }
4455
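/*
 * Editor's illustrative sketch (not part of the original source):
 * rewinding an already-open zevent_fd so the next call to
 * zpool_events_next() replays the log from the beginning.
 *
 *	if (zpool_events_seek(hdl, ZEVENT_SEEK_START, fd) != 0)
 *		return (-1);
 */
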
4456 static void
4457 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4458 char *pathname, size_t len, boolean_t always_unmounted)
4459 {
4460 zfs_cmd_t zc = {"\0"};
4461 boolean_t mounted = B_FALSE;
4462 char *mntpnt = NULL;
4463 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4464
4465 if (dsobj == 0) {
4466 /* special case for the MOS */
4467 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4468 (longlong_t)obj);
4469 return;
4470 }
4471
4472 /* get the dataset's name */
4473 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4474 zc.zc_obj = dsobj;
4475 if (zfs_ioctl(zhp->zpool_hdl,
4476 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4477 /* just write out a path of two object numbers */
4478 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4479 (longlong_t)dsobj, (longlong_t)obj);
4480 return;
4481 }
4482 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4483
4484 /* find out if the dataset is mounted */
4485 mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
4486 &mntpnt);
4487
4488 /* get the corrupted object's path */
4489 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4490 zc.zc_obj = obj;
4491 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
4492 &zc) == 0) {
4493 if (mounted) {
4494 (void) snprintf(pathname, len, "%s%s", mntpnt,
4495 zc.zc_value);
4496 } else {
4497 (void) snprintf(pathname, len, "%s:%s",
4498 dsname, zc.zc_value);
4499 }
4500 } else {
4501 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4502 (longlong_t)obj);
4503 }
4504 free(mntpnt);
4505 }
4506
4507 void
4508 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4509 char *pathname, size_t len)
4510 {
4511 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
4512 }
4513
4514 void
4515 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4516 char *pathname, size_t len)
4517 {
4518 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
4519 }
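
/*
 * Editor's illustrative sketch (not part of the original source):
 * translating a (dsobj, obj) pair from the error log into a
 * human-readable path, roughly what "zpool status -v" prints.
 *
 *	char pathname[MAXPATHLEN * 2];
 *
 *	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
 *	(void) printf("%s\n", pathname);
 */
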
4520 /*
4521 * Wait while the specified activity is in progress in the pool.
4522 */
4523 int
4524 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
4525 {
4526 boolean_t missing;
4527
4528 int error = zpool_wait_status(zhp, activity, &missing, NULL);
4529
4530 if (missing) {
4531 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
4532 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4533 zhp->zpool_name);
4534 return (ENOENT);
4535 } else {
4536 return (error);
4537 }
4538 }
4539
4540 /*
4541 * Wait for the given activity and return the status of the wait (whether or not
4542 * any waiting was done) in the 'waited' parameter. Non-existent pools are
4543 * reported via the 'missing' parameter, rather than by printing an error
4544 * message. This is convenient when this function is called in a loop over a
4545 * long period of time (as it is, for example, by zpool's wait cmd). In that
4546 * scenario, a pool being exported or destroyed should be considered a normal
4547 * event, so we don't want to print an error when we find that the pool doesn't
4548 * exist.
4549 */
4550 int
4551 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
4552 boolean_t *missing, boolean_t *waited)
4553 {
4554 int error = lzc_wait(zhp->zpool_name, activity, waited);
4555 *missing = (error == ENOENT);
4556 if (*missing)
4557 return (0);
4558
4559 if (error != 0) {
4560 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4561 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4562 zhp->zpool_name);
4563 }
4564
4565 return (error);
4566 }
4567
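/*
 * Editor's illustrative sketch (not part of the original source):
 * waiting for a scrub from a polling loop that tolerates the pool
 * disappearing, similar to what "zpool wait" does.
 *
 *	boolean_t missing, waited;
 *
 *	for (;;) {
 *		if (zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB,
 *		    &missing, &waited) != 0 || missing || !waited)
 *			break;
 *	}
 */
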
4568 int
4569 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
4570 {
4571 int error = lzc_set_bootenv(zhp->zpool_name, envmap);
4572 if (error != 0) {
4573 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4574 dgettext(TEXT_DOMAIN,
4575 "error setting bootenv in pool '%s'"), zhp->zpool_name);
4576 }
4577
4578 return (error);
4579 }
4580
4581 int
4582 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
4583 {
4584 nvlist_t *nvl;
4585 int error;
4586
4587 nvl = NULL;
4588 error = lzc_get_bootenv(zhp->zpool_name, &nvl);
4589 if (error != 0) {
4590 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4591 dgettext(TEXT_DOMAIN,
4592 "error getting bootenv in pool '%s'"), zhp->zpool_name);
4593 } else {
4594 *nvlp = nvl;
4595 }
4596
4597 return (error);
4598 }
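
/*
 * Editor's illustrative sketch (not part of the original source):
 * dumping the pool's boot environment area. The exact nvlist layout
 * (e.g. a GRUB-style environment string) is bootloader-defined and an
 * assumption here; dump_nvlist() is from libnvpair.
 *
 *	nvlist_t *benv = NULL;
 *
 *	if (zpool_get_bootenv(zhp, &benv) == 0) {
 *		dump_nvlist(benv, 8);
 *		nvlist_free(benv);
 *	}
 */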