]> git.proxmox.com Git - mirror_zfs.git/blob - lib/libzfs/libzfs_pool.c
Remove bcopy(), bzero(), bcmp()
[mirror_zfs.git] / lib / libzfs / libzfs_pool.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 * Copyright (c) 2018 Datto Inc.
28 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
29 * Copyright (c) 2017, Intel Corporation.
30 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
31 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
32 * Copyright (c) 2021, Klara Inc.
33 */
34
35 #include <errno.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <libgen.h>
42 #include <zone.h>
43 #include <sys/stat.h>
44 #include <sys/efi_partition.h>
45 #include <sys/systeminfo.h>
46 #include <sys/zfs_ioctl.h>
47 #include <sys/zfs_sysfs.h>
48 #include <sys/vdev_disk.h>
49 #include <sys/types.h>
50 #include <dlfcn.h>
51 #include <libzutil.h>
52 #include <fcntl.h>
53
54 #include "zfs_namecheck.h"
55 #include "zfs_prop.h"
56 #include "libzfs_impl.h"
57 #include "zfs_comutil.h"
58 #include "zfeature_common.h"
59
60 static boolean_t zpool_vdev_is_interior(const char *name);
61
62 typedef struct prop_flags {
63 int create:1; /* Validate property on creation */
64 int import:1; /* Validate property on import */
65 int vdevprop:1; /* Validate property as a VDEV property */
66 } prop_flags_t;
67
68 /*
69 * ====================================================================
70 * zpool property functions
71 * ====================================================================
72 */
73
/*
 * Fetch the complete property nvlist for the pool from the kernel via
 * ZFS_IOC_POOL_GET_PROPS and cache it in zhp->zpool_props.
 *
 * Returns 0 on success, -1 on ioctl or allocation failure (any
 * partially allocated ioctl buffers are released before returning).
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	/*
	 * The kernel signals an undersized destination buffer with ENOMEM;
	 * grow the buffer and retry until the ioctl succeeds or a
	 * non-recoverable error occurs.
	 */
	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	/* Unpack the packed nvlist into the handle's property cache. */
	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
106
107 int
108 zpool_props_refresh(zpool_handle_t *zhp)
109 {
110 nvlist_t *old_props;
111
112 old_props = zhp->zpool_props;
113
114 if (zpool_get_all_props(zhp) != 0)
115 return (-1);
116
117 nvlist_free(old_props);
118 return (0);
119 }
120
121 static const char *
122 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
123 zprop_source_t *src)
124 {
125 nvlist_t *nv, *nvl;
126 char *value;
127 zprop_source_t source;
128
129 nvl = zhp->zpool_props;
130 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
131 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
132 value = fnvlist_lookup_string(nv, ZPROP_VALUE);
133 } else {
134 source = ZPROP_SRC_DEFAULT;
135 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
136 value = "-";
137 }
138
139 if (src)
140 *src = source;
141
142 return (value);
143 }
144
/*
 * Look up a numeric (uint64) pool property in the handle's cached
 * property nvlist, populating the cache on first use.  Falls back to
 * the property's default value when it is unset or the cache cannot be
 * fetched.  The source of the value is stored through 'src' when
 * non-NULL.
 */
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
	} else {
		/* Property not explicitly set on this pool; use default. */
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
182
183 /*
184 * Map VDEV STATE to printed strings.
185 */
186 const char *
187 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
188 {
189 switch (state) {
190 case VDEV_STATE_CLOSED:
191 case VDEV_STATE_OFFLINE:
192 return (gettext("OFFLINE"));
193 case VDEV_STATE_REMOVED:
194 return (gettext("REMOVED"));
195 case VDEV_STATE_CANT_OPEN:
196 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
197 return (gettext("FAULTED"));
198 else if (aux == VDEV_AUX_SPLIT_POOL)
199 return (gettext("SPLIT"));
200 else
201 return (gettext("UNAVAIL"));
202 case VDEV_STATE_FAULTED:
203 return (gettext("FAULTED"));
204 case VDEV_STATE_DEGRADED:
205 return (gettext("DEGRADED"));
206 case VDEV_STATE_HEALTHY:
207 return (gettext("ONLINE"));
208
209 default:
210 break;
211 }
212
213 return (gettext("UNKNOWN"));
214 }
215
216 /*
217 * Map POOL STATE to printed strings.
218 */
219 const char *
220 zpool_pool_state_to_name(pool_state_t state)
221 {
222 switch (state) {
223 default:
224 break;
225 case POOL_STATE_ACTIVE:
226 return (gettext("ACTIVE"));
227 case POOL_STATE_EXPORTED:
228 return (gettext("EXPORTED"));
229 case POOL_STATE_DESTROYED:
230 return (gettext("DESTROYED"));
231 case POOL_STATE_SPARE:
232 return (gettext("SPARE"));
233 case POOL_STATE_L2CACHE:
234 return (gettext("L2CACHE"));
235 case POOL_STATE_UNINITIALIZED:
236 return (gettext("UNINITIALIZED"));
237 case POOL_STATE_UNAVAIL:
238 return (gettext("UNAVAIL"));
239 case POOL_STATE_POTENTIALLY_ACTIVE:
240 return (gettext("POTENTIALLY_ACTIVE"));
241 }
242
243 return (gettext("UNKNOWN"));
244 }
245
246 /*
247 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
248 * "SUSPENDED", etc).
249 */
250 const char *
251 zpool_get_state_str(zpool_handle_t *zhp)
252 {
253 zpool_errata_t errata;
254 zpool_status_t status;
255 const char *str;
256
257 status = zpool_get_status(zhp, NULL, &errata);
258
259 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
260 str = gettext("FAULTED");
261 } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
262 status == ZPOOL_STATUS_IO_FAILURE_MMP) {
263 str = gettext("SUSPENDED");
264 } else {
265 nvlist_t *nvroot = fnvlist_lookup_nvlist(
266 zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
267 uint_t vsc;
268 vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
269 nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
270 str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
271 }
272 return (str);
273 }
274
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 *
 * 'len' is the size of 'buf'; 'srctype' (when non-NULL) receives the
 * source of the value; 'literal' selects raw numeric output instead of
 * the human-readable ("nicenum") formatting.  Returns 0 on success,
 * -1 if the property cache could not be fetched or an index property
 * could not be mapped to a string.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	/*
	 * For an unavailable pool only a handful of properties can be
	 * answered (from the handle itself or the config nvlist);
	 * everything else is reported as "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			/* These may still be fetchable from the kernel. */
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			zfs_fallthrough;
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	/* NAME never needs the property cache; everything else does. */
	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		/* Per-property formatting of the numeric value. */
		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
		case ZPOOL_PROP_MAXBLOCKSIZE:
		case ZPOOL_PROP_MAXDNODESIZE:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			/* Zero means "not applicable" for these. */
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			/* UINT64_MAX signals "fragmentation unknown". */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			/* Stored as a percentage scaled by 100. */
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			/* Feature-flag pools have no single version number. */
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			zfs_fallthrough;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
429
430 /*
431 * Check if the bootfs name has the same pool name as it is set to.
432 * Assuming bootfs is a valid dataset name.
433 */
434 static boolean_t
435 bootfs_name_valid(const char *pool, const char *bootfs)
436 {
437 int len = strlen(pool);
438 if (bootfs[0] == '\0')
439 return (B_TRUE);
440
441 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
442 return (B_FALSE);
443
444 if (strncmp(pool, bootfs, len) == 0 &&
445 (bootfs[len] == '/' || bootfs[len] == '\0'))
446 return (B_TRUE);
447
448 return (B_FALSE);
449 }
450
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 *
 * Returns a newly allocated nvlist of validated/parsed properties (the
 * caller owns it), or NULL on error with the libzfs error set and a
 * message based on 'errbuf'.  'flags' selects create/import/vdev-prop
 * context, which changes which properties are permitted.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	char report[1024];

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * In vdev-property context only native vdev properties and
		 * user-defined vdev properties are accepted.
		 */
		if (flags.vdevprop && zpool_prop_vdev(propname)) {
			vdev_prop_t vprop = vdev_name_to_prop(propname);

			if (vdev_prop_readonly(vprop)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is readonly"), propname);
				(void) zfs_error(hdl, EZFS_PROPREADONLY,
				    errbuf);
				goto error;
			}

			if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
			    retprops, &strval, &intval, errbuf) != 0)
				goto error;

			continue;
		} else if (flags.vdevprop && vdev_prop_user(propname)) {
			/* User props pass through unmodified. */
			if (nvlist_add_nvpair(retprops, elem) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (flags.vdevprop) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property: '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			/*
			 * "feature@..." properties: the feature must be
			 * known to the kernel and the value must be
			 * "enabled" (or "disabled", at creation only).
			 */
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			/* Features are recorded as uint64 0 in retprops. */
			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (unsigned long long)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			/* 0 means "auto-detect"; otherwise clamp to range. */
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid, "
				    "only values between %" PRId32 " and %"
				    PRId32 " are allowed."),
				    propname, (unsigned long long)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			/* Verify the pool itself can be opened. */
			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			/* Empty string and "none" are both accepted as-is. */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			/*
			 * Temporarily truncate at the final slash so the
			 * parent directory can be stat'd; the slash is
			 * restored below.
			 */
			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMPATIBILITY:
			switch (zpool_load_compat(strval, NULL, report, 1024)) {
			case ZPOOL_COMPATIBILITY_OK:
			case ZPOOL_COMPATIBILITY_WARNTOKEN:
				break;
			case ZPOOL_COMPATIBILITY_BADFILE:
			case ZPOOL_COMPATIBILITY_BADTOKEN:
			case ZPOOL_COMPATIBILITY_NOFILES:
				zfs_error_aux(hdl, "%s", report);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			/* Accepted for compatibility but does nothing. */
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
774
/*
 * Set zpool property : propname=propval.
 *
 * The property is validated/parsed via zpool_valid_proplist() and then
 * pushed to the kernel with ZFS_IOC_POOL_SET_PROPS.  On success the
 * handle's property cache is refreshed.  Returns 0 on success, -1 (or
 * the no_memory() result) on failure with the libzfs error set.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	/* Validate and parse the single-entry nvlist. */
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
833
/*
 * Expand a pool property list (*plp) in place: append entries for every
 * supported feature@ property (on the first expansion of an "all
 * properties" list), append unsupported@ properties found on this pool,
 * and widen each entry's display width to fit this pool's values.
 * Returns 0 on success, -1 on failure.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	/* Vdev property lists need no feature handling. */
	if (type == ZFS_TYPE_VDEV)
		return (0);

	/* Find the tail so new entries can be appended in order. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	/* On first expansion add one entry per known feature. */
	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen columns to fit this pool's actual property values. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
931
932 int
933 vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
934 zprop_list_t **plp)
935 {
936 zprop_list_t *entry;
937 char buf[ZFS_MAXPROPLEN];
938 char *strval = NULL;
939 int err = 0;
940 nvpair_t *elem = NULL;
941 nvlist_t *vprops = NULL;
942 nvlist_t *propval = NULL;
943 const char *propname;
944 vdev_prop_t prop;
945 zprop_list_t **last;
946
947 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
948 if (entry->pl_fixed)
949 continue;
950
951 if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
952 entry->pl_user_prop, buf, sizeof (buf), NULL,
953 B_FALSE) == 0) {
954 if (strlen(buf) > entry->pl_width)
955 entry->pl_width = strlen(buf);
956 }
957 if (entry->pl_prop == VDEV_PROP_NAME &&
958 strlen(vdevname) > entry->pl_width)
959 entry->pl_width = strlen(vdevname);
960 }
961
962 /* Handle the all properties case */
963 last = plp;
964 if (*last != NULL && (*last)->pl_all == B_TRUE) {
965 while (*last != NULL)
966 last = &(*last)->pl_next;
967
968 err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
969 if (err != 0)
970 return (err);
971
972 while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
973 propname = nvpair_name(elem);
974
975 /* Skip properties that are not user defined */
976 if ((prop = vdev_name_to_prop(propname)) !=
977 VDEV_PROP_USER)
978 continue;
979
980 if (nvpair_value_nvlist(elem, &propval) != 0)
981 continue;
982
983 strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
984
985 if ((entry = zfs_alloc(zhp->zpool_hdl,
986 sizeof (zprop_list_t))) == NULL)
987 return (ENOMEM);
988
989 entry->pl_prop = prop;
990 entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
991 propname);
992 entry->pl_width = strlen(strval);
993 entry->pl_all = B_TRUE;
994 *last = entry;
995 last = &entry->pl_next;
996 }
997 }
998
999 return (0);
1000 }
1001
1002 /*
1003 * Get the state for the given feature on the given ZFS pool.
1004 */
1005 int
1006 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
1007 size_t len)
1008 {
1009 uint64_t refcount;
1010 boolean_t found = B_FALSE;
1011 nvlist_t *features = zpool_get_features(zhp);
1012 boolean_t supported;
1013 const char *feature = strchr(propname, '@') + 1;
1014
1015 supported = zpool_prop_feature(propname);
1016 ASSERT(supported || zpool_prop_unsupported(propname));
1017
1018 /*
1019 * Convert from feature name to feature guid. This conversion is
1020 * unnecessary for unsupported@... properties because they already
1021 * use guids.
1022 */
1023 if (supported) {
1024 int ret;
1025 spa_feature_t fid;
1026
1027 ret = zfeature_lookup_name(feature, &fid);
1028 if (ret != 0) {
1029 (void) strlcpy(buf, "-", len);
1030 return (ENOTSUP);
1031 }
1032 feature = spa_feature_table[fid].fi_guid;
1033 }
1034
1035 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
1036 found = B_TRUE;
1037
1038 if (supported) {
1039 if (!found) {
1040 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
1041 } else {
1042 if (refcount == 0)
1043 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
1044 else
1045 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
1046 }
1047 } else {
1048 if (found) {
1049 if (refcount == 0) {
1050 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
1051 } else {
1052 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
1053 }
1054 } else {
1055 (void) strlcpy(buf, "-", len);
1056 return (ENOTSUP);
1057 }
1058 }
1059
1060 return (0);
1061 }
1062
/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 *
 * Returns B_TRUE when the name is acceptable.  'isopen' indicates the
 * name refers to an existing pool being opened (which relaxes the
 * reserved-name checks); when hdl is non-NULL a descriptive auxiliary
 * error message is recorded on failure.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		/* Translate the namecheck failure into a readable message. */
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
1161
1162 /*
1163 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1164 * state.
1165 */
1166 zpool_handle_t *
1167 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1168 {
1169 zpool_handle_t *zhp;
1170 boolean_t missing;
1171
1172 /*
1173 * Make sure the pool name is valid.
1174 */
1175 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1176 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1177 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1178 pool);
1179 return (NULL);
1180 }
1181
1182 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1183 return (NULL);
1184
1185 zhp->zpool_hdl = hdl;
1186 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1187
1188 if (zpool_refresh_stats(zhp, &missing) != 0) {
1189 zpool_close(zhp);
1190 return (NULL);
1191 }
1192
1193 if (missing) {
1194 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1195 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1196 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1197 zpool_close(zhp);
1198 return (NULL);
1199 }
1200
1201 return (zhp);
1202 }
1203
1204 /*
1205 * Like the above, but silent on error. Used when iterating over pools (because
1206 * the configuration cache may be out of date).
1207 */
1208 int
1209 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1210 {
1211 zpool_handle_t *zhp;
1212 boolean_t missing;
1213
1214 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1215 return (-1);
1216
1217 zhp->zpool_hdl = hdl;
1218 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1219
1220 if (zpool_refresh_stats(zhp, &missing) != 0) {
1221 zpool_close(zhp);
1222 return (-1);
1223 }
1224
1225 if (missing) {
1226 zpool_close(zhp);
1227 *ret = NULL;
1228 return (0);
1229 }
1230
1231 *ret = zhp;
1232 return (0);
1233 }
1234
1235 /*
1236 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1237 * state.
1238 */
1239 zpool_handle_t *
1240 zpool_open(libzfs_handle_t *hdl, const char *pool)
1241 {
1242 zpool_handle_t *zhp;
1243
1244 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1245 return (NULL);
1246
1247 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1248 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1249 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1250 zpool_close(zhp);
1251 return (NULL);
1252 }
1253
1254 return (zhp);
1255 }
1256
1257 /*
1258 * Close the handle. Simply frees the memory associated with the handle.
1259 */
1260 void
1261 zpool_close(zpool_handle_t *zhp)
1262 {
1263 nvlist_free(zhp->zpool_config);
1264 nvlist_free(zhp->zpool_old_config);
1265 nvlist_free(zhp->zpool_props);
1266 free(zhp);
1267 }
1268
/*
 * Return the name of the pool.
 *
 * The returned pointer refers to storage inside the handle and remains
 * valid only until zpool_close() frees the handle.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
1277
1278
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 *
 * NOTE(review): the value is read from the cached handle field; it is
 * presumably kept current by zpool_refresh_stats() (see
 * zpool_open_canfail()) — confirm before relying on freshness.
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
1287
1288 /*
1289 * Check if vdev list contains a special vdev
1290 */
1291 static boolean_t
1292 zpool_has_special_vdev(nvlist_t *nvroot)
1293 {
1294 nvlist_t **child;
1295 uint_t children;
1296
1297 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
1298 &children) == 0) {
1299 for (uint_t c = 0; c < children; c++) {
1300 char *bias;
1301
1302 if (nvlist_lookup_string(child[c],
1303 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
1304 strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
1305 return (B_TRUE);
1306 }
1307 }
1308 }
1309 return (B_FALSE);
1310 }
1311
1312 /*
1313 * Check if vdev list contains a dRAID vdev
1314 */
1315 static boolean_t
1316 zpool_has_draid_vdev(nvlist_t *nvroot)
1317 {
1318 nvlist_t **child;
1319 uint_t children;
1320
1321 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1322 &child, &children) == 0) {
1323 for (uint_t c = 0; c < children; c++) {
1324 char *type;
1325
1326 if (nvlist_lookup_string(child[c],
1327 ZPOOL_CONFIG_TYPE, &type) == 0 &&
1328 strcmp(type, VDEV_TYPE_DRAID) == 0) {
1329 return (B_TRUE);
1330 }
1331 }
1332 }
1333 return (B_FALSE);
1334 }
1335
1336 /*
1337 * Output a dRAID top-level vdev name in to the provided buffer.
1338 */
1339 static char *
1340 zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
1341 uint64_t spares, uint64_t children)
1342 {
1343 snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
1344 VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
1345 (u_longlong_t)children, (u_longlong_t)spares);
1346
1347 return (name);
1348 }
1349
1350 /*
1351 * Return B_TRUE if the provided name is a dRAID spare name.
1352 */
1353 boolean_t
1354 zpool_is_draid_spare(const char *name)
1355 {
1356 uint64_t spare_id, parity, vdev_id;
1357
1358 if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
1359 (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
1360 (u_longlong_t *)&spare_id) == 3) {
1361 return (B_TRUE);
1362 }
1363
1364 return (B_FALSE);
1365 }
1366
1367 /*
1368 * Create the named pool, using the provided vdev list. It is assumed
1369 * that the consumer has already validated the contents of the nvlist, so we
1370 * don't have to worry about error semantics.
1371 */
1372 int
1373 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1374 nvlist_t *props, nvlist_t *fsprops)
1375 {
1376 zfs_cmd_t zc = {"\0"};
1377 nvlist_t *zc_fsprops = NULL;
1378 nvlist_t *zc_props = NULL;
1379 nvlist_t *hidden_args = NULL;
1380 uint8_t *wkeydata = NULL;
1381 uint_t wkeylen = 0;
1382 char msg[1024];
1383 int ret = -1;
1384
1385 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1386 "cannot create '%s'"), pool);
1387
1388 if (!zpool_name_valid(hdl, B_FALSE, pool))
1389 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1390
1391 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1392 return (-1);
1393
1394 if (props) {
1395 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1396
1397 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1398 SPA_VERSION_1, flags, msg)) == NULL) {
1399 goto create_failed;
1400 }
1401 }
1402
1403 if (fsprops) {
1404 uint64_t zoned;
1405 char *zonestr;
1406
1407 zoned = ((nvlist_lookup_string(fsprops,
1408 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1409 strcmp(zonestr, "on") == 0);
1410
1411 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1412 fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
1413 goto create_failed;
1414 }
1415
1416 if (nvlist_exists(zc_fsprops,
1417 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
1418 !zpool_has_special_vdev(nvroot)) {
1419 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1420 "%s property requires a special vdev"),
1421 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
1422 (void) zfs_error(hdl, EZFS_BADPROP, msg);
1423 goto create_failed;
1424 }
1425
1426 if (!zc_props &&
1427 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1428 goto create_failed;
1429 }
1430 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
1431 &wkeydata, &wkeylen) != 0) {
1432 zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
1433 goto create_failed;
1434 }
1435 if (nvlist_add_nvlist(zc_props,
1436 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1437 goto create_failed;
1438 }
1439 if (wkeydata != NULL) {
1440 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1441 goto create_failed;
1442
1443 if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1444 wkeydata, wkeylen) != 0)
1445 goto create_failed;
1446
1447 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1448 hidden_args) != 0)
1449 goto create_failed;
1450 }
1451 }
1452
1453 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1454 goto create_failed;
1455
1456 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1457
1458 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1459
1460 zcmd_free_nvlists(&zc);
1461 nvlist_free(zc_props);
1462 nvlist_free(zc_fsprops);
1463 nvlist_free(hidden_args);
1464 if (wkeydata != NULL)
1465 free(wkeydata);
1466
1467 switch (errno) {
1468 case EBUSY:
1469 /*
1470 * This can happen if the user has specified the same
1471 * device multiple times. We can't reliably detect this
1472 * until we try to add it and see we already have a
1473 * label. This can also happen under if the device is
1474 * part of an active md or lvm device.
1475 */
1476 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1477 "one or more vdevs refer to the same device, or "
1478 "one of\nthe devices is part of an active md or "
1479 "lvm device"));
1480 return (zfs_error(hdl, EZFS_BADDEV, msg));
1481
1482 case ERANGE:
1483 /*
1484 * This happens if the record size is smaller or larger
1485 * than the allowed size range, or not a power of 2.
1486 *
1487 * NOTE: although zfs_valid_proplist is called earlier,
1488 * this case may have slipped through since the
1489 * pool does not exist yet and it is therefore
1490 * impossible to read properties e.g. max blocksize
1491 * from the pool.
1492 */
1493 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1494 "record size invalid"));
1495 return (zfs_error(hdl, EZFS_BADPROP, msg));
1496
1497 case EOVERFLOW:
1498 /*
1499 * This occurs when one of the devices is below
1500 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1501 * device was the problem device since there's no
1502 * reliable way to determine device size from userland.
1503 */
1504 {
1505 char buf[64];
1506
1507 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1508 sizeof (buf));
1509
1510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1511 "one or more devices is less than the "
1512 "minimum size (%s)"), buf);
1513 }
1514 return (zfs_error(hdl, EZFS_BADDEV, msg));
1515
1516 case ENOSPC:
1517 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1518 "one or more devices is out of space"));
1519 return (zfs_error(hdl, EZFS_BADDEV, msg));
1520
1521 case EINVAL:
1522 if (zpool_has_draid_vdev(nvroot) &&
1523 zfeature_lookup_name("draid", NULL) != 0) {
1524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1525 "dRAID vdevs are unsupported by the "
1526 "kernel"));
1527 return (zfs_error(hdl, EZFS_BADDEV, msg));
1528 } else {
1529 return (zpool_standard_error(hdl, errno, msg));
1530 }
1531
1532 default:
1533 return (zpool_standard_error(hdl, errno, msg));
1534 }
1535 }
1536
1537 create_failed:
1538 zcmd_free_nvlists(&zc);
1539 nvlist_free(zc_props);
1540 nvlist_free(zc_fsprops);
1541 nvlist_free(hidden_args);
1542 if (wkeydata != NULL)
1543 free(wkeydata);
1544 return (ret);
1545 }
1546
1547 /*
1548 * Destroy the given pool. It is up to the caller to ensure that there are no
1549 * datasets left in the pool.
1550 */
1551 int
1552 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1553 {
1554 zfs_cmd_t zc = {"\0"};
1555 zfs_handle_t *zfp = NULL;
1556 libzfs_handle_t *hdl = zhp->zpool_hdl;
1557 char msg[1024];
1558
1559 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1560 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1561 return (-1);
1562
1563 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1564 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1565
1566 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1567 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1568 "cannot destroy '%s'"), zhp->zpool_name);
1569
1570 if (errno == EROFS) {
1571 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1572 "one or more devices is read only"));
1573 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1574 } else {
1575 (void) zpool_standard_error(hdl, errno, msg);
1576 }
1577
1578 if (zfp)
1579 zfs_close(zfp);
1580 return (-1);
1581 }
1582
1583 if (zfp) {
1584 remove_mountpoint(zfp);
1585 zfs_close(zfp);
1586 }
1587
1588 return (0);
1589 }
1590
1591 /*
1592 * Create a checkpoint in the given pool.
1593 */
1594 int
1595 zpool_checkpoint(zpool_handle_t *zhp)
1596 {
1597 libzfs_handle_t *hdl = zhp->zpool_hdl;
1598 char msg[1024];
1599 int error;
1600
1601 error = lzc_pool_checkpoint(zhp->zpool_name);
1602 if (error != 0) {
1603 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1604 "cannot checkpoint '%s'"), zhp->zpool_name);
1605 (void) zpool_standard_error(hdl, error, msg);
1606 return (-1);
1607 }
1608
1609 return (0);
1610 }
1611
1612 /*
1613 * Discard the checkpoint from the given pool.
1614 */
1615 int
1616 zpool_discard_checkpoint(zpool_handle_t *zhp)
1617 {
1618 libzfs_handle_t *hdl = zhp->zpool_hdl;
1619 char msg[1024];
1620 int error;
1621
1622 error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1623 if (error != 0) {
1624 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1625 "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1626 (void) zpool_standard_error(hdl, error, msg);
1627 return (-1);
1628 }
1629
1630 return (0);
1631 }
1632
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 *
 * Returns 0 on success, -1 on failure (with an error recorded on the
 * library handle).
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/* Hot spares require at least SPA_VERSION_SPARES on-disk. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	/* L2ARC cache devices require at least SPA_VERSION_L2CACHE. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		/* Map kernel errno values to user-facing explanations. */
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			/* Distinguish a kernel lacking dRAID support. */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	/* Free the packed config regardless of outcome. */
	zcmd_free_nvlists(&zc);

	return (ret);
}
1742
1743 /*
1744 * Exports the pool from the system. The caller must ensure that there are no
1745 * mounted datasets in the pool.
1746 */
1747 static int
1748 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1749 const char *log_str)
1750 {
1751 zfs_cmd_t zc = {"\0"};
1752
1753 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1754 zc.zc_cookie = force;
1755 zc.zc_guid = hardforce;
1756 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1757
1758 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1759 switch (errno) {
1760 case EXDEV:
1761 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1762 "use '-f' to override the following errors:\n"
1763 "'%s' has an active shared spare which could be"
1764 " used by other pools once '%s' is exported."),
1765 zhp->zpool_name, zhp->zpool_name);
1766 return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1767 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1768 zhp->zpool_name));
1769 default:
1770 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1771 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1772 zhp->zpool_name));
1773 }
1774 }
1775
1776 return (0);
1777 }
1778
/*
 * Export the pool.  'force' is passed through to the kernel ioctl
 * (zc_cookie); the hard-force flag is left unset on this path.
 */
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
1784
/*
 * Forcibly export the pool: sets both the force and hard-force flags
 * on the common export path.
 */
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
1790
1791 static void
1792 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1793 nvlist_t *config)
1794 {
1795 nvlist_t *nv = NULL;
1796 uint64_t rewindto;
1797 int64_t loss = -1;
1798 struct tm t;
1799 char timestr[128];
1800
1801 if (!hdl->libzfs_printerr || config == NULL)
1802 return;
1803
1804 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1805 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1806 return;
1807 }
1808
1809 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1810 return;
1811 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1812
1813 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1814 strftime(timestr, 128, "%c", &t) != 0) {
1815 if (dryrun) {
1816 (void) printf(dgettext(TEXT_DOMAIN,
1817 "Would be able to return %s "
1818 "to its state as of %s.\n"),
1819 name, timestr);
1820 } else {
1821 (void) printf(dgettext(TEXT_DOMAIN,
1822 "Pool %s returned to its state as of %s.\n"),
1823 name, timestr);
1824 }
1825 if (loss > 120) {
1826 (void) printf(dgettext(TEXT_DOMAIN,
1827 "%s approximately %lld "),
1828 dryrun ? "Would discard" : "Discarded",
1829 ((longlong_t)loss + 30) / 60);
1830 (void) printf(dgettext(TEXT_DOMAIN,
1831 "minutes of transactions.\n"));
1832 } else if (loss > 0) {
1833 (void) printf(dgettext(TEXT_DOMAIN,
1834 "%s approximately %lld "),
1835 dryrun ? "Would discard" : "Discarded",
1836 (longlong_t)loss);
1837 (void) printf(dgettext(TEXT_DOMAIN,
1838 "seconds of transactions.\n"));
1839 }
1840 }
1841 }
1842
1843 void
1844 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1845 nvlist_t *config)
1846 {
1847 nvlist_t *nv = NULL;
1848 int64_t loss = -1;
1849 uint64_t edata = UINT64_MAX;
1850 uint64_t rewindto;
1851 struct tm t;
1852 char timestr[128];
1853
1854 if (!hdl->libzfs_printerr)
1855 return;
1856
1857 if (reason >= 0)
1858 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1859 else
1860 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1861
1862 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1863 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1864 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1865 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1866 goto no_info;
1867
1868 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1869 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1870 &edata);
1871
1872 (void) printf(dgettext(TEXT_DOMAIN,
1873 "Recovery is possible, but will result in some data loss.\n"));
1874
1875 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1876 strftime(timestr, 128, "%c", &t) != 0) {
1877 (void) printf(dgettext(TEXT_DOMAIN,
1878 "\tReturning the pool to its state as of %s\n"
1879 "\tshould correct the problem. "),
1880 timestr);
1881 } else {
1882 (void) printf(dgettext(TEXT_DOMAIN,
1883 "\tReverting the pool to an earlier state "
1884 "should correct the problem.\n\t"));
1885 }
1886
1887 if (loss > 120) {
1888 (void) printf(dgettext(TEXT_DOMAIN,
1889 "Approximately %lld minutes of data\n"
1890 "\tmust be discarded, irreversibly. "),
1891 ((longlong_t)loss + 30) / 60);
1892 } else if (loss > 0) {
1893 (void) printf(dgettext(TEXT_DOMAIN,
1894 "Approximately %lld seconds of data\n"
1895 "\tmust be discarded, irreversibly. "),
1896 (longlong_t)loss);
1897 }
1898 if (edata != 0 && edata != UINT64_MAX) {
1899 if (edata == 1) {
1900 (void) printf(dgettext(TEXT_DOMAIN,
1901 "After rewind, at least\n"
1902 "\tone persistent user-data error will remain. "));
1903 } else {
1904 (void) printf(dgettext(TEXT_DOMAIN,
1905 "After rewind, several\n"
1906 "\tpersistent user-data errors will remain. "));
1907 }
1908 }
1909 (void) printf(dgettext(TEXT_DOMAIN,
1910 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1911 reason >= 0 ? "clear" : "import", name);
1912
1913 (void) printf(dgettext(TEXT_DOMAIN,
1914 "A scrub of the pool\n"
1915 "\tis strongly recommended after recovery.\n"));
1916 return;
1917
1918 no_info:
1919 (void) printf(dgettext(TEXT_DOMAIN,
1920 "Destroy and re-create the pool from\n\ta backup source.\n"));
1921 }
1922
1923 /*
1924 * zpool_import() is a contracted interface. Should be kept the same
1925 * if possible.
1926 *
1927 * Applications should use zpool_import_props() to import a pool with
1928 * new properties value to be set.
1929 */
1930 int
1931 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1932 char *altroot)
1933 {
1934 nvlist_t *props = NULL;
1935 int ret;
1936
1937 if (altroot != NULL) {
1938 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1939 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1940 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1941 newname));
1942 }
1943
1944 if (nvlist_add_string(props,
1945 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1946 nvlist_add_string(props,
1947 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1948 nvlist_free(props);
1949 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1950 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1951 newname));
1952 }
1953 }
1954
1955 ret = zpool_import_props(hdl, config, newname, props,
1956 ZFS_IMPORT_NORMAL);
1957 nvlist_free(props);
1958 return (ret);
1959 }
1960
1961 static void
1962 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1963 int indent)
1964 {
1965 nvlist_t **child;
1966 uint_t c, children;
1967 char *vname;
1968 uint64_t is_log = 0;
1969
1970 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1971 &is_log);
1972
1973 if (name != NULL)
1974 (void) printf("\t%*s%s%s\n", indent, "", name,
1975 is_log ? " [log]" : "");
1976
1977 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1978 &child, &children) != 0)
1979 return;
1980
1981 for (c = 0; c < children; c++) {
1982 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1983 print_vdev_tree(hdl, vname, child[c], indent + 2);
1984 free(vname);
1985 }
1986 }
1987
1988 void
1989 zpool_print_unsup_feat(nvlist_t *config)
1990 {
1991 nvlist_t *nvinfo, *unsup_feat;
1992
1993 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
1994 unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
1995
1996 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
1997 nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1998 char *desc = fnvpair_value_string(nvp);
1999 if (strlen(desc) > 0)
2000 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
2001 else
2002 (void) printf("\t%s\n", nvpair_name(nvp));
2003 }
2004 }
2005
2006 /*
2007 * Import the given pool using the known configuration and a list of
2008 * properties to be set. The configuration should have come from
2009 * zpool_find_import(). The 'newname' parameters control whether the pool
2010 * is imported with a different name.
2011 */
2012 int
2013 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2014 nvlist_t *props, int flags)
2015 {
2016 zfs_cmd_t zc = {"\0"};
2017 zpool_load_policy_t policy;
2018 nvlist_t *nv = NULL;
2019 nvlist_t *nvinfo = NULL;
2020 nvlist_t *missing = NULL;
2021 const char *thename;
2022 char *origname;
2023 int ret;
2024 int error = 0;
2025 char errbuf[1024];
2026
2027 origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
2028
2029 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2030 "cannot import pool '%s'"), origname);
2031
2032 if (newname != NULL) {
2033 if (!zpool_name_valid(hdl, B_FALSE, newname))
2034 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
2035 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2036 newname));
2037 thename = newname;
2038 } else {
2039 thename = origname;
2040 }
2041
2042 if (props != NULL) {
2043 uint64_t version;
2044 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2045
2046 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
2047
2048 if ((props = zpool_valid_proplist(hdl, origname,
2049 props, version, flags, errbuf)) == NULL)
2050 return (-1);
2051 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
2052 nvlist_free(props);
2053 return (-1);
2054 }
2055 nvlist_free(props);
2056 }
2057
2058 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
2059
2060 zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
2061
2062 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
2063 zcmd_free_nvlists(&zc);
2064 return (-1);
2065 }
2066 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
2067 zcmd_free_nvlists(&zc);
2068 return (-1);
2069 }
2070
2071 zc.zc_cookie = flags;
2072 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
2073 errno == ENOMEM) {
2074 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2075 zcmd_free_nvlists(&zc);
2076 return (-1);
2077 }
2078 }
2079 if (ret != 0)
2080 error = errno;
2081
2082 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
2083
2084 zcmd_free_nvlists(&zc);
2085
2086 zpool_get_load_policy(config, &policy);
2087
2088 if (error) {
2089 char desc[1024];
2090 char aux[256];
2091
2092 /*
2093 * Dry-run failed, but we print out what success
2094 * looks like if we found a best txg
2095 */
2096 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
2097 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2098 B_TRUE, nv);
2099 nvlist_free(nv);
2100 return (-1);
2101 }
2102
2103 if (newname == NULL)
2104 (void) snprintf(desc, sizeof (desc),
2105 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2106 thename);
2107 else
2108 (void) snprintf(desc, sizeof (desc),
2109 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
2110 origname, thename);
2111
2112 switch (error) {
2113 case ENOTSUP:
2114 if (nv != NULL && nvlist_lookup_nvlist(nv,
2115 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2116 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
2117 (void) printf(dgettext(TEXT_DOMAIN, "This "
2118 "pool uses the following feature(s) not "
2119 "supported by this system:\n"));
2120 zpool_print_unsup_feat(nv);
2121 if (nvlist_exists(nvinfo,
2122 ZPOOL_CONFIG_CAN_RDONLY)) {
2123 (void) printf(dgettext(TEXT_DOMAIN,
2124 "All unsupported features are only "
2125 "required for writing to the pool."
2126 "\nThe pool can be imported using "
2127 "'-o readonly=on'.\n"));
2128 }
2129 }
2130 /*
2131 * Unsupported version.
2132 */
2133 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
2134 break;
2135
2136 case EREMOTEIO:
2137 if (nv != NULL && nvlist_lookup_nvlist(nv,
2138 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2139 char *hostname = "<unknown>";
2140 uint64_t hostid = 0;
2141 mmp_state_t mmp_state;
2142
2143 mmp_state = fnvlist_lookup_uint64(nvinfo,
2144 ZPOOL_CONFIG_MMP_STATE);
2145
2146 if (nvlist_exists(nvinfo,
2147 ZPOOL_CONFIG_MMP_HOSTNAME))
2148 hostname = fnvlist_lookup_string(nvinfo,
2149 ZPOOL_CONFIG_MMP_HOSTNAME);
2150
2151 if (nvlist_exists(nvinfo,
2152 ZPOOL_CONFIG_MMP_HOSTID))
2153 hostid = fnvlist_lookup_uint64(nvinfo,
2154 ZPOOL_CONFIG_MMP_HOSTID);
2155
2156 if (mmp_state == MMP_STATE_ACTIVE) {
2157 (void) snprintf(aux, sizeof (aux),
2158 dgettext(TEXT_DOMAIN, "pool is imp"
2159 "orted on host '%s' (hostid=%lx).\n"
2160 "Export the pool on the other "
2161 "system, then run 'zpool import'."),
2162 hostname, (unsigned long) hostid);
2163 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
2164 (void) snprintf(aux, sizeof (aux),
2165 dgettext(TEXT_DOMAIN, "pool has "
2166 "the multihost property on and "
2167 "the\nsystem's hostid is not set. "
2168 "Set a unique system hostid with "
2169 "the zgenhostid(8) command.\n"));
2170 }
2171
2172 (void) zfs_error_aux(hdl, "%s", aux);
2173 }
2174 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2175 break;
2176
2177 case EINVAL:
2178 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2179 break;
2180
2181 case EROFS:
2182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2183 "one or more devices is read only"));
2184 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2185 break;
2186
2187 case ENXIO:
2188 if (nv && nvlist_lookup_nvlist(nv,
2189 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2190 nvlist_lookup_nvlist(nvinfo,
2191 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2192 (void) printf(dgettext(TEXT_DOMAIN,
2193 "The devices below are missing or "
2194 "corrupted, use '-m' to import the pool "
2195 "anyway:\n"));
2196 print_vdev_tree(hdl, NULL, missing, 2);
2197 (void) printf("\n");
2198 }
2199 (void) zpool_standard_error(hdl, error, desc);
2200 break;
2201
2202 case EEXIST:
2203 (void) zpool_standard_error(hdl, error, desc);
2204 break;
2205
2206 case EBUSY:
2207 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2208 "one or more devices are already in use\n"));
2209 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2210 break;
2211 case ENAMETOOLONG:
2212 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2213 "new name of at least one dataset is longer than "
2214 "the maximum allowable length"));
2215 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2216 break;
2217 default:
2218 (void) zpool_standard_error(hdl, error, desc);
2219 zpool_explain_recover(hdl,
2220 newname ? origname : thename, -error, nv);
2221 break;
2222 }
2223
2224 nvlist_free(nv);
2225 ret = -1;
2226 } else {
2227 zpool_handle_t *zhp;
2228
2229 /*
2230 * This should never fail, but play it safe anyway.
2231 */
2232 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2233 ret = -1;
2234 else if (zhp != NULL)
2235 zpool_close(zhp);
2236 if (policy.zlp_rewind &
2237 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2238 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2239 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2240 }
2241 nvlist_free(nv);
2242 return (0);
2243 }
2244
2245 return (ret);
2246 }
2247
2248 /*
2249 * Translate vdev names to guids. If a vdev_path is determined to be
2250 * unsuitable then a vd_errlist is allocated and the vdev path and errno
2251 * are added to it.
2252 */
2253 static int
2254 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2255 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2256 {
2257 nvlist_t *errlist = NULL;
2258 int error = 0;
2259
2260 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2261 elem = nvlist_next_nvpair(vds, elem)) {
2262 boolean_t spare, cache;
2263
2264 char *vd_path = nvpair_name(elem);
2265 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2266 NULL);
2267
2268 if ((tgt == NULL) || cache || spare) {
2269 if (errlist == NULL) {
2270 errlist = fnvlist_alloc();
2271 error = EINVAL;
2272 }
2273
2274 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2275 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2276 fnvlist_add_int64(errlist, vd_path, err);
2277 continue;
2278 }
2279
2280 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2281 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2282
2283 char msg[MAXNAMELEN];
2284 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2285 fnvlist_add_string(guids_to_paths, msg, vd_path);
2286 }
2287
2288 if (error != 0) {
2289 verify(errlist != NULL);
2290 if (vd_errlist != NULL)
2291 *vd_errlist = errlist;
2292 else
2293 fnvlist_free(errlist);
2294 }
2295
2296 return (error);
2297 }
2298
2299 static int
2300 xlate_init_err(int err)
2301 {
2302 switch (err) {
2303 case ENODEV:
2304 return (EZFS_NODEVICE);
2305 case EINVAL:
2306 case EROFS:
2307 return (EZFS_BADDEV);
2308 case EBUSY:
2309 return (EZFS_INITIALIZING);
2310 case ESRCH:
2311 return (EZFS_NO_INITIALIZE);
2312 }
2313 return (err);
2314 }
2315
/*
 * Begin, suspend, or cancel the initialization (initializing of all free
 * blocks) for the given vdevs in the given pool.
 *
 * 'vds' maps vdev paths to values; only its names are used.  When 'wait'
 * is set, block until initialization completes on every resolved vdev.
 * Returns 0 on success, -1 on any failure (errors reported per-vdev
 * where possible).
 */
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds, boolean_t wait)
{
	int err;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;
	nvlist_t *errlist;
	nvpair_t *elem;

	/* Resolve each requested path to its vdev guid. */
	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);

	if (err != 0) {
		verify(vd_errlist != NULL);
		goto list_errors;
	}

	err = lzc_initialize(zhp->zpool_name, cmd_type,
	    vdev_guids, &errlist);

	if (err != 0) {
		/*
		 * NOTE(review): 'errlist' is assumed to be set by
		 * lzc_initialize() on every failure path — confirm, since
		 * it is read here without prior initialization.
		 */
		if (errlist != NULL) {
			vd_errlist = fnvlist_lookup_nvlist(errlist,
			    ZPOOL_INITIALIZE_VDEVS);
			goto list_errors;
		}
		(void) zpool_standard_error(zhp->zpool_hdl, err,
		    dgettext(TEXT_DOMAIN, "operation failed"));
		goto out;
	}

	if (wait) {
		/* Wait for each vdev's initialize activity to finish. */
		for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
		    elem = nvlist_next_nvpair(vdev_guids, elem)) {

			uint64_t guid = fnvpair_value_uint64(elem);

			err = lzc_wait_tag(zhp->zpool_name,
			    ZPOOL_WAIT_INITIALIZE, guid, NULL);
			if (err != 0) {
				(void) zpool_standard_error_fmt(zhp->zpool_hdl,
				    err, dgettext(TEXT_DOMAIN, "error "
				    "waiting for '%s' to initialize"),
				    nvpair_name(elem));

				goto out;
			}
		}
	}
	goto out;

list_errors:
	/* Report one translated error per failed vdev, by path if known. */
	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
		char *path;

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot initialize '%s'", path);
	}

out:
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL)
		fnvlist_free(vd_errlist);

	return (err == 0 ? 0 : -1);
}
2397
2398 int
2399 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2400 nvlist_t *vds)
2401 {
2402 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2403 }
2404
2405 int
2406 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2407 nvlist_t *vds)
2408 {
2409 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2410 }
2411
2412 static int
2413 xlate_trim_err(int err)
2414 {
2415 switch (err) {
2416 case ENODEV:
2417 return (EZFS_NODEVICE);
2418 case EINVAL:
2419 case EROFS:
2420 return (EZFS_BADDEV);
2421 case EBUSY:
2422 return (EZFS_TRIMMING);
2423 case ESRCH:
2424 return (EZFS_NO_TRIM);
2425 case EOPNOTSUPP:
2426 return (EZFS_TRIM_NOTSUP);
2427 }
2428 return (err);
2429 }
2430
2431 static int
2432 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2433 {
2434 int err;
2435 nvpair_t *elem;
2436
2437 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2438 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2439
2440 uint64_t guid = fnvpair_value_uint64(elem);
2441
2442 err = lzc_wait_tag(zhp->zpool_name,
2443 ZPOOL_WAIT_TRIM, guid, NULL);
2444 if (err != 0) {
2445 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2446 err, dgettext(TEXT_DOMAIN, "error "
2447 "waiting to trim '%s'"), nvpair_name(elem));
2448
2449 return (err);
2450 }
2451 }
2452 return (0);
2453 }
2454
2455 /*
2456 * Check errlist and report any errors, omitting ones which should be
2457 * suppressed. Returns B_TRUE if any errors were reported.
2458 */
static boolean_t
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
    nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
{
	nvpair_t *elem;
	boolean_t reported_errs = B_FALSE;
	int num_vds = 0;
	int num_suppressed_errs = 0;

	/* Count the requested vdevs so we can detect "all failed" below. */
	for (elem = nvlist_next_nvpair(vds, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
		num_vds++;
	}

	for (elem = nvlist_next_nvpair(errlist, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
		int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
		char *path;

		/*
		 * If only the pool was specified, and it was not a secure
		 * trim then suppress warnings for individual vdevs which
		 * do not support trimming.
		 */
		if (vd_error == EZFS_TRIM_NOTSUP &&
		    trim_flags->fullpool &&
		    !trim_flags->secure) {
			num_suppressed_errs++;
			continue;
		}

		reported_errs = B_TRUE;
		/* Prefer the human-readable path; fall back to the guid. */
		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot trim '%s'", path);
	}

	/*
	 * Every requested vdev was suppressed as "not supported": emit a
	 * single pool-level message instead of per-vdev noise.
	 * NOTE(review): when both 'vds' and 'errlist' are empty this is
	 * 0 == 0 and still reports — confirm callers never pass two empty
	 * lists.
	 */
	if (num_suppressed_errs == num_vds) {
		(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
		    "no devices in pool support trim operations"));
		(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
		    dgettext(TEXT_DOMAIN, "cannot trim")));
		reported_errs = B_TRUE;
	}

	return (reported_errs);
}
2509
2510 /*
2511 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2512 * the given vdevs in the given pool.
2513 */
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
    trimflags_t *trim_flags)
{
	int err;
	int retval = 0;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *errlist = NULL;

	/* Resolve the user-supplied vdev paths to guids. */
	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &errlist);
	if (err != 0) {
		/* Translation failed; report the per-path errors and bail. */
		check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
		retval = -1;
		goto out;
	}

	err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
	    trim_flags->secure, vdev_guids, &errlist);
	if (err != 0) {
		nvlist_t *vd_errlist;
		/* Per-vdev errors, when present, take precedence. */
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
			/* Only fail if something was actually reported. */
			if (check_trim_errs(zhp, trim_flags, guids_to_paths,
			    vds, vd_errlist)) {
				retval = -1;
				goto out;
			}
		} else {
			char msg[1024];

			(void) snprintf(msg, sizeof (msg),
			    dgettext(TEXT_DOMAIN, "operation failed"));
			zpool_standard_error(zhp->zpool_hdl, err, msg);
			retval = -1;
			goto out;
		}
	}

	/* Optionally block until the TRIM completes on every vdev. */
	if (trim_flags->wait)
		retval = zpool_trim_wait(zhp, vdev_guids);

out:
	if (errlist != NULL)
		fnvlist_free(errlist);
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);
	return (retval);
}
2566
2567 /*
2568 * Scan the pool.
2569 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;	/* scan function (scrub/resilver/none) */
	zc.zc_flags = cmd;	/* scrub sub-command (normal/pause) */

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
		return (0);

	/* Save errno before any further library calls can clobber it. */
	err = errno;

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)
		return (0);

	/*
	 * NOTE(review): ENOENT for a normal scrub/resilver request is
	 * treated as success here — presumably "nothing to do"; confirm
	 * against the kernel's dsl_scan error returns.
	 */
	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
		return (0);

	/* Build the operation-specific error-message prefix. */
	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
		}
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (err == EBUSY) {
		/*
		 * A scan is already in progress: inspect the cached scan
		 * stats to report whether it is a scrub (possibly paused)
		 * or a resilver.
		 */
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
		    ps->pss_state == DSS_SCANNING) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
			else
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		} else {
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
	} else {
		return (zpool_standard_error(hdl, err, msg));
	}
}
2642
2643 /*
2644 * Find a vdev that matches the search criteria specified. We use the
2645 * the nvpair name to determine how we should look for the device.
2646 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2647 * spare; but FALSE if its an INUSE spare.
2648 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		/* Numeric search: match on the vdev guid. */
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval = fnvpair_value_uint64(pair);
			uint64_t theguid = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_GUID);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		srchval = fnvpair_value_string(pair);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "-part1", or "p1".  The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));

			id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
			/* Clear errno so strtoull() failure is detectable. */
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			/*
			 * If we are looking for a raidz and a parity is
			 * specified, make sure it matches.
			 */
			int rzlen = strlen(VDEV_TYPE_RAIDZ);
			assert(rzlen == strlen(VDEV_TYPE_DRAID));
			int typlen = strlen(type);
			if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
			    strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
			    typlen != rzlen) {
				uint64_t vdev_parity;
				int parity = *(type + rzlen) - '0';

				if (parity <= 0 || parity > 3 ||
				    (typlen - rzlen) != 1) {
					/*
					 * Nonsense parity specified, can
					 * never match
					 */
					free(type);
					return (NULL);
				}
				vdev_parity = fnvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY);
				if ((int)vdev_parity != parity) {
					free(type);
					break;
				}
			}

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	/* No match at this node: recurse into the normal child vdevs. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	/* Also search the spare devices, flagging a match as a spare. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	/* Finally search the l2cache devices. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
2838
2839 /*
2840 * Given a physical path or guid, find the associated vdev.
2841 */
2842 nvlist_t *
2843 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2844 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2845 {
2846 nvlist_t *search, *nvroot, *ret;
2847 uint64_t guid;
2848 char *end;
2849
2850 search = fnvlist_alloc();
2851
2852 guid = strtoull(ppath, &end, 0);
2853 if (guid != 0 && *end == '\0') {
2854 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
2855 } else {
2856 fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
2857 }
2858
2859 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
2860 ZPOOL_CONFIG_VDEV_TREE);
2861
2862 *avail_spare = B_FALSE;
2863 *l2cache = B_FALSE;
2864 if (log != NULL)
2865 *log = B_FALSE;
2866 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2867 fnvlist_free(search);
2868
2869 return (ret);
2870 }
2871
2872 /*
2873 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2874 */
2875 static boolean_t
2876 zpool_vdev_is_interior(const char *name)
2877 {
2878 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2879 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2880 strncmp(name,
2881 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2882 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2883 return (B_TRUE);
2884
2885 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
2886 !zpool_is_draid_spare(name))
2887 return (B_TRUE);
2888
2889 return (B_FALSE);
2890 }
2891
2892 nvlist_t *
2893 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2894 boolean_t *l2cache, boolean_t *log)
2895 {
2896 char *end;
2897 nvlist_t *nvroot, *search, *ret;
2898 uint64_t guid;
2899
2900 search = fnvlist_alloc();
2901
2902 guid = strtoull(path, &end, 0);
2903 if (guid != 0 && *end == '\0') {
2904 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
2905 } else if (zpool_vdev_is_interior(path)) {
2906 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
2907 } else {
2908 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
2909 }
2910
2911 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
2912 ZPOOL_CONFIG_VDEV_TREE);
2913
2914 *avail_spare = B_FALSE;
2915 *l2cache = B_FALSE;
2916 if (log != NULL)
2917 *log = B_FALSE;
2918 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2919 fnvlist_free(search);
2920
2921 return (ret);
2922 }
2923
2924 static int
2925 vdev_is_online(nvlist_t *nv)
2926 {
2927 uint64_t ival;
2928
2929 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2930 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2931 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2932 return (0);
2933
2934 return (1);
2935 }
2936
2937 /*
2938 * Helper function for zpool_get_physpaths().
2939 */
2940 static int
2941 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2942 size_t *bytes_written)
2943 {
2944 size_t bytes_left, pos, rsz;
2945 char *tmppath;
2946 const char *format;
2947
2948 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2949 &tmppath) != 0)
2950 return (EZFS_NODEVICE);
2951
2952 pos = *bytes_written;
2953 bytes_left = physpath_size - pos;
2954 format = (pos == 0) ? "%s" : " %s";
2955
2956 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2957 *bytes_written += rsz;
2958
2959 if (rsz >= bytes_left) {
2960 /* if physpath was not copied properly, clear it */
2961 if (bytes_left != 0) {
2962 physpath[pos] = 0;
2963 }
2964 return (EZFS_NOSPC);
2965 }
2966 return (0);
2967 }
2968
2969 static int
2970 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2971 size_t *rsz, boolean_t is_spare)
2972 {
2973 char *type;
2974 int ret;
2975
2976 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2977 return (EZFS_INVALCONFIG);
2978
2979 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2980 /*
2981 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2982 * For a spare vdev, we only want to boot from the active
2983 * spare device.
2984 */
2985 if (is_spare) {
2986 uint64_t spare = 0;
2987 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2988 &spare);
2989 if (!spare)
2990 return (EZFS_INVALCONFIG);
2991 }
2992
2993 if (vdev_is_online(nv)) {
2994 if ((ret = vdev_get_one_physpath(nv, physpath,
2995 phypath_size, rsz)) != 0)
2996 return (ret);
2997 }
2998 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2999 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3000 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
3001 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
3002 nvlist_t **child;
3003 uint_t count;
3004 int i, ret;
3005
3006 if (nvlist_lookup_nvlist_array(nv,
3007 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
3008 return (EZFS_INVALCONFIG);
3009
3010 for (i = 0; i < count; i++) {
3011 ret = vdev_get_physpaths(child[i], physpath,
3012 phypath_size, rsz, is_spare);
3013 if (ret == EZFS_NOSPC)
3014 return (ret);
3015 }
3016 }
3017
3018 return (EZFS_POOL_INVALARG);
3019 }
3020
3021 /*
3022 * Get phys_path for a root pool config.
3023 * Return 0 on success; non-zero on failure.
3024 */
3025 static int
3026 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
3027 {
3028 size_t rsz;
3029 nvlist_t *vdev_root;
3030 nvlist_t **child;
3031 uint_t count;
3032 char *type;
3033
3034 rsz = 0;
3035
3036 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3037 &vdev_root) != 0)
3038 return (EZFS_INVALCONFIG);
3039
3040 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
3041 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
3042 &child, &count) != 0)
3043 return (EZFS_INVALCONFIG);
3044
3045 /*
3046 * root pool can only have a single top-level vdev.
3047 */
3048 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
3049 return (EZFS_POOL_INVALARG);
3050
3051 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
3052 B_FALSE);
3053
3054 /* No online devices */
3055 if (rsz == 0)
3056 return (EZFS_NODEVICE);
3057
3058 return (0);
3059 }
3060
3061 /*
3062 * Get phys_path for a root pool
3063 * Return 0 on success; non-zero on failure.
3064 */
3065 int
3066 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
3067 {
3068 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
3069 phypath_size));
3070 }
3071
3072 /*
3073 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
3074 *
3075 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
3076 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
3077 * ignore them.
3078 */
3079 static uint64_t
3080 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
3081 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
3082 {
3083 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
3084 nvlist_t *tgt;
3085
3086 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
3087 &log)) == NULL)
3088 return (0);
3089
3090 if (is_spare != NULL)
3091 *is_spare = spare;
3092 if (is_l2cache != NULL)
3093 *is_l2cache = l2cache;
3094 if (is_log != NULL)
3095 *is_log = log;
3096
3097 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
3098 }
3099
3100 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
3101 uint64_t
3102 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
3103 {
3104 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
3105 }
3106
3107 /*
3108 * Bring the specified vdev online. The 'flags' parameter is a set of the
3109 * ZFS_ONLINE_* flags.
3110 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	/* An available hot spare cannot be onlined. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

#ifndef __FreeBSD__
	/*
	 * On an explicit expand, or when autoexpand is enabled, relabel a
	 * whole-disk vdev so the kernel can grow into the new capacity.
	 * (Not needed on FreeBSD.)
	 */
	char *pathname;
	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];
			int error;

			/* Expand a short name to its full device path. */
			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}
#endif

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		/* EINVAL here means the vdev now belongs to a split pool. */
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* The kernel reports the resulting vdev state back in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}
3196
3197 /*
3198 * Take the specified vdev offline
3199 */
3200 int
3201 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3202 {
3203 zfs_cmd_t zc = {"\0"};
3204 char msg[1024];
3205 nvlist_t *tgt;
3206 boolean_t avail_spare, l2cache;
3207 libzfs_handle_t *hdl = zhp->zpool_hdl;
3208
3209 (void) snprintf(msg, sizeof (msg),
3210 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3211
3212 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3213 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3214 NULL)) == NULL)
3215 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3216
3217 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3218
3219 if (avail_spare)
3220 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3221
3222 zc.zc_cookie = VDEV_STATE_OFFLINE;
3223 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3224
3225 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3226 return (0);
3227
3228 switch (errno) {
3229 case EBUSY:
3230
3231 /*
3232 * There are no other replicas of this device.
3233 */
3234 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3235
3236 case EEXIST:
3237 /*
3238 * The log device has unplayed logs
3239 */
3240 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
3241
3242 default:
3243 return (zpool_standard_error(hdl, errno, msg));
3244 }
3245 }
3246
3247 /*
3248 * Mark the given vdev faulted.
3249 */
3250 int
3251 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3252 {
3253 zfs_cmd_t zc = {"\0"};
3254 char msg[1024];
3255 libzfs_handle_t *hdl = zhp->zpool_hdl;
3256
3257 (void) snprintf(msg, sizeof (msg),
3258 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3259
3260 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3261 zc.zc_guid = guid;
3262 zc.zc_cookie = VDEV_STATE_FAULTED;
3263 zc.zc_obj = aux;
3264
3265 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3266 return (0);
3267
3268 switch (errno) {
3269 case EBUSY:
3270
3271 /*
3272 * There are no other replicas of this device.
3273 */
3274 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3275
3276 default:
3277 return (zpool_standard_error(hdl, errno, msg));
3278 }
3279
3280 }
3281
3282 /*
3283 * Mark the given vdev degraded.
3284 */
3285 int
3286 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3287 {
3288 zfs_cmd_t zc = {"\0"};
3289 char msg[1024];
3290 libzfs_handle_t *hdl = zhp->zpool_hdl;
3291
3292 (void) snprintf(msg, sizeof (msg),
3293 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
3294
3295 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3296 zc.zc_guid = guid;
3297 zc.zc_cookie = VDEV_STATE_DEGRADED;
3298 zc.zc_obj = aux;
3299
3300 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3301 return (0);
3302
3303 return (zpool_standard_error(hdl, errno, msg));
3304 }
3305
3306 /*
3307 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3308 * a hot spare.
3309 */
3310 static boolean_t
3311 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3312 {
3313 nvlist_t **child;
3314 uint_t c, children;
3315
3316 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3317 &children) == 0) {
3318 char *type = fnvlist_lookup_string(search, ZPOOL_CONFIG_TYPE);
3319 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3320 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3321 children == 2 && child[which] == tgt)
3322 return (B_TRUE);
3323
3324 for (c = 0; c < children; c++)
3325 if (is_replacing_spare(child[c], tgt, which))
3326 return (B_TRUE);
3327 }
3328
3329 return (B_FALSE);
3330 }
3331
3332 /*
3333 * Attach new_disk (fully described by nvroot) to old_disk.
3334 * If 'replacing' is specified, the new disk will replace the old one.
3335 */
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Cannot attach to / replace a spare or cache device directly. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	zc.zc_cookie = replacing;
	zc.zc_simple = rebuild;

	/* Sequential rebuild requires module support for the feature. */
	if (rebuild &&
	    zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "the loaded zfs module doesn't support device rebuilds"));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0)
		return (0);

	/* Translate the ioctl errno into a specific user-facing message. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			} else if (rebuild) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "only mirror and dRAID vdevs support "
				    "sequential reconstruction"));
			} else if (zpool_is_draid_spare(new_disk)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID spares can only replace child "
				    "devices in their parent's dRAID vdev"));
			} else if (version >= SPA_VERSION_MULTI_REPLACE) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
			}
		} else {
			/* Attach failed: check the rebuild-feature state. */
			char status[64] = {0};
			zpool_prop_get_feature(zhp,
			    "feature@device_rebuild", status, 63);
			if (rebuild &&
			    strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device_rebuild feature must be enabled "
				    "in order to use sequential "
				    "reconstruction"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "can only attach to mirrors and top-level "
				    "disks"));
			}
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
		    "or device removal is in progress"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
3516
3517 /*
3518 * Detach the specified device.
3519 */
3520 int
3521 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3522 {
3523 zfs_cmd_t zc = {"\0"};
3524 char msg[1024];
3525 nvlist_t *tgt;
3526 boolean_t avail_spare, l2cache;
3527 libzfs_handle_t *hdl = zhp->zpool_hdl;
3528
3529 (void) snprintf(msg, sizeof (msg),
3530 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3531
3532 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3533 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3534 NULL)) == NULL)
3535 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3536
3537 if (avail_spare)
3538 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3539
3540 if (l2cache)
3541 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3542
3543 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3544
3545 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3546 return (0);
3547
3548 switch (errno) {
3549
3550 case ENOTSUP:
3551 /*
3552 * Can't detach from this type of vdev.
3553 */
3554 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3555 "applicable to mirror and replacing vdevs"));
3556 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3557 break;
3558
3559 case EBUSY:
3560 /*
3561 * There are no other replicas of this device.
3562 */
3563 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3564 break;
3565
3566 default:
3567 (void) zpool_standard_error(hdl, errno, msg);
3568 }
3569
3570 return (-1);
3571 }
3572
3573 /*
3574 * Find a mirror vdev in the source nvlist.
3575 *
3576 * The mchild array contains a list of disks in one of the top-level mirrors
3577 * of the source pool. The schild array contains a list of disks that the
3578 * user specified on the command line. We loop over the mchild array to
3579 * see if any entry in the schild array matches.
3580 *
3581 * If a disk in the mchild array is found in the schild array, we return
3582 * the index of that entry. Otherwise we return -1.
3583 */
3584 static int
3585 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3586 nvlist_t **schild, uint_t schildren)
3587 {
3588 uint_t mc;
3589
3590 for (mc = 0; mc < mchildren; mc++) {
3591 uint_t sc;
3592 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3593 mchild[mc], 0);
3594
3595 for (sc = 0; sc < schildren; sc++) {
3596 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3597 schild[sc], 0);
3598 boolean_t result = (strcmp(mpath, spath) == 0);
3599
3600 free(spath);
3601 if (result) {
3602 free(mpath);
3603 return (mc);
3604 }
3605 }
3606
3607 free(mpath);
3608 }
3609
3610 return (-1);
3611 }
3612
3613 /*
3614 * Split a mirror pool. If newroot points to null, then a new nvlist
3615 * is generated and it is the responsibility of the caller to free it.
3616 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024], *bias;
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	/*
	 * freelist: we allocated *newroot here and must free it on failure.
	 * memory_err: still true at 'out' means an allocation failed.
	 */
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	/* The name chosen for the new pool must itself be valid. */
	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
	vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

	if (props) {
		/* NB: this local 'flags' shadows the splitflags_t parameter. */
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
		/* 'readonly' is an import-time-only property; reject it. */
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	/* One slot per top-level vdev in the source pool. */
	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			/* Remember where a trailing run of logs/holes began. */
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);

		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
			/* Indirect (removed) vdevs are copied through as-is. */
			vdev = child[c];
			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
				goto out;
			continue;
		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		if (nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
				is_special = B_TRUE;
			else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
				is_dedup = B_TRUE;
		}
		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;

		/* For a dry run, carry the allocation bias into the preview. */
		if (flags.dryrun != 0) {
			if (is_dedup == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_DEDUP) != 0)
					goto out;
			} else if (is_special == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_SPECIAL) != 0)
					goto out;
			}
		}
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	/* Success: the split happened, nothing below counts as an error. */
	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	/* Reaching 'out' without clearing memory_err means an alloc failed. */
	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
3855
3856 /*
3857 * Remove the given device.
3858 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	/* Distributed spares are a fixed part of their parent dRAID vdev. */
	if (zpool_is_draid_spare(path)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dRAID spares cannot be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Log device removal requires SPA_VERSION_HOLES on-disk support. */
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	/* Translate kernel errno values into user-facing diagnostics. */
	switch (errno) {

	case EINVAL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "invalid config; all top-level vdevs must "
		    "have the same sector size and not be raidz."));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		/* A busy log device usually means its ZIL needs replaying. */
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Pool busy; removal may already be in progress"));
		}
		(void) zfs_error(hdl, EZFS_BUSY, msg);
		break;

	case EACCES:
		/* Encrypted dataset logs cannot be replayed without keys. */
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
			(void) zfs_error(hdl, EZFS_BUSY, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}
	return (-1);
}
3930
3931 int
3932 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3933 {
3934 zfs_cmd_t zc = {{0}};
3935 char msg[1024];
3936 libzfs_handle_t *hdl = zhp->zpool_hdl;
3937
3938 (void) snprintf(msg, sizeof (msg),
3939 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3940
3941 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3942 zc.zc_cookie = 1;
3943
3944 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3945 return (0);
3946
3947 return (zpool_standard_error(hdl, errno, msg));
3948 }
3949
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
    uint64_t *sizep)
{
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
	    path);

	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Spares, cache and log devices never carry indirect mappings. */
	if (avail_spare || l2cache || islog) {
		*sizep = 0;
		return (0);
	}

	if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "indirect size not available"));
		/*
		 * NOTE(review): zfs_error() elsewhere in this file is passed
		 * EZFS_* codes, but here it gets the raw errno EINVAL --
		 * confirm whether an EZFS_* code was intended.
		 */
		return (zfs_error(hdl, EINVAL, msg));
	}
	return (0);
}
3979
3980 /*
3981 * Clear the errors for the pool, or the particular device if specified.
3982 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_load_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	}

	/* The requested rewind policy rides down in zc_cookie. */
	zpool_get_load_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zlp_rewind;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	/* Retry with a larger destination buffer until the reply fits. */
	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	/*
	 * A try-rewind failure (other than permission errors) still carries
	 * rewind information that should be reported to the user.
	 */
	if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
4054
4055 /*
4056 * Similar to zpool_clear(), but takes a GUID (used by fmd).
4057 */
4058 int
4059 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
4060 {
4061 zfs_cmd_t zc = {"\0"};
4062 char msg[1024];
4063 libzfs_handle_t *hdl = zhp->zpool_hdl;
4064
4065 (void) snprintf(msg, sizeof (msg),
4066 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
4067 (u_longlong_t)guid);
4068
4069 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4070 zc.zc_guid = guid;
4071 zc.zc_cookie = ZPOOL_NO_REWIND;
4072
4073 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
4074 return (0);
4075
4076 return (zpool_standard_error(hdl, errno, msg));
4077 }
4078
4079 /*
4080 * Change the GUID for a pool.
4081 */
4082 int
4083 zpool_reguid(zpool_handle_t *zhp)
4084 {
4085 char msg[1024];
4086 libzfs_handle_t *hdl = zhp->zpool_hdl;
4087 zfs_cmd_t zc = {"\0"};
4088
4089 (void) snprintf(msg, sizeof (msg),
4090 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
4091
4092 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4093 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
4094 return (0);
4095
4096 return (zpool_standard_error(hdl, errno, msg));
4097 }
4098
4099 /*
4100 * Reopen the pool.
4101 */
4102 int
4103 zpool_reopen_one(zpool_handle_t *zhp, void *data)
4104 {
4105 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4106 const char *pool_name = zpool_get_name(zhp);
4107 boolean_t *scrub_restart = data;
4108 int error;
4109
4110 error = lzc_reopen(pool_name, *scrub_restart);
4111 if (error) {
4112 return (zpool_standard_error_fmt(hdl, error,
4113 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
4114 }
4115
4116 return (0);
4117 }
4118
4119 /* call into libzfs_core to execute the sync IOCTL per pool */
4120 int
4121 zpool_sync_one(zpool_handle_t *zhp, void *data)
4122 {
4123 int ret;
4124 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4125 const char *pool_name = zpool_get_name(zhp);
4126 boolean_t *force = data;
4127 nvlist_t *innvl = fnvlist_alloc();
4128
4129 fnvlist_add_boolean_value(innvl, "force", *force);
4130 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
4131 nvlist_free(innvl);
4132 return (zpool_standard_error_fmt(hdl, ret,
4133 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
4134 }
4135 nvlist_free(innvl);
4136
4137 return (0);
4138 }
4139
#define PATH_BUF_LEN 64

/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	/*
	 * Environment variables override name_flags; any positive number,
	 * "YES" or "ON" (case-insensitive prefix match) enables the flag.
	 */
	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	/* Missing devices are named by GUID, as is VDEV_NAME_GUID mode. */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = zfs_strip_path(path);
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
		    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		/* No path at all; fall back to the vdev type as the name. */
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * If it's a dRAID device, we add parity, groups, and spares.
		 */
		if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
			uint64_t ndata, nparity, nspares;
			nvlist_t **child;
			uint_t children;

			verify(nvlist_lookup_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			nparity = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_NPARITY);
			ndata = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NDATA);
			nspares = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NSPARES);

			path = zpool_draid_name(buf, sizeof (buf), ndata,
			    nparity, nspares, children);
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_ID);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
4269
4270 static int
4271 zbookmark_mem_compare(const void *a, const void *b)
4272 {
4273 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
4274 }
4275
4276 /*
4277 * Retrieve the persistent error log, uniquify the members, and return to the
4278 * caller.
4279 */
4280 int
4281 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
4282 {
4283 zfs_cmd_t zc = {"\0"};
4284 libzfs_handle_t *hdl = zhp->zpool_hdl;
4285 uint64_t count;
4286 zbookmark_phys_t *zb = NULL;
4287 int i;
4288
4289 /*
4290 * Retrieve the raw error list from the kernel. If the number of errors
4291 * has increased, allocate more space and continue until we get the
4292 * entire list.
4293 */
4294 count = fnvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT);
4295 if (count == 0)
4296 return (0);
4297 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
4298 count * sizeof (zbookmark_phys_t));
4299 zc.zc_nvlist_dst_size = count;
4300 (void) strcpy(zc.zc_name, zhp->zpool_name);
4301 for (;;) {
4302 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4303 &zc) != 0) {
4304 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4305 if (errno == ENOMEM) {
4306 void *dst;
4307
4308 count = zc.zc_nvlist_dst_size;
4309 dst = zfs_alloc(zhp->zpool_hdl, count *
4310 sizeof (zbookmark_phys_t));
4311 zc.zc_nvlist_dst = (uintptr_t)dst;
4312 } else {
4313 return (zpool_standard_error_fmt(hdl, errno,
4314 dgettext(TEXT_DOMAIN, "errors: List of "
4315 "errors unavailable")));
4316 }
4317 } else {
4318 break;
4319 }
4320 }
4321
4322 /*
4323 * Sort the resulting bookmarks. This is a little confusing due to the
4324 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
4325 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4326 * _not_ copied as part of the process. So we point the start of our
4327 * array appropriate and decrement the total number of elements.
4328 */
4329 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
4330 zc.zc_nvlist_dst_size;
4331 count -= zc.zc_nvlist_dst_size;
4332
4333 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4334
4335 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4336
4337 /*
4338 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4339 */
4340 for (i = 0; i < count; i++) {
4341 nvlist_t *nv;
4342
4343 /* ignoring zb_blkid and zb_level for now */
4344 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4345 zb[i-1].zb_object == zb[i].zb_object)
4346 continue;
4347
4348 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4349 goto nomem;
4350 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4351 zb[i].zb_objset) != 0) {
4352 nvlist_free(nv);
4353 goto nomem;
4354 }
4355 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4356 zb[i].zb_object) != 0) {
4357 nvlist_free(nv);
4358 goto nomem;
4359 }
4360 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4361 nvlist_free(nv);
4362 goto nomem;
4363 }
4364 nvlist_free(nv);
4365 }
4366
4367 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4368 return (0);
4369
4370 nomem:
4371 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4372 return (no_memory(zhp->zpool_hdl));
4373 }
4374
4375 /*
4376 * Upgrade a ZFS pool to the latest on-disk version.
4377 */
4378 int
4379 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4380 {
4381 zfs_cmd_t zc = {"\0"};
4382 libzfs_handle_t *hdl = zhp->zpool_hdl;
4383
4384 (void) strcpy(zc.zc_name, zhp->zpool_name);
4385 zc.zc_cookie = new_version;
4386
4387 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4388 return (zpool_standard_error_fmt(hdl, errno,
4389 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4390 zhp->zpool_name));
4391 return (0);
4392 }
4393
/* Join argv into 'string' (capacity 'len'): basename of argv[0], then args. */
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int idx;

	(void) strlcpy(string, zfs_basename(argv[0]), len);
	for (idx = 1; idx < argc; idx++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[idx], len);
	}
}
4405
4406 int
4407 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4408 {
4409 zfs_cmd_t zc = {"\0"};
4410 nvlist_t *args;
4411 int err;
4412
4413 args = fnvlist_alloc();
4414 fnvlist_add_string(args, "message", message);
4415 err = zcmd_write_src_nvlist(hdl, &zc, args);
4416 if (err == 0)
4417 err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4418 nvlist_free(args);
4419 zcmd_free_nvlists(&zc);
4420 return (err);
4421 }
4422
4423 /*
4424 * Perform ioctl to get some command history of a pool.
4425 *
4426 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4427 * logical offset of the history buffer to start reading from.
4428 *
4429 * Upon return, 'off' is the next logical offset to read from and
4430 * 'len' is the actual amount of bytes read into 'buf'.
4431 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		/* Map the common failure modes onto friendly diagnostics. */
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	/* Report back how many bytes were read and the next read offset. */
	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
4471
4472 /*
4473 * Retrieve the command history of a pool.
4474 */
4475 int
4476 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4477 boolean_t *eof)
4478 {
4479 char *buf;
4480 int buflen = 128 * 1024;
4481 nvlist_t **records = NULL;
4482 uint_t numrecords = 0;
4483 int err, i;
4484 uint64_t start = *off;
4485
4486 buf = malloc(buflen);
4487 if (buf == NULL)
4488 return (ENOMEM);
4489 /* process about 1MB a time */
4490 while (*off - start < 1024 * 1024) {
4491 uint64_t bytes_read = buflen;
4492 uint64_t leftover;
4493
4494 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4495 break;
4496
4497 /* if nothing else was read in, we're at EOF, just return */
4498 if (!bytes_read) {
4499 *eof = B_TRUE;
4500 break;
4501 }
4502
4503 if ((err = zpool_history_unpack(buf, bytes_read,
4504 &leftover, &records, &numrecords)) != 0)
4505 break;
4506 *off -= leftover;
4507 if (leftover == bytes_read) {
4508 /*
4509 * no progress made, because buffer is not big enough
4510 * to hold this record; resize and retry.
4511 */
4512 buflen *= 2;
4513 free(buf);
4514 buf = malloc(buflen);
4515 if (buf == NULL)
4516 return (ENOMEM);
4517 }
4518 }
4519
4520 free(buf);
4521
4522 if (!err) {
4523 *nvhisp = fnvlist_alloc();
4524 fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4525 (const nvlist_t **)records, numrecords);
4526 }
4527 for (i = 0; i < numrecords; i++)
4528 nvlist_free(records[i]);
4529 free(records);
4530
4531 return (err);
4532 }
4533
4534 /*
4535 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4536 * If there is a new event available 'nvp' will contain a newly allocated
4537 * nvlist and 'dropped' will be set to the number of missed events since
4538 * the last call to this function. When 'nvp' is set to NULL it indicates
4539 * no new events are available. In either case the function returns 0 and
4540 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4541 * function will return a non-zero value. When the function is called in
4542 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4543 * it will not return until a new event is available.
4544 */
4545 int
4546 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4547 int *dropped, unsigned flags, int zevent_fd)
4548 {
4549 zfs_cmd_t zc = {"\0"};
4550 int error = 0;
4551
4552 *nvp = NULL;
4553 *dropped = 0;
4554 zc.zc_cleanup_fd = zevent_fd;
4555
4556 if (flags & ZEVENT_NONBLOCK)
4557 zc.zc_guid = ZEVENT_NONBLOCK;
4558
4559 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4560 return (-1);
4561
4562 retry:
4563 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4564 switch (errno) {
4565 case ESHUTDOWN:
4566 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4567 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4568 goto out;
4569 case ENOENT:
4570 /* Blocking error case should not occur */
4571 if (!(flags & ZEVENT_NONBLOCK))
4572 error = zpool_standard_error_fmt(hdl, errno,
4573 dgettext(TEXT_DOMAIN, "cannot get event"));
4574
4575 goto out;
4576 case ENOMEM:
4577 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4578 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4579 dgettext(TEXT_DOMAIN, "cannot get event"));
4580 goto out;
4581 } else {
4582 goto retry;
4583 }
4584 default:
4585 error = zpool_standard_error_fmt(hdl, errno,
4586 dgettext(TEXT_DOMAIN, "cannot get event"));
4587 goto out;
4588 }
4589 }
4590
4591 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4592 if (error != 0)
4593 goto out;
4594
4595 *dropped = (int)zc.zc_cookie;
4596 out:
4597 zcmd_free_nvlists(&zc);
4598
4599 return (error);
4600 }
4601
4602 /*
4603 * Clear all events.
4604 */
4605 int
4606 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4607 {
4608 zfs_cmd_t zc = {"\0"};
4609
4610 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4611 return (zpool_standard_error(hdl, errno,
4612 dgettext(TEXT_DOMAIN, "cannot clear events")));
4613
4614 if (count != NULL)
4615 *count = (int)zc.zc_cookie; /* # of events cleared */
4616
4617 return (0);
4618 }
4619
4620 /*
4621 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4622 * the passed zevent_fd file handle. On success zero is returned,
4623 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4624 */
4625 int
4626 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4627 {
4628 zfs_cmd_t zc = {"\0"};
4629 int error = 0;
4630
4631 zc.zc_guid = eid;
4632 zc.zc_cleanup_fd = zevent_fd;
4633
4634 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4635 switch (errno) {
4636 case ENOENT:
4637 error = zfs_error_fmt(hdl, EZFS_NOENT,
4638 dgettext(TEXT_DOMAIN, "cannot get event"));
4639 break;
4640
4641 case ENOMEM:
4642 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4643 dgettext(TEXT_DOMAIN, "cannot get event"));
4644 break;
4645
4646 default:
4647 error = zpool_standard_error_fmt(hdl, errno,
4648 dgettext(TEXT_DOMAIN, "cannot get event"));
4649 break;
4650 }
4651 }
4652
4653 return (error);
4654 }
4655
/*
 * Format a human-readable location for object 'obj' in dataset 'dsobj'
 * into 'pathname' (at most 'len' bytes).  Falls back to numeric
 * "<0x...>:<0x...>" forms whenever a name cannot be resolved.  When
 * 'always_unmounted' is set, the mountpoint-relative form is never used.
 *
 * Note: 'zc' is deliberately reused for both ioctls below; the first
 * call leaves the dataset name in zc.zc_value, which must be copied out
 * before the structure is repurposed for the second call.
 */
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	/* save the name before zc is reused for the next ioctl */
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			/* mounted: emit a full filesystem path */
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			/* unmounted: prefix with the dataset name */
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		/* object path unknown; fall back to its number */
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	/* free(NULL) is safe when is_mounted() found no mountpoint */
	free(mntpnt);
}
4706
4707 void
4708 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4709 char *pathname, size_t len)
4710 {
4711 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
4712 }
4713
4714 void
4715 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4716 char *pathname, size_t len)
4717 {
4718 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
4719 }
4720 /*
4721 * Wait while the specified activity is in progress in the pool.
4722 */
4723 int
4724 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
4725 {
4726 boolean_t missing;
4727
4728 int error = zpool_wait_status(zhp, activity, &missing, NULL);
4729
4730 if (missing) {
4731 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
4732 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4733 zhp->zpool_name);
4734 return (ENOENT);
4735 } else {
4736 return (error);
4737 }
4738 }
4739
4740 /*
4741 * Wait for the given activity and return the status of the wait (whether or not
4742 * any waiting was done) in the 'waited' parameter. Non-existent pools are
4743 * reported via the 'missing' parameter, rather than by printing an error
4744 * message. This is convenient when this function is called in a loop over a
4745 * long period of time (as it is, for example, by zpool's wait cmd). In that
4746 * scenario, a pool being exported or destroyed should be considered a normal
4747 * event, so we don't want to print an error when we find that the pool doesn't
4748 * exist.
4749 */
4750 int
4751 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
4752 boolean_t *missing, boolean_t *waited)
4753 {
4754 int error = lzc_wait(zhp->zpool_name, activity, waited);
4755 *missing = (error == ENOENT);
4756 if (*missing)
4757 return (0);
4758
4759 if (error != 0) {
4760 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4761 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4762 zhp->zpool_name);
4763 }
4764
4765 return (error);
4766 }
4767
4768 int
4769 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
4770 {
4771 int error = lzc_set_bootenv(zhp->zpool_name, envmap);
4772 if (error != 0) {
4773 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4774 dgettext(TEXT_DOMAIN,
4775 "error setting bootenv in pool '%s'"), zhp->zpool_name);
4776 }
4777
4778 return (error);
4779 }
4780
4781 int
4782 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
4783 {
4784 nvlist_t *nvl;
4785 int error;
4786
4787 nvl = NULL;
4788 error = lzc_get_bootenv(zhp->zpool_name, &nvl);
4789 if (error != 0) {
4790 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4791 dgettext(TEXT_DOMAIN,
4792 "error getting bootenv in pool '%s'"), zhp->zpool_name);
4793 } else {
4794 *nvlp = nvl;
4795 }
4796
4797 return (error);
4798 }
4799
4800 /*
4801 * Attempt to read and parse feature file(s) (from "compatibility" property).
4802 * Files contain zpool feature names, comma or whitespace-separated.
4803 * Comments (# character to next newline) are discarded.
4804 *
4805 * Arguments:
4806 * compatibility : string containing feature filenames
4807 * features : either NULL or pointer to array of boolean
4808 * report : either NULL or pointer to string buffer
4809 * rlen : length of "report" buffer
4810 *
4811 * compatibility is NULL (unset), "", "off", "legacy", or list of
4812 * comma-separated filenames. filenames should either be absolute,
4813 * or relative to:
4814 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
4815 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
4816 * (Unset), "" or "off" => enable all features
4817 * "legacy" => disable all features
4818 *
4819 * Any feature names read from files which match unames in spa_feature_table
4820 * will have the corresponding boolean set in the features array (if non-NULL).
4821 * If more than one feature set specified, only features present in *all* of
4822 * them will be set.
4823 *
4824 * "report" if not NULL will be populated with a suitable status message.
4825 *
4826 * Return values:
4827 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok
4828 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
4829 * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
4830 * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
4831 * ZPOOL_COMPATIBILITY_NOFILES : no feature files found
4832 */
4833 zpool_compat_status_t
4834 zpool_load_compat(const char *compat, boolean_t *features, char *report,
4835 size_t rlen)
4836 {
4837 int sdirfd, ddirfd, featfd;
4838 struct stat fs;
4839 char *fc;
4840 char *ps, *ls, *ws;
4841 char *file, *line, *word;
4842
4843 char l_compat[ZFS_MAXPROPLEN];
4844
4845 boolean_t ret_nofiles = B_TRUE;
4846 boolean_t ret_badfile = B_FALSE;
4847 boolean_t ret_badtoken = B_FALSE;
4848 boolean_t ret_warntoken = B_FALSE;
4849
4850 /* special cases (unset), "" and "off" => enable all features */
4851 if (compat == NULL || compat[0] == '\0' ||
4852 strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
4853 if (features != NULL)
4854 for (uint_t i = 0; i < SPA_FEATURES; i++)
4855 features[i] = B_TRUE;
4856 if (report != NULL)
4857 strlcpy(report, gettext("all features enabled"), rlen);
4858 return (ZPOOL_COMPATIBILITY_OK);
4859 }
4860
4861 /* Final special case "legacy" => disable all features */
4862 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
4863 if (features != NULL)
4864 for (uint_t i = 0; i < SPA_FEATURES; i++)
4865 features[i] = B_FALSE;
4866 if (report != NULL)
4867 strlcpy(report, gettext("all features disabled"), rlen);
4868 return (ZPOOL_COMPATIBILITY_OK);
4869 }
4870
4871 /*
4872 * Start with all true; will be ANDed with results from each file
4873 */
4874 if (features != NULL)
4875 for (uint_t i = 0; i < SPA_FEATURES; i++)
4876 features[i] = B_TRUE;
4877
4878 char err_badfile[1024] = "";
4879 char err_badtoken[1024] = "";
4880
4881 /*
4882 * We ignore errors from the directory open()
4883 * as they're only needed if the filename is relative
4884 * which will be checked during the openat().
4885 */
4886
4887 /* O_PATH safer than O_RDONLY if system allows it */
4888 #if defined(O_PATH)
4889 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
4890 #else
4891 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
4892 #endif
4893
4894 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
4895 ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
4896
4897 (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
4898
4899 for (file = strtok_r(l_compat, ",", &ps);
4900 file != NULL;
4901 file = strtok_r(NULL, ",", &ps)) {
4902
4903 boolean_t l_features[SPA_FEATURES];
4904
4905 enum { Z_SYSCONF, Z_DATA } source;
4906
4907 /* try sysconfdir first, then datadir */
4908 source = Z_SYSCONF;
4909 if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
4910 featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
4911 source = Z_DATA;
4912 }
4913
4914 /* File readable and correct size? */
4915 if (featfd < 0 ||
4916 fstat(featfd, &fs) < 0 ||
4917 fs.st_size < 1 ||
4918 fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
4919 (void) close(featfd);
4920 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
4921 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
4922 ret_badfile = B_TRUE;
4923 continue;
4924 }
4925
4926 /* Prefault the file if system allows */
4927 #if defined(MAP_POPULATE)
4928 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
4929 #elif defined(MAP_PREFAULT_READ)
4930 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
4931 #else
4932 #define ZC_MMAP_FLAGS (MAP_PRIVATE)
4933 #endif
4934
4935 /* private mmap() so we can strtok safely */
4936 fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
4937 ZC_MMAP_FLAGS, featfd, 0);
4938 (void) close(featfd);
4939
4940 /* map ok, and last character == newline? */
4941 if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
4942 (void) munmap((void *) fc, fs.st_size);
4943 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
4944 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
4945 ret_badfile = B_TRUE;
4946 continue;
4947 }
4948
4949 ret_nofiles = B_FALSE;
4950
4951 for (uint_t i = 0; i < SPA_FEATURES; i++)
4952 l_features[i] = B_FALSE;
4953
4954 /* replace final newline with NULL to ensure string ends */
4955 fc[fs.st_size - 1] = '\0';
4956
4957 for (line = strtok_r(fc, "\n", &ls);
4958 line != NULL;
4959 line = strtok_r(NULL, "\n", &ls)) {
4960 /* discard comments */
4961 char *r = strchr(line, '#');
4962 if (r != NULL)
4963 *r = '\0';
4964
4965 for (word = strtok_r(line, ", \t", &ws);
4966 word != NULL;
4967 word = strtok_r(NULL, ", \t", &ws)) {
4968 /* Find matching feature name */
4969 uint_t f;
4970 for (f = 0; f < SPA_FEATURES; f++) {
4971 zfeature_info_t *fi =
4972 &spa_feature_table[f];
4973 if (strcmp(word, fi->fi_uname) == 0) {
4974 l_features[f] = B_TRUE;
4975 break;
4976 }
4977 }
4978 if (f < SPA_FEATURES)
4979 continue;
4980
4981 /* found an unrecognized word */
4982 /* lightly sanitize it */
4983 if (strlen(word) > 32)
4984 word[32] = '\0';
4985 for (char *c = word; *c != '\0'; c++)
4986 if (!isprint(*c))
4987 *c = '?';
4988
4989 strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
4990 strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
4991 if (source == Z_SYSCONF)
4992 ret_badtoken = B_TRUE;
4993 else
4994 ret_warntoken = B_TRUE;
4995 }
4996 }
4997 (void) munmap((void *) fc, fs.st_size);
4998
4999 if (features != NULL)
5000 for (uint_t i = 0; i < SPA_FEATURES; i++)
5001 features[i] &= l_features[i];
5002 }
5003 (void) close(sdirfd);
5004 (void) close(ddirfd);
5005
5006 /* Return the most serious error */
5007 if (ret_badfile) {
5008 if (report != NULL)
5009 snprintf(report, rlen, gettext("could not read/"
5010 "parse feature file(s): %s"), err_badfile);
5011 return (ZPOOL_COMPATIBILITY_BADFILE);
5012 }
5013 if (ret_nofiles) {
5014 if (report != NULL)
5015 strlcpy(report,
5016 gettext("no valid compatibility files specified"),
5017 rlen);
5018 return (ZPOOL_COMPATIBILITY_NOFILES);
5019 }
5020 if (ret_badtoken) {
5021 if (report != NULL)
5022 snprintf(report, rlen, gettext("invalid feature "
5023 "name(s) in local compatibility files: %s"),
5024 err_badtoken);
5025 return (ZPOOL_COMPATIBILITY_BADTOKEN);
5026 }
5027 if (ret_warntoken) {
5028 if (report != NULL)
5029 snprintf(report, rlen, gettext("unrecognized feature "
5030 "name(s) in distribution compatibility files: %s"),
5031 err_badtoken);
5032 return (ZPOOL_COMPATIBILITY_WARNTOKEN);
5033 }
5034 if (report != NULL)
5035 strlcpy(report, gettext("compatibility set ok"), rlen);
5036 return (ZPOOL_COMPATIBILITY_OK);
5037 }
5038
5039 static int
5040 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
5041 {
5042 nvlist_t *tgt;
5043 boolean_t avail_spare, l2cache;
5044
5045 verify(zhp != NULL);
5046 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
5047 char errbuf[1024];
5048 (void) snprintf(errbuf, sizeof (errbuf),
5049 dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
5050 return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
5051 }
5052
5053 if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
5054 NULL)) == NULL) {
5055 char errbuf[1024];
5056 (void) snprintf(errbuf, sizeof (errbuf),
5057 dgettext(TEXT_DOMAIN, "can not find %s in %s"),
5058 vdevname, zhp->zpool_name);
5059 return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
5060 }
5061
5062 *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
5063 return (0);
5064 }
5065
5066 /*
5067 * Get a vdev property value for 'prop' and return the value in
5068 * a pre-allocated buffer.
5069 */
/*
 * Extract the value of vdev property 'prop' from the nvlist 'nvprop'
 * (as returned by the kernel) and render it into 'buf' (at most 'len'
 * bytes).  For user properties, 'prop' is VDEV_PROP_USER and 'prop_name'
 * carries the property name.  The value's source is stored in *srctype
 * if non-NULL.  When 'literal' is set, numeric values are printed raw
 * rather than nicenum/percent formatted.  Returns 0 on success, -1 if
 * the property cannot be resolved.
 */
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
    char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
	nvlist_t *nv;
	char *strval;
	uint64_t intval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (prop == VDEV_PROP_USER) {
		/* user property, prop_name must contain the property name */
		assert(prop_name != NULL);
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			/* user prop not found */
			return (-1);
		}
		(void) strlcpy(buf, strval, len);
		if (srctype)
			*srctype = src;
		return (0);
	}

	/* native property: derive the name from the property id */
	if (prop_name == NULL)
		prop_name = (char *)vdev_prop_to_name(prop);

	switch (vdev_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			/* not in the nvlist: fall back to the default */
			src = ZPROP_SRC_DEFAULT;
			if ((strval = (char *)vdev_prop_default_string(prop))
			    == NULL)
				strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		break;

	case PROP_TYPE_NUMBER:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			/* not in the nvlist: fall back to the default */
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}

		/* per-property numeric formatting */
		switch (prop) {
		case VDEV_PROP_ASIZE:
		case VDEV_PROP_PSIZE:
		case VDEV_PROP_SIZE:
		case VDEV_PROP_BOOTSIZE:
		case VDEV_PROP_ALLOCATED:
		case VDEV_PROP_FREE:
		case VDEV_PROP_READ_ERRORS:
		case VDEV_PROP_WRITE_ERRORS:
		case VDEV_PROP_CHECKSUM_ERRORS:
		case VDEV_PROP_INITIALIZE_ERRORS:
		case VDEV_PROP_OPS_NULL:
		case VDEV_PROP_OPS_READ:
		case VDEV_PROP_OPS_WRITE:
		case VDEV_PROP_OPS_FREE:
		case VDEV_PROP_OPS_CLAIM:
		case VDEV_PROP_OPS_TRIM:
		case VDEV_PROP_BYTES_NULL:
		case VDEV_PROP_BYTES_READ:
		case VDEV_PROP_BYTES_WRITE:
		case VDEV_PROP_BYTES_FREE:
		case VDEV_PROP_BYTES_CLAIM:
		case VDEV_PROP_BYTES_TRIM:
			/* sizes and counters: raw or human-readable */
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_EXPANDSZ:
			/* zero means no expansion space: print "-" */
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_CAPACITY:
			/* percentage; '%' suffix unless literal */
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_FRAGMENTATION:
			/* UINT64_MAX marks "not available" */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_STATE:
			/* numeric state, or its symbolic name */
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) strlcpy(buf, zpool_state_to_name(intval,
				    VDEV_AUX_NONE), len);
			}
			break;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			/* not in the nvlist: fall back to the default */
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}
		/* map the index value back to its string form */
		if (vdev_prop_index_to_string(prop, intval,
		    (const char **)&strval) != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		/* unknown property type is a programming error */
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
5216
5217 /*
5218 * Get a vdev property value for 'prop_name' and return the value in
5219 * a pre-allocated buffer.
5220 */
5221 int
5222 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
5223 char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
5224 boolean_t literal)
5225 {
5226 nvlist_t *reqnvl, *reqprops;
5227 nvlist_t *retprops = NULL;
5228 uint64_t vdev_guid;
5229 int ret;
5230
5231 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5232 return (ret);
5233
5234 if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
5235 return (no_memory(zhp->zpool_hdl));
5236 if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
5237 return (no_memory(zhp->zpool_hdl));
5238
5239 fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5240
5241 if (prop != VDEV_PROP_USER) {
5242 /* prop_name overrides prop value */
5243 if (prop_name != NULL)
5244 prop = vdev_name_to_prop(prop_name);
5245 else
5246 prop_name = (char *)vdev_prop_to_name(prop);
5247 assert(prop < VDEV_NUM_PROPS);
5248 }
5249
5250 assert(prop_name != NULL);
5251 if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
5252 nvlist_free(reqnvl);
5253 nvlist_free(reqprops);
5254 return (no_memory(zhp->zpool_hdl));
5255 }
5256
5257 fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
5258
5259 ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
5260
5261 if (ret == 0) {
5262 ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
5263 len, srctype, literal);
5264 } else {
5265 char errbuf[1024];
5266 (void) snprintf(errbuf, sizeof (errbuf),
5267 dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
5268 " %s in %s"), prop_name, vdevname, zhp->zpool_name);
5269 (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
5270 }
5271
5272 nvlist_free(reqnvl);
5273 nvlist_free(reqprops);
5274 nvlist_free(retprops);
5275
5276 return (ret);
5277 }
5278
5279 /*
5280 * Get all vdev properties
5281 */
5282 int
5283 zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
5284 nvlist_t **outnvl)
5285 {
5286 nvlist_t *nvl = NULL;
5287 uint64_t vdev_guid;
5288 int ret;
5289
5290 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5291 return (ret);
5292
5293 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
5294 return (no_memory(zhp->zpool_hdl));
5295
5296 fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5297
5298 ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);
5299
5300 nvlist_free(nvl);
5301
5302 if (ret) {
5303 char errbuf[1024];
5304 (void) snprintf(errbuf, sizeof (errbuf),
5305 dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
5306 " %s in %s"), vdevname, zhp->zpool_name);
5307 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
5308 }
5309
5310 return (ret);
5311 }
5312
5313 /*
5314 * Set vdev property
5315 */
5316 int
5317 zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
5318 const char *propname, const char *propval)
5319 {
5320 int ret;
5321 nvlist_t *nvl = NULL;
5322 nvlist_t *outnvl = NULL;
5323 nvlist_t *props;
5324 nvlist_t *realprops;
5325 prop_flags_t flags = { 0 };
5326 uint64_t version;
5327 uint64_t vdev_guid;
5328
5329 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5330 return (ret);
5331
5332 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
5333 return (no_memory(zhp->zpool_hdl));
5334 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
5335 return (no_memory(zhp->zpool_hdl));
5336
5337 fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
5338
5339 if (nvlist_add_string(props, propname, propval) != 0) {
5340 nvlist_free(props);
5341 return (no_memory(zhp->zpool_hdl));
5342 }
5343
5344 char errbuf[1024];
5345 (void) snprintf(errbuf, sizeof (errbuf),
5346 dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
5347 propname, vdevname, zhp->zpool_name);
5348
5349 flags.vdevprop = 1;
5350 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
5351 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
5352 zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
5353 nvlist_free(props);
5354 nvlist_free(nvl);
5355 return (-1);
5356 }
5357
5358 nvlist_free(props);
5359 props = realprops;
5360
5361 fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
5362
5363 ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);
5364
5365 nvlist_free(props);
5366 nvlist_free(nvl);
5367 nvlist_free(outnvl);
5368
5369 if (ret)
5370 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
5371
5372 return (ret);
5373 }