lib/libzfs/libzfs_pool.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 * Copyright (c) 2018 Datto Inc.
28 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
29 * Copyright (c) 2017, Intel Corporation.
30 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
31 */
32
33 #include <errno.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <strings.h>
38 #include <unistd.h>
39 #include <libgen.h>
40 #include <zone.h>
41 #include <sys/stat.h>
42 #include <sys/efi_partition.h>
43 #include <sys/systeminfo.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/vdev_disk.h>
46 #include <dlfcn.h>
47 #include <libzutil.h>
48
49 #include "zfs_namecheck.h"
50 #include "zfs_prop.h"
51 #include "libzfs_impl.h"
52 #include "zfs_comutil.h"
53 #include "zfeature_common.h"
54
55 static boolean_t zpool_vdev_is_interior(const char *name);
56
57 typedef struct prop_flags {
58 int create:1; /* Validate property on creation */
59 int import:1; /* Validate property on import */
60 } prop_flags_t;
61
62 /*
63 * ====================================================================
64 * zpool property functions
65 * ====================================================================
66 */
67
68 static int
69 zpool_get_all_props(zpool_handle_t *zhp)
70 {
71 zfs_cmd_t zc = {"\0"};
72 libzfs_handle_t *hdl = zhp->zpool_hdl;
73
74 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
75
76 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
77 return (-1);
78
79 while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
80 if (errno == ENOMEM) {
81 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
82 zcmd_free_nvlists(&zc);
83 return (-1);
84 }
85 } else {
86 zcmd_free_nvlists(&zc);
87 return (-1);
88 }
89 }
90
91 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
92 zcmd_free_nvlists(&zc);
93 return (-1);
94 }
95
96 zcmd_free_nvlists(&zc);
97
98 return (0);
99 }
100
101 int
102 zpool_props_refresh(zpool_handle_t *zhp)
103 {
104 nvlist_t *old_props;
105
106 old_props = zhp->zpool_props;
107
108 if (zpool_get_all_props(zhp) != 0)
109 return (-1);
110
111 nvlist_free(old_props);
112 return (0);
113 }
114
115 static const char *
116 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
117 zprop_source_t *src)
118 {
119 nvlist_t *nv, *nvl;
120 uint64_t ival;
121 char *value;
122 zprop_source_t source;
123
124 nvl = zhp->zpool_props;
125 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
126 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
127 source = ival;
128 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
129 } else {
130 source = ZPROP_SRC_DEFAULT;
131 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
132 value = "-";
133 }
134
135 if (src)
136 *src = source;
137
138 return (value);
139 }
140
141 uint64_t
142 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
143 {
144 nvlist_t *nv, *nvl;
145 uint64_t value;
146 zprop_source_t source;
147
148 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
149 /*
150 * zpool_get_all_props() has most likely failed because
151 * the pool is faulted, but if all we need is the top level
152 * vdev's guid then get it from the zhp config nvlist.
153 */
154 if ((prop == ZPOOL_PROP_GUID) &&
155 (nvlist_lookup_nvlist(zhp->zpool_config,
156 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
157 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
158 == 0)) {
159 return (value);
160 }
161 return (zpool_prop_default_numeric(prop));
162 }
163
164 nvl = zhp->zpool_props;
165 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
166 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
167 source = value;
168 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
169 } else {
170 source = ZPROP_SRC_DEFAULT;
171 value = zpool_prop_default_numeric(prop);
172 }
173
174 if (src)
175 *src = source;
176
177 return (value);
178 }
179
180 /*
181 * Map VDEV STATE to printed strings.
182 */
183 const char *
184 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
185 {
186 switch (state) {
187 case VDEV_STATE_CLOSED:
188 case VDEV_STATE_OFFLINE:
189 return (gettext("OFFLINE"));
190 case VDEV_STATE_REMOVED:
191 return (gettext("REMOVED"));
192 case VDEV_STATE_CANT_OPEN:
193 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
194 return (gettext("FAULTED"));
195 else if (aux == VDEV_AUX_SPLIT_POOL)
196 return (gettext("SPLIT"));
197 else
198 return (gettext("UNAVAIL"));
199 case VDEV_STATE_FAULTED:
200 return (gettext("FAULTED"));
201 case VDEV_STATE_DEGRADED:
202 return (gettext("DEGRADED"));
203 case VDEV_STATE_HEALTHY:
204 return (gettext("ONLINE"));
205
206 default:
207 break;
208 }
209
210 return (gettext("UNKNOWN"));
211 }
212
213 /*
214 * Map POOL STATE to printed strings.
215 */
216 const char *
217 zpool_pool_state_to_name(pool_state_t state)
218 {
219 switch (state) {
220 default:
221 break;
222 case POOL_STATE_ACTIVE:
223 return (gettext("ACTIVE"));
224 case POOL_STATE_EXPORTED:
225 return (gettext("EXPORTED"));
226 case POOL_STATE_DESTROYED:
227 return (gettext("DESTROYED"));
228 case POOL_STATE_SPARE:
229 return (gettext("SPARE"));
230 case POOL_STATE_L2CACHE:
231 return (gettext("L2CACHE"));
232 case POOL_STATE_UNINITIALIZED:
233 return (gettext("UNINITIALIZED"));
234 case POOL_STATE_UNAVAIL:
235 return (gettext("UNAVAIL"));
236 case POOL_STATE_POTENTIALLY_ACTIVE:
237 return (gettext("POTENTIALLY_ACTIVE"));
238 }
239
240 return (gettext("UNKNOWN"));
241 }
242
243 /*
244 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
245 * "SUSPENDED", etc).
246 */
247 const char *
248 zpool_get_state_str(zpool_handle_t *zhp)
249 {
250 zpool_errata_t errata;
251 zpool_status_t status;
252 nvlist_t *nvroot;
253 vdev_stat_t *vs;
254 uint_t vsc;
255 const char *str;
256
257 status = zpool_get_status(zhp, NULL, &errata);
258
259 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
260 str = gettext("FAULTED");
261 } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
262 status == ZPOOL_STATUS_IO_FAILURE_MMP) {
263 str = gettext("SUSPENDED");
264 } else {
265 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
266 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
267 verify(nvlist_lookup_uint64_array(nvroot,
268 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
269 == 0);
270 str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
271 }
272 return (str);
273 }
274
275 /*
276 * Get a zpool property value for 'prop' and return the value in
277 * a pre-allocated buffer.
278 */
279 int
280 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
281 size_t len, zprop_source_t *srctype, boolean_t literal)
282 {
283 uint64_t intval;
284 const char *strval;
285 zprop_source_t src = ZPROP_SRC_NONE;
286
287 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
288 switch (prop) {
289 case ZPOOL_PROP_NAME:
290 (void) strlcpy(buf, zpool_get_name(zhp), len);
291 break;
292
293 case ZPOOL_PROP_HEALTH:
294 (void) strlcpy(buf, zpool_get_state_str(zhp), len);
295 break;
296
297 case ZPOOL_PROP_GUID:
298 intval = zpool_get_prop_int(zhp, prop, &src);
299 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
300 break;
301
302 case ZPOOL_PROP_ALTROOT:
303 case ZPOOL_PROP_CACHEFILE:
304 case ZPOOL_PROP_COMMENT:
305 if (zhp->zpool_props != NULL ||
306 zpool_get_all_props(zhp) == 0) {
307 (void) strlcpy(buf,
308 zpool_get_prop_string(zhp, prop, &src),
309 len);
310 break;
311 }
312 /* FALLTHROUGH */
313 default:
314 (void) strlcpy(buf, "-", len);
315 break;
316 }
317
318 if (srctype != NULL)
319 *srctype = src;
320 return (0);
321 }
322
323 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
324 prop != ZPOOL_PROP_NAME)
325 return (-1);
326
327 switch (zpool_prop_get_type(prop)) {
328 case PROP_TYPE_STRING:
329 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
330 len);
331 break;
332
333 case PROP_TYPE_NUMBER:
334 intval = zpool_get_prop_int(zhp, prop, &src);
335
336 switch (prop) {
337 case ZPOOL_PROP_SIZE:
338 case ZPOOL_PROP_ALLOCATED:
339 case ZPOOL_PROP_FREE:
340 case ZPOOL_PROP_FREEING:
341 case ZPOOL_PROP_LEAKED:
342 case ZPOOL_PROP_ASHIFT:
343 if (literal)
344 (void) snprintf(buf, len, "%llu",
345 (u_longlong_t)intval);
346 else
347 (void) zfs_nicenum(intval, buf, len);
348 break;
349
350 case ZPOOL_PROP_EXPANDSZ:
351 case ZPOOL_PROP_CHECKPOINT:
352 if (intval == 0) {
353 (void) strlcpy(buf, "-", len);
354 } else if (literal) {
355 (void) snprintf(buf, len, "%llu",
356 (u_longlong_t)intval);
357 } else {
358 (void) zfs_nicebytes(intval, buf, len);
359 }
360 break;
361
362 case ZPOOL_PROP_CAPACITY:
363 if (literal) {
364 (void) snprintf(buf, len, "%llu",
365 (u_longlong_t)intval);
366 } else {
367 (void) snprintf(buf, len, "%llu%%",
368 (u_longlong_t)intval);
369 }
370 break;
371
372 case ZPOOL_PROP_FRAGMENTATION:
373 if (intval == UINT64_MAX) {
374 (void) strlcpy(buf, "-", len);
375 } else if (literal) {
376 (void) snprintf(buf, len, "%llu",
377 (u_longlong_t)intval);
378 } else {
379 (void) snprintf(buf, len, "%llu%%",
380 (u_longlong_t)intval);
381 }
382 break;
383
384 case ZPOOL_PROP_DEDUPRATIO:
385 if (literal)
386 (void) snprintf(buf, len, "%llu.%02llu",
387 (u_longlong_t)(intval / 100),
388 (u_longlong_t)(intval % 100));
389 else
390 (void) snprintf(buf, len, "%llu.%02llux",
391 (u_longlong_t)(intval / 100),
392 (u_longlong_t)(intval % 100));
393 break;
394
395 case ZPOOL_PROP_HEALTH:
396 (void) strlcpy(buf, zpool_get_state_str(zhp), len);
397 break;
398 case ZPOOL_PROP_VERSION:
399 if (intval >= SPA_VERSION_FEATURES) {
400 (void) snprintf(buf, len, "-");
401 break;
402 }
403 /* FALLTHROUGH */
404 default:
405 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
406 }
407 break;
408
409 case PROP_TYPE_INDEX:
410 intval = zpool_get_prop_int(zhp, prop, &src);
411 if (zpool_prop_index_to_string(prop, intval, &strval)
412 != 0)
413 return (-1);
414 (void) strlcpy(buf, strval, len);
415 break;
416
417 default:
418 abort();
419 }
420
421 if (srctype)
422 *srctype = src;
423
424 return (0);
425 }
426
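/*
 * Example (illustrative, not part of the library): given an open
 * zpool_handle_t, a consumer might read the "capacity" property into a
 * caller-supplied buffer as sketched below.  Error handling is omitted.
 *
 *     char buf[ZFS_MAXPROPLEN];
 *     zprop_source_t src;
 *
 *     if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *         &src, B_FALSE) == 0)
 *             (void) printf("capacity: %s\n", buf);
 */
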
427 /*
428 * Check that the bootfs name refers to a dataset within the pool on
429 * which the property is being set.  Assumes bootfs is a valid dataset name.
430 */
431 static boolean_t
432 bootfs_name_valid(const char *pool, const char *bootfs)
433 {
434 int len = strlen(pool);
435 if (bootfs[0] == '\0')
436 return (B_TRUE);
437
438 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
439 return (B_FALSE);
440
441 if (strncmp(pool, bootfs, len) == 0 &&
442 (bootfs[len] == '/' || bootfs[len] == '\0'))
443 return (B_TRUE);
444
445 return (B_FALSE);
446 }
447
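/*
 * Example (illustrative): for a hypothetical pool named "rpool",
 * bootfs_name_valid("rpool", "rpool/ROOT/default") returns B_TRUE, while
 * bootfs_name_valid("rpool", "otherpool/ROOT") returns B_FALSE because the
 * dataset lives in a different pool.
 */
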
448 boolean_t
449 zpool_is_bootable(zpool_handle_t *zhp)
450 {
451 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
452
453 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
454 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
455 sizeof (bootfs)) != 0);
456 }
457
458
459 /*
460 * Given an nvlist of zpool properties to be set, validate that they are
461 * correct, and parse any numeric properties (index, boolean, etc) if they are
462 * specified as strings.
463 */
464 static nvlist_t *
465 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
466 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
467 {
468 nvpair_t *elem;
469 nvlist_t *retprops;
470 zpool_prop_t prop;
471 char *strval;
472 uint64_t intval;
473 char *slash, *check;
474 struct stat64 statbuf;
475 zpool_handle_t *zhp;
476
477 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
478 (void) no_memory(hdl);
479 return (NULL);
480 }
481
482 elem = NULL;
483 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
484 const char *propname = nvpair_name(elem);
485
486 prop = zpool_name_to_prop(propname);
487 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
488 int err;
489 char *fname = strchr(propname, '@') + 1;
490
491 err = zfeature_lookup_name(fname, NULL);
492 if (err != 0) {
493 ASSERT3U(err, ==, ENOENT);
494 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
495 "invalid feature '%s'"), fname);
496 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
497 goto error;
498 }
499
500 if (nvpair_type(elem) != DATA_TYPE_STRING) {
501 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
502 "'%s' must be a string"), propname);
503 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
504 goto error;
505 }
506
507 (void) nvpair_value_string(elem, &strval);
508 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
509 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
511 "property '%s' can only be set to "
512 "'enabled' or 'disabled'"), propname);
513 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
514 goto error;
515 }
516
517 if (!flags.create &&
518 strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
519 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
520 "property '%s' can only be set to "
521 "'disabled' at creation time"), propname);
522 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
523 goto error;
524 }
525
526 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
527 (void) no_memory(hdl);
528 goto error;
529 }
530 continue;
531 }
532
533 /*
534 * Make sure this property is valid and applies to this type.
535 */
536 if (prop == ZPOOL_PROP_INVAL) {
537 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
538 "invalid property '%s'"), propname);
539 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
540 goto error;
541 }
542
543 if (zpool_prop_readonly(prop)) {
544 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
545 "is readonly"), propname);
546 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
547 goto error;
548 }
549
550 if (!flags.create && zpool_prop_setonce(prop)) {
551 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
552 "property '%s' can only be set at "
553 "creation time"), propname);
554 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
555 goto error;
556 }
557
558 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
559 &strval, &intval, errbuf) != 0)
560 goto error;
561
562 /*
563 * Perform additional checking for specific properties.
564 */
565 switch (prop) {
566 case ZPOOL_PROP_VERSION:
567 if (intval < version ||
568 !SPA_VERSION_IS_SUPPORTED(intval)) {
569 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
570 "property '%s' number %d is invalid."),
571 propname, intval);
572 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
573 goto error;
574 }
575 break;
576
577 case ZPOOL_PROP_ASHIFT:
578 if (intval != 0 &&
579 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
581 "property '%s' number %d is invalid, only "
582 "values between %" PRId32 " and "
583 "%" PRId32 " are allowed."),
584 propname, intval, ASHIFT_MIN, ASHIFT_MAX);
585 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
586 goto error;
587 }
588 break;
589
590 case ZPOOL_PROP_BOOTFS:
591 if (flags.create || flags.import) {
592 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
593 "property '%s' cannot be set at creation "
594 "or import time"), propname);
595 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
596 goto error;
597 }
598
599 if (version < SPA_VERSION_BOOTFS) {
600 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
601 "pool must be upgraded to support "
602 "'%s' property"), propname);
603 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
604 goto error;
605 }
606
607 /*
608 * The bootfs property value has to be a dataset name and
609 * the dataset has to be in the same pool on which it is being set.
610 */
611 if (!bootfs_name_valid(poolname, strval)) {
612 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
613 "is an invalid name"), strval);
614 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
615 goto error;
616 }
617
618 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
619 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
620 "could not open pool '%s'"), poolname);
621 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
622 goto error;
623 }
624 zpool_close(zhp);
625 break;
626
627 case ZPOOL_PROP_ALTROOT:
628 if (!flags.create && !flags.import) {
629 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
630 "property '%s' can only be set during pool "
631 "creation or import"), propname);
632 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
633 goto error;
634 }
635
636 if (strval[0] != '/') {
637 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
638 "bad alternate root '%s'"), strval);
639 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
640 goto error;
641 }
642 break;
643
644 case ZPOOL_PROP_CACHEFILE:
645 if (strval[0] == '\0')
646 break;
647
648 if (strcmp(strval, "none") == 0)
649 break;
650
651 if (strval[0] != '/') {
652 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
653 "property '%s' must be empty, an "
654 "absolute path, or 'none'"), propname);
655 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
656 goto error;
657 }
658
659 slash = strrchr(strval, '/');
660
661 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
662 strcmp(slash, "/..") == 0) {
663 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
664 "'%s' is not a valid file"), strval);
665 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
666 goto error;
667 }
668
669 *slash = '\0';
670
671 if (strval[0] != '\0' &&
672 (stat64(strval, &statbuf) != 0 ||
673 !S_ISDIR(statbuf.st_mode))) {
674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
675 "'%s' is not a valid directory"),
676 strval);
677 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
678 goto error;
679 }
680
681 *slash = '/';
682 break;
683
684 case ZPOOL_PROP_COMMENT:
685 for (check = strval; *check != '\0'; check++) {
686 if (!isprint(*check)) {
687 zfs_error_aux(hdl,
688 dgettext(TEXT_DOMAIN,
689 "comment may only have printable "
690 "characters"));
691 (void) zfs_error(hdl, EZFS_BADPROP,
692 errbuf);
693 goto error;
694 }
695 }
696 if (strlen(strval) > ZPROP_MAX_COMMENT) {
697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
698 "comment must not exceed %d characters"),
699 ZPROP_MAX_COMMENT);
700 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
701 goto error;
702 }
703 break;
704 case ZPOOL_PROP_READONLY:
705 if (!flags.import) {
706 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
707 "property '%s' can only be set at "
708 "import time"), propname);
709 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
710 goto error;
711 }
712 break;
713 case ZPOOL_PROP_MULTIHOST:
714 if (get_system_hostid() == 0) {
715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
716 "requires a non-zero system hostid"));
717 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
718 goto error;
719 }
720 break;
721 case ZPOOL_PROP_DEDUPDITTO:
722 printf("Note: property '%s' no longer has "
723 "any effect\n", propname);
724 break;
725
726 default:
727 break;
728 }
729 }
730
731 return (retprops);
732 error:
733 nvlist_free(retprops);
734 return (NULL);
735 }
736
737 /*
738 * Set zpool property : propname=propval.
739 */
740 int
741 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
742 {
743 zfs_cmd_t zc = {"\0"};
744 int ret = -1;
745 char errbuf[1024];
746 nvlist_t *nvl = NULL;
747 nvlist_t *realprops;
748 uint64_t version;
749 prop_flags_t flags = { 0 };
750
751 (void) snprintf(errbuf, sizeof (errbuf),
752 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
753 zhp->zpool_name);
754
755 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
756 return (no_memory(zhp->zpool_hdl));
757
758 if (nvlist_add_string(nvl, propname, propval) != 0) {
759 nvlist_free(nvl);
760 return (no_memory(zhp->zpool_hdl));
761 }
762
763 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
764 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
765 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
766 nvlist_free(nvl);
767 return (-1);
768 }
769
770 nvlist_free(nvl);
771 nvl = realprops;
772
773 /*
774 * Execute the corresponding ioctl() to set this property.
775 */
776 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
777
778 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
779 nvlist_free(nvl);
780 return (-1);
781 }
782
783 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
784
785 zcmd_free_nvlists(&zc);
786 nvlist_free(nvl);
787
788 if (ret)
789 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
790 else
791 (void) zpool_props_refresh(zhp);
792
793 return (ret);
794 }
795
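/*
 * Example (illustrative sketch): setting a pool property from a consumer
 * that holds an open handle.  The property/value pair is only an example.
 *
 *     if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *             (void) fprintf(stderr, "failed to set autoexpand\n");
 */
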
796 int
797 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
798 {
799 libzfs_handle_t *hdl = zhp->zpool_hdl;
800 zprop_list_t *entry;
801 char buf[ZFS_MAXPROPLEN];
802 nvlist_t *features = NULL;
803 nvpair_t *nvp;
804 zprop_list_t **last;
805 boolean_t firstexpand = (NULL == *plp);
806 int i;
807
808 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
809 return (-1);
810
811 last = plp;
812 while (*last != NULL)
813 last = &(*last)->pl_next;
814
815 if ((*plp)->pl_all)
816 features = zpool_get_features(zhp);
817
818 if ((*plp)->pl_all && firstexpand) {
819 for (i = 0; i < SPA_FEATURES; i++) {
820 zprop_list_t *entry = zfs_alloc(hdl,
821 sizeof (zprop_list_t));
822 entry->pl_prop = ZPROP_INVAL;
823 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
824 spa_feature_table[i].fi_uname);
825 entry->pl_width = strlen(entry->pl_user_prop);
826 entry->pl_all = B_TRUE;
827
828 *last = entry;
829 last = &entry->pl_next;
830 }
831 }
832
833 /* add any unsupported features */
834 for (nvp = nvlist_next_nvpair(features, NULL);
835 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
836 char *propname;
837 boolean_t found;
838 zprop_list_t *entry;
839
840 if (zfeature_is_supported(nvpair_name(nvp)))
841 continue;
842
843 propname = zfs_asprintf(hdl, "unsupported@%s",
844 nvpair_name(nvp));
845
846 /*
847 * Before adding the property to the list make sure that no
848 * other pool already added the same property.
849 */
850 found = B_FALSE;
851 entry = *plp;
852 while (entry != NULL) {
853 if (entry->pl_user_prop != NULL &&
854 strcmp(propname, entry->pl_user_prop) == 0) {
855 found = B_TRUE;
856 break;
857 }
858 entry = entry->pl_next;
859 }
860 if (found) {
861 free(propname);
862 continue;
863 }
864
865 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
866 entry->pl_prop = ZPROP_INVAL;
867 entry->pl_user_prop = propname;
868 entry->pl_width = strlen(entry->pl_user_prop);
869 entry->pl_all = B_TRUE;
870
871 *last = entry;
872 last = &entry->pl_next;
873 }
874
875 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
876
877 if (entry->pl_fixed)
878 continue;
879
880 if (entry->pl_prop != ZPROP_INVAL &&
881 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
882 NULL, B_FALSE) == 0) {
883 if (strlen(buf) > entry->pl_width)
884 entry->pl_width = strlen(buf);
885 }
886 }
887
888 return (0);
889 }
890
891 /*
892 * Get the state for the given feature on the given ZFS pool.
893 */
894 int
895 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
896 size_t len)
897 {
898 uint64_t refcount;
899 boolean_t found = B_FALSE;
900 nvlist_t *features = zpool_get_features(zhp);
901 boolean_t supported;
902 const char *feature = strchr(propname, '@') + 1;
903
904 supported = zpool_prop_feature(propname);
905 ASSERT(supported || zpool_prop_unsupported(propname));
906
907 /*
908 * Convert from feature name to feature guid. This conversion is
909 * unnecessary for unsupported@... properties because they already
910 * use guids.
911 */
912 if (supported) {
913 int ret;
914 spa_feature_t fid;
915
916 ret = zfeature_lookup_name(feature, &fid);
917 if (ret != 0) {
918 (void) strlcpy(buf, "-", len);
919 return (ENOTSUP);
920 }
921 feature = spa_feature_table[fid].fi_guid;
922 }
923
924 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
925 found = B_TRUE;
926
927 if (supported) {
928 if (!found) {
929 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
930 } else {
931 if (refcount == 0)
932 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
933 else
934 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
935 }
936 } else {
937 if (found) {
938 if (refcount == 0) {
939 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
940 } else {
941 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
942 }
943 } else {
944 (void) strlcpy(buf, "-", len);
945 return (ENOTSUP);
946 }
947 }
948
949 return (0);
950 }
951
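/*
 * Example (illustrative sketch): querying the state of a feature flag.
 * "feature@async_destroy" is used here purely for illustration.
 *
 *     char state[ZFS_MAXPROPLEN];
 *
 *     if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *         sizeof (state)) == 0)
 *             (void) printf("async_destroy is %s\n", state);
 */
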
952 /*
953 * Validate the given pool name, optionally reporting an extended error
954 * message through 'hdl'.
955 */
956 boolean_t
957 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
958 {
959 namecheck_err_t why;
960 char what;
961 int ret;
962
963 ret = pool_namecheck(pool, &why, &what);
964
965 /*
966 * The rules for reserved pool names were extended at a later point.
967 * But we need to support users with existing pools that may now be
968 * invalid. So we only check for this expanded set of names during a
969 * create (or import), and only in userland.
970 */
971 if (ret == 0 && !isopen &&
972 (strncmp(pool, "mirror", 6) == 0 ||
973 strncmp(pool, "raidz", 5) == 0 ||
974 strncmp(pool, "spare", 5) == 0 ||
975 strcmp(pool, "log") == 0)) {
976 if (hdl != NULL)
977 zfs_error_aux(hdl,
978 dgettext(TEXT_DOMAIN, "name is reserved"));
979 return (B_FALSE);
980 }
981
982
983 if (ret != 0) {
984 if (hdl != NULL) {
985 switch (why) {
986 case NAME_ERR_TOOLONG:
987 zfs_error_aux(hdl,
988 dgettext(TEXT_DOMAIN, "name is too long"));
989 break;
990
991 case NAME_ERR_INVALCHAR:
992 zfs_error_aux(hdl,
993 dgettext(TEXT_DOMAIN, "invalid character "
994 "'%c' in pool name"), what);
995 break;
996
997 case NAME_ERR_NOLETTER:
998 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
999 "name must begin with a letter"));
1000 break;
1001
1002 case NAME_ERR_RESERVED:
1003 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1004 "name is reserved"));
1005 break;
1006
1007 case NAME_ERR_DISKLIKE:
1008 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1009 "pool name is reserved"));
1010 break;
1011
1012 case NAME_ERR_LEADING_SLASH:
1013 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1014 "leading slash in name"));
1015 break;
1016
1017 case NAME_ERR_EMPTY_COMPONENT:
1018 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1019 "empty component in name"));
1020 break;
1021
1022 case NAME_ERR_TRAILING_SLASH:
1023 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1024 "trailing slash in name"));
1025 break;
1026
1027 case NAME_ERR_MULTIPLE_DELIMITERS:
1028 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1029 "multiple '@' and/or '#' delimiters in "
1030 "name"));
1031 break;
1032
1033 case NAME_ERR_NO_AT:
1034 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1035 "permission set is missing '@'"));
1036 break;
1037
1038 default:
1039 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1040 "(%d) not defined"), why);
1041 break;
1042 }
1043 }
1044 return (B_FALSE);
1045 }
1046
1047 return (B_TRUE);
1048 }
1049
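/*
 * Example (illustrative): when validating a name for create/import
 * (isopen == B_FALSE), names that look like vdev specifications are
 * rejected, e.g.
 *
 *     zpool_name_valid(hdl, B_FALSE, "mirror1");   returns B_FALSE
 *     zpool_name_valid(hdl, B_FALSE, "tank");      returns B_TRUE
 *
 * With isopen == B_TRUE the expanded reserved-name check is skipped so
 * existing pools with such names can still be opened.
 */
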
1050 /*
1051 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1052 * state.
1053 */
1054 zpool_handle_t *
1055 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1056 {
1057 zpool_handle_t *zhp;
1058 boolean_t missing;
1059
1060 /*
1061 * Make sure the pool name is valid.
1062 */
1063 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1064 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1065 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1066 pool);
1067 return (NULL);
1068 }
1069
1070 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1071 return (NULL);
1072
1073 zhp->zpool_hdl = hdl;
1074 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1075
1076 if (zpool_refresh_stats(zhp, &missing) != 0) {
1077 zpool_close(zhp);
1078 return (NULL);
1079 }
1080
1081 if (missing) {
1082 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1083 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1084 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1085 zpool_close(zhp);
1086 return (NULL);
1087 }
1088
1089 return (zhp);
1090 }
1091
1092 /*
1093 * Like the above, but silent on error. Used when iterating over pools (because
1094 * the configuration cache may be out of date).
1095 */
1096 int
1097 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1098 {
1099 zpool_handle_t *zhp;
1100 boolean_t missing;
1101
1102 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1103 return (-1);
1104
1105 zhp->zpool_hdl = hdl;
1106 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1107
1108 if (zpool_refresh_stats(zhp, &missing) != 0) {
1109 zpool_close(zhp);
1110 return (-1);
1111 }
1112
1113 if (missing) {
1114 zpool_close(zhp);
1115 *ret = NULL;
1116 return (0);
1117 }
1118
1119 *ret = zhp;
1120 return (0);
1121 }
1122
1123 /*
1124 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1125 * state.
1126 */
1127 zpool_handle_t *
1128 zpool_open(libzfs_handle_t *hdl, const char *pool)
1129 {
1130 zpool_handle_t *zhp;
1131
1132 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1133 return (NULL);
1134
1135 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1136 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1137 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1138 zpool_close(zhp);
1139 return (NULL);
1140 }
1141
1142 return (zhp);
1143 }
1144
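/*
 * Example (illustrative sketch): typical open/use/close lifecycle for a
 * pool handle.  The pool name "tank" is hypothetical.
 *
 *     libzfs_handle_t *hdl = libzfs_init();
 *     zpool_handle_t *zhp;
 *
 *     if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *             (void) printf("%s: %s\n", zpool_get_name(zhp),
 *                 zpool_get_state_str(zhp));
 *             zpool_close(zhp);
 *     }
 *     if (hdl != NULL)
 *             libzfs_fini(hdl);
 */
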
1145 /*
1146 * Close the handle. Simply frees the memory associated with the handle.
1147 */
1148 void
1149 zpool_close(zpool_handle_t *zhp)
1150 {
1151 nvlist_free(zhp->zpool_config);
1152 nvlist_free(zhp->zpool_old_config);
1153 nvlist_free(zhp->zpool_props);
1154 free(zhp);
1155 }
1156
1157 /*
1158 * Return the name of the pool.
1159 */
1160 const char *
1161 zpool_get_name(zpool_handle_t *zhp)
1162 {
1163 return (zhp->zpool_name);
1164 }
1165
1166
1167 /*
1168 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1169 */
1170 int
1171 zpool_get_state(zpool_handle_t *zhp)
1172 {
1173 return (zhp->zpool_state);
1174 }
1175
1176 /*
1177 * Check if vdev list contains a special vdev
1178 */
1179 static boolean_t
1180 zpool_has_special_vdev(nvlist_t *nvroot)
1181 {
1182 nvlist_t **child;
1183 uint_t children;
1184
1185 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
1186 &children) == 0) {
1187 for (uint_t c = 0; c < children; c++) {
1188 char *bias;
1189
1190 if (nvlist_lookup_string(child[c],
1191 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
1192 strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
1193 return (B_TRUE);
1194 }
1195 }
1196 }
1197 return (B_FALSE);
1198 }
1199
1200 /*
1201 * Create the named pool, using the provided vdev list. It is assumed
1202 * that the consumer has already validated the contents of the nvlist, so we
1203 * don't have to worry about error semantics.
1204 */
1205 int
1206 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1207 nvlist_t *props, nvlist_t *fsprops)
1208 {
1209 zfs_cmd_t zc = {"\0"};
1210 nvlist_t *zc_fsprops = NULL;
1211 nvlist_t *zc_props = NULL;
1212 nvlist_t *hidden_args = NULL;
1213 uint8_t *wkeydata = NULL;
1214 uint_t wkeylen = 0;
1215 char msg[1024];
1216 int ret = -1;
1217
1218 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1219 "cannot create '%s'"), pool);
1220
1221 if (!zpool_name_valid(hdl, B_FALSE, pool))
1222 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1223
1224 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1225 return (-1);
1226
1227 if (props) {
1228 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1229
1230 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1231 SPA_VERSION_1, flags, msg)) == NULL) {
1232 goto create_failed;
1233 }
1234 }
1235
1236 if (fsprops) {
1237 uint64_t zoned;
1238 char *zonestr;
1239
1240 zoned = ((nvlist_lookup_string(fsprops,
1241 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1242 strcmp(zonestr, "on") == 0);
1243
1244 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1245 fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
1246 goto create_failed;
1247 }
1248
1249 if (nvlist_exists(zc_fsprops,
1250 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
1251 !zpool_has_special_vdev(nvroot)) {
1252 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1253 "%s property requires a special vdev"),
1254 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
1255 (void) zfs_error(hdl, EZFS_BADPROP, msg);
1256 goto create_failed;
1257 }
1258
1259 if (!zc_props &&
1260 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1261 goto create_failed;
1262 }
1263 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
1264 &wkeydata, &wkeylen) != 0) {
1265 zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
1266 goto create_failed;
1267 }
1268 if (nvlist_add_nvlist(zc_props,
1269 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1270 goto create_failed;
1271 }
1272 if (wkeydata != NULL) {
1273 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1274 goto create_failed;
1275
1276 if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1277 wkeydata, wkeylen) != 0)
1278 goto create_failed;
1279
1280 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1281 hidden_args) != 0)
1282 goto create_failed;
1283 }
1284 }
1285
1286 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1287 goto create_failed;
1288
1289 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1290
1291 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1292
1293 zcmd_free_nvlists(&zc);
1294 nvlist_free(zc_props);
1295 nvlist_free(zc_fsprops);
1296 nvlist_free(hidden_args);
1297 if (wkeydata != NULL)
1298 free(wkeydata);
1299
1300 switch (errno) {
1301 case EBUSY:
1302 /*
1303 * This can happen if the user has specified the same
1304 * device multiple times. We can't reliably detect this
1305 * until we try to add it and see we already have a
1306 * label. This can also happen if the device is
1307 * part of an active md or lvm device.
1308 */
1309 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1310 "one or more vdevs refer to the same device, or "
1311 "one of\nthe devices is part of an active md or "
1312 "lvm device"));
1313 return (zfs_error(hdl, EZFS_BADDEV, msg));
1314
1315 case ERANGE:
1316 /*
1317 * This happens if the record size is outside the
1318 * allowed size range, or not a power of 2.
1319 *
1320 * NOTE: although zfs_valid_proplist is called earlier,
1321 * this case may have slipped through since the
1322 * pool does not exist yet and it is therefore
1323 * impossible to read properties e.g. max blocksize
1324 * from the pool.
1325 */
1326 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1327 "record size invalid"));
1328 return (zfs_error(hdl, EZFS_BADPROP, msg));
1329
1330 case EOVERFLOW:
1331 /*
1332 * This occurs when one of the devices is below
1333 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1334 * device was the problem device since there's no
1335 * reliable way to determine device size from userland.
1336 */
1337 {
1338 char buf[64];
1339
1340 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1341 sizeof (buf));
1342
1343 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1344 "one or more devices is less than the "
1345 "minimum size (%s)"), buf);
1346 }
1347 return (zfs_error(hdl, EZFS_BADDEV, msg));
1348
1349 case ENOSPC:
1350 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1351 "one or more devices is out of space"));
1352 return (zfs_error(hdl, EZFS_BADDEV, msg));
1353
1354 default:
1355 return (zpool_standard_error(hdl, errno, msg));
1356 }
1357 }
1358
1359 create_failed:
1360 zcmd_free_nvlists(&zc);
1361 nvlist_free(zc_props);
1362 nvlist_free(zc_fsprops);
1363 nvlist_free(hidden_args);
1364 if (wkeydata != NULL)
1365 free(wkeydata);
1366 return (ret);
1367 }
1368
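/*
 * Example (illustrative sketch): callers normally build 'nvroot' with the
 * vdev-parsing helpers used by the zpool command (e.g. make_root_vdev());
 * the hand-rolled nvlist below only hints at the expected shape.  The
 * device path and pool name are hypothetical and error checks are omitted.
 *
 *     nvlist_t *root, *disk, *child[1];
 *
 *     (void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *     (void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *     (void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *     child[0] = disk;
 *
 *     (void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
 *     (void) nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *     (void) nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, child, 1);
 *
 *     if (zpool_create(hdl, "tank", root, NULL, NULL) != 0)
 *             (void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */
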
1369 /*
1370 * Destroy the given pool. It is up to the caller to ensure that there are no
1371 * datasets left in the pool.
1372 */
1373 int
1374 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1375 {
1376 zfs_cmd_t zc = {"\0"};
1377 zfs_handle_t *zfp = NULL;
1378 libzfs_handle_t *hdl = zhp->zpool_hdl;
1379 char msg[1024];
1380
1381 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1382 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1383 return (-1);
1384
1385 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1386 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1387
1388 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1389 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1390 "cannot destroy '%s'"), zhp->zpool_name);
1391
1392 if (errno == EROFS) {
1393 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1394 "one or more devices is read only"));
1395 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1396 } else {
1397 (void) zpool_standard_error(hdl, errno, msg);
1398 }
1399
1400 if (zfp)
1401 zfs_close(zfp);
1402 return (-1);
1403 }
1404
1405 if (zfp) {
1406 remove_mountpoint(zfp);
1407 zfs_close(zfp);
1408 }
1409
1410 return (0);
1411 }
1412
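/*
 * Example (illustrative sketch): destroying a pool through an open handle.
 * The history log string is free-form and hypothetical here.
 *
 *     if (zpool_destroy(zhp, "example: destroy tank") != 0)
 *             (void) fprintf(stderr, "destroy failed\n");
 */
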
1413 /*
1414 * Create a checkpoint in the given pool.
1415 */
1416 int
1417 zpool_checkpoint(zpool_handle_t *zhp)
1418 {
1419 libzfs_handle_t *hdl = zhp->zpool_hdl;
1420 char msg[1024];
1421 int error;
1422
1423 error = lzc_pool_checkpoint(zhp->zpool_name);
1424 if (error != 0) {
1425 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1426 "cannot checkpoint '%s'"), zhp->zpool_name);
1427 (void) zpool_standard_error(hdl, error, msg);
1428 return (-1);
1429 }
1430
1431 return (0);
1432 }
1433
1434 /*
1435 * Discard the checkpoint from the given pool.
1436 */
1437 int
1438 zpool_discard_checkpoint(zpool_handle_t *zhp)
1439 {
1440 libzfs_handle_t *hdl = zhp->zpool_hdl;
1441 char msg[1024];
1442 int error;
1443
1444 error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1445 if (error != 0) {
1446 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1447 "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1448 (void) zpool_standard_error(hdl, error, msg);
1449 return (-1);
1450 }
1451
1452 return (0);
1453 }
1454
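/*
 * Example (illustrative sketch): taking a checkpoint before a risky
 * administrative change and discarding it afterwards.
 *
 *     if (zpool_checkpoint(zhp) == 0) {
 *             ... perform the change, verify the result ...
 *             (void) zpool_discard_checkpoint(zhp);
 *     }
 */
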
1455 /*
1456 * Add the given vdevs to the pool. The caller must have already performed the
1457 * necessary verification to ensure that the vdev specification is well-formed.
1458 */
1459 int
1460 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1461 {
1462 zfs_cmd_t zc = {"\0"};
1463 int ret;
1464 libzfs_handle_t *hdl = zhp->zpool_hdl;
1465 char msg[1024];
1466 nvlist_t **spares, **l2cache;
1467 uint_t nspares, nl2cache;
1468
1469 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1470 "cannot add to '%s'"), zhp->zpool_name);
1471
1472 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1473 SPA_VERSION_SPARES &&
1474 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1475 &spares, &nspares) == 0) {
1476 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1477 "upgraded to add hot spares"));
1478 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1479 }
1480
1481 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1482 SPA_VERSION_L2CACHE &&
1483 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1484 &l2cache, &nl2cache) == 0) {
1485 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1486 "upgraded to add cache devices"));
1487 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1488 }
1489
1490 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1491 return (-1);
1492 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1493
1494 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1495 switch (errno) {
1496 case EBUSY:
1497 /*
1498 * This can happen if the user has specified the same
1499 * device multiple times. We can't reliably detect this
1500 * until we try to add it and see we already have a
1501 * label.
1502 */
1503 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1504 "one or more vdevs refer to the same device"));
1505 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1506 break;
1507
1508 case EINVAL:
1509 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1510 "invalid config; a pool with removing/removed "
1511 "vdevs does not support adding raidz vdevs"));
1512 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1513 break;
1514
1515 case EOVERFLOW:
1516 /*
1517 * This occurs when one of the devices is below
1518 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1519 * device was the problem device since there's no
1520 * reliable way to determine device size from userland.
1521 */
1522 {
1523 char buf[64];
1524
1525 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1526 sizeof (buf));
1527
1528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1529 "device is less than the minimum "
1530 "size (%s)"), buf);
1531 }
1532 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1533 break;
1534
1535 case ENOTSUP:
1536 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1537 "pool must be upgraded to add these vdevs"));
1538 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1539 break;
1540
1541 default:
1542 (void) zpool_standard_error(hdl, errno, msg);
1543 }
1544
1545 ret = -1;
1546 } else {
1547 ret = 0;
1548 }
1549
1550 zcmd_free_nvlists(&zc);
1551
1552 return (ret);
1553 }
1554
1555 /*
1556 * Exports the pool from the system. The caller must ensure that there are no
1557 * mounted datasets in the pool.
1558 */
1559 static int
1560 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1561 const char *log_str)
1562 {
1563 zfs_cmd_t zc = {"\0"};
1564 char msg[1024];
1565
1566 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1567 "cannot export '%s'"), zhp->zpool_name);
1568
1569 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1570 zc.zc_cookie = force;
1571 zc.zc_guid = hardforce;
1572 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1573
1574 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1575 switch (errno) {
1576 case EXDEV:
1577 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1578 "use '-f' to override the following errors:\n"
1579 "'%s' has an active shared spare which could be"
1580 " used by other pools once '%s' is exported."),
1581 zhp->zpool_name, zhp->zpool_name);
1582 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1583 msg));
1584 default:
1585 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1586 msg));
1587 }
1588 }
1589
1590 return (0);
1591 }
1592
1593 int
1594 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1595 {
1596 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1597 }
1598
1599 int
1600 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1601 {
1602 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1603 }
1604
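/*
 * Example (illustrative sketch): exporting a pool once its datasets have
 * been unmounted.  The history log string is hypothetical.
 *
 *     if (zpool_export(zhp, B_FALSE, "example: export tank") != 0)
 *             (void) fprintf(stderr, "export failed\n");
 */
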
1605 static void
1606 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1607 nvlist_t *config)
1608 {
1609 nvlist_t *nv = NULL;
1610 uint64_t rewindto;
1611 int64_t loss = -1;
1612 struct tm t;
1613 char timestr[128];
1614
1615 if (!hdl->libzfs_printerr || config == NULL)
1616 return;
1617
1618 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1619 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1620 return;
1621 }
1622
1623 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1624 return;
1625 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1626
1627 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1628 strftime(timestr, 128, "%c", &t) != 0) {
1629 if (dryrun) {
1630 (void) printf(dgettext(TEXT_DOMAIN,
1631 "Would be able to return %s "
1632 "to its state as of %s.\n"),
1633 name, timestr);
1634 } else {
1635 (void) printf(dgettext(TEXT_DOMAIN,
1636 "Pool %s returned to its state as of %s.\n"),
1637 name, timestr);
1638 }
1639 if (loss > 120) {
1640 (void) printf(dgettext(TEXT_DOMAIN,
1641 "%s approximately %lld "),
1642 dryrun ? "Would discard" : "Discarded",
1643 ((longlong_t)loss + 30) / 60);
1644 (void) printf(dgettext(TEXT_DOMAIN,
1645 "minutes of transactions.\n"));
1646 } else if (loss > 0) {
1647 (void) printf(dgettext(TEXT_DOMAIN,
1648 "%s approximately %lld "),
1649 dryrun ? "Would discard" : "Discarded",
1650 (longlong_t)loss);
1651 (void) printf(dgettext(TEXT_DOMAIN,
1652 "seconds of transactions.\n"));
1653 }
1654 }
1655 }
1656
1657 void
1658 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1659 nvlist_t *config)
1660 {
1661 nvlist_t *nv = NULL;
1662 int64_t loss = -1;
1663 uint64_t edata = UINT64_MAX;
1664 uint64_t rewindto;
1665 struct tm t;
1666 char timestr[128];
1667
1668 if (!hdl->libzfs_printerr)
1669 return;
1670
1671 if (reason >= 0)
1672 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1673 else
1674 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1675
1676 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME is missing */
1677 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1678 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1679 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1680 goto no_info;
1681
1682 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1683 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1684 &edata);
1685
1686 (void) printf(dgettext(TEXT_DOMAIN,
1687 "Recovery is possible, but will result in some data loss.\n"));
1688
1689 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1690 strftime(timestr, 128, "%c", &t) != 0) {
1691 (void) printf(dgettext(TEXT_DOMAIN,
1692 "\tReturning the pool to its state as of %s\n"
1693 "\tshould correct the problem. "),
1694 timestr);
1695 } else {
1696 (void) printf(dgettext(TEXT_DOMAIN,
1697 "\tReverting the pool to an earlier state "
1698 "should correct the problem.\n\t"));
1699 }
1700
1701 if (loss > 120) {
1702 (void) printf(dgettext(TEXT_DOMAIN,
1703 "Approximately %lld minutes of data\n"
1704 "\tmust be discarded, irreversibly. "),
1705 ((longlong_t)loss + 30) / 60);
1706 } else if (loss > 0) {
1707 (void) printf(dgettext(TEXT_DOMAIN,
1708 "Approximately %lld seconds of data\n"
1709 "\tmust be discarded, irreversibly. "),
1710 (longlong_t)loss);
1711 }
1712 if (edata != 0 && edata != UINT64_MAX) {
1713 if (edata == 1) {
1714 (void) printf(dgettext(TEXT_DOMAIN,
1715 "After rewind, at least\n"
1716 "\tone persistent user-data error will remain. "));
1717 } else {
1718 (void) printf(dgettext(TEXT_DOMAIN,
1719 "After rewind, several\n"
1720 "\tpersistent user-data errors will remain. "));
1721 }
1722 }
1723 (void) printf(dgettext(TEXT_DOMAIN,
1724 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1725 reason >= 0 ? "clear" : "import", name);
1726
1727 (void) printf(dgettext(TEXT_DOMAIN,
1728 "A scrub of the pool\n"
1729 "\tis strongly recommended after recovery.\n"));
1730 return;
1731
1732 no_info:
1733 (void) printf(dgettext(TEXT_DOMAIN,
1734 "Destroy and re-create the pool from\n\ta backup source.\n"));
1735 }
1736
1737 /*
1738 * zpool_import() is a contracted interface and should be kept the same
1739 * if possible.
1740 *
1741 * Applications should use zpool_import_props() to import a pool with
1742 * new property values to be set.
1743 */
1744 int
1745 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1746 char *altroot)
1747 {
1748 nvlist_t *props = NULL;
1749 int ret;
1750
1751 if (altroot != NULL) {
1752 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1753 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1754 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1755 newname));
1756 }
1757
1758 if (nvlist_add_string(props,
1759 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1760 nvlist_add_string(props,
1761 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1762 nvlist_free(props);
1763 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1764 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1765 newname));
1766 }
1767 }
1768
1769 ret = zpool_import_props(hdl, config, newname, props,
1770 ZFS_IMPORT_NORMAL);
1771 nvlist_free(props);
1772 return (ret);
1773 }
1774
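/*
 * Example (illustrative sketch): importing a pool under an alternate root
 * using a configuration obtained from import discovery (e.g. via
 * zpool_search_import() in libzutil).  The altroot path is hypothetical.
 *
 *     if (zpool_import(hdl, config, NULL, "/sysroot") != 0)
 *             (void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */
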
1775 static void
1776 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1777 int indent)
1778 {
1779 nvlist_t **child;
1780 uint_t c, children;
1781 char *vname;
1782 uint64_t is_log = 0;
1783
1784 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1785 &is_log);
1786
1787 if (name != NULL)
1788 (void) printf("\t%*s%s%s\n", indent, "", name,
1789 is_log ? " [log]" : "");
1790
1791 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1792 &child, &children) != 0)
1793 return;
1794
1795 for (c = 0; c < children; c++) {
1796 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1797 print_vdev_tree(hdl, vname, child[c], indent + 2);
1798 free(vname);
1799 }
1800 }
1801
1802 void
1803 zpool_print_unsup_feat(nvlist_t *config)
1804 {
1805 nvlist_t *nvinfo, *unsup_feat;
1806 nvpair_t *nvp;
1807
1808 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1809 0);
1810 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1811 &unsup_feat) == 0);
1812
1813 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1814 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1815 char *desc;
1816
1817 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1818 verify(nvpair_value_string(nvp, &desc) == 0);
1819
1820 if (strlen(desc) > 0)
1821 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1822 else
1823 (void) printf("\t%s\n", nvpair_name(nvp));
1824 }
1825 }
1826
1827 /*
1828 * Import the given pool using the known configuration and a list of
1829 * properties to be set. The configuration should have come from
1830 * zpool_find_import(). The 'newname' parameter controls whether the pool
1831 * is imported with a different name.
1832 */
1833 int
1834 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1835 nvlist_t *props, int flags)
1836 {
1837 zfs_cmd_t zc = {"\0"};
1838 zpool_load_policy_t policy;
1839 nvlist_t *nv = NULL;
1840 nvlist_t *nvinfo = NULL;
1841 nvlist_t *missing = NULL;
1842 char *thename;
1843 char *origname;
1844 int ret;
1845 int error = 0;
1846 char errbuf[1024];
1847
1848 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1849 &origname) == 0);
1850
1851 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1852 "cannot import pool '%s'"), origname);
1853
1854 if (newname != NULL) {
1855 if (!zpool_name_valid(hdl, B_FALSE, newname))
1856 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1857 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1858 newname));
1859 thename = (char *)newname;
1860 } else {
1861 thename = origname;
1862 }
1863
1864 if (props != NULL) {
1865 uint64_t version;
1866 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1867
1868 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1869 &version) == 0);
1870
1871 if ((props = zpool_valid_proplist(hdl, origname,
1872 props, version, flags, errbuf)) == NULL)
1873 return (-1);
1874 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1875 nvlist_free(props);
1876 return (-1);
1877 }
1878 nvlist_free(props);
1879 }
1880
1881 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1882
1883 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1884 &zc.zc_guid) == 0);
1885
1886 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1887 zcmd_free_nvlists(&zc);
1888 return (-1);
1889 }
1890 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1891 zcmd_free_nvlists(&zc);
1892 return (-1);
1893 }
1894
1895 zc.zc_cookie = flags;
1896 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1897 errno == ENOMEM) {
1898 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1899 zcmd_free_nvlists(&zc);
1900 return (-1);
1901 }
1902 }
1903 if (ret != 0)
1904 error = errno;
1905
1906 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1907
1908 zcmd_free_nvlists(&zc);
1909
1910 zpool_get_load_policy(config, &policy);
1911
1912 if (error) {
1913 char desc[1024];
1914 char aux[256];
1915
1916 /*
1917 * Dry-run failed, but we print out what success
1918 * looks like if we found a best txg
1919 */
1920 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
1921 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1922 B_TRUE, nv);
1923 nvlist_free(nv);
1924 return (-1);
1925 }
1926
1927 if (newname == NULL)
1928 (void) snprintf(desc, sizeof (desc),
1929 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1930 thename);
1931 else
1932 (void) snprintf(desc, sizeof (desc),
1933 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1934 origname, thename);
1935
1936 switch (error) {
1937 case ENOTSUP:
1938 if (nv != NULL && nvlist_lookup_nvlist(nv,
1939 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1940 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1941 (void) printf(dgettext(TEXT_DOMAIN, "This "
1942 "pool uses the following feature(s) not "
1943 "supported by this system:\n"));
1944 zpool_print_unsup_feat(nv);
1945 if (nvlist_exists(nvinfo,
1946 ZPOOL_CONFIG_CAN_RDONLY)) {
1947 (void) printf(dgettext(TEXT_DOMAIN,
1948 "All unsupported features are only "
1949 "required for writing to the pool."
1950 "\nThe pool can be imported using "
1951 "'-o readonly=on'.\n"));
1952 }
1953 }
1954 /*
1955 * Unsupported version.
1956 */
1957 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1958 break;
1959
1960 case EREMOTEIO:
1961 if (nv != NULL && nvlist_lookup_nvlist(nv,
1962 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
1963 char *hostname = "<unknown>";
1964 uint64_t hostid = 0;
1965 mmp_state_t mmp_state;
1966
1967 mmp_state = fnvlist_lookup_uint64(nvinfo,
1968 ZPOOL_CONFIG_MMP_STATE);
1969
1970 if (nvlist_exists(nvinfo,
1971 ZPOOL_CONFIG_MMP_HOSTNAME))
1972 hostname = fnvlist_lookup_string(nvinfo,
1973 ZPOOL_CONFIG_MMP_HOSTNAME);
1974
1975 if (nvlist_exists(nvinfo,
1976 ZPOOL_CONFIG_MMP_HOSTID))
1977 hostid = fnvlist_lookup_uint64(nvinfo,
1978 ZPOOL_CONFIG_MMP_HOSTID);
1979
1980 if (mmp_state == MMP_STATE_ACTIVE) {
1981 (void) snprintf(aux, sizeof (aux),
1982 dgettext(TEXT_DOMAIN, "pool is imp"
1983 "orted on host '%s' (hostid=%lx).\n"
1984 "Export the pool on the other "
1985 "system, then run 'zpool import'."),
1986 hostname, (unsigned long) hostid);
1987 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
1988 (void) snprintf(aux, sizeof (aux),
1989 dgettext(TEXT_DOMAIN, "pool has "
1990 "the multihost property on and "
1991 "the\nsystem's hostid is not set. "
1992 "Set a unique system hostid with "
1993 "the zgenhostid(8) command.\n"));
1994 }
1995
1996 (void) zfs_error_aux(hdl, aux);
1997 }
1998 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
1999 break;
2000
2001 case EINVAL:
2002 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2003 break;
2004
2005 case EROFS:
2006 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2007 "one or more devices is read only"));
2008 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2009 break;
2010
2011 case ENXIO:
2012 if (nv && nvlist_lookup_nvlist(nv,
2013 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2014 nvlist_lookup_nvlist(nvinfo,
2015 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2016 (void) printf(dgettext(TEXT_DOMAIN,
2017 "The devices below are missing or "
2018 "corrupted, use '-m' to import the pool "
2019 "anyway:\n"));
2020 print_vdev_tree(hdl, NULL, missing, 2);
2021 (void) printf("\n");
2022 }
2023 (void) zpool_standard_error(hdl, error, desc);
2024 break;
2025
2026 case EEXIST:
2027 (void) zpool_standard_error(hdl, error, desc);
2028 break;
2029
2030 case EBUSY:
2031 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2032 "one or more devices are already in use\n"));
2033 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2034 break;
2035 case ENAMETOOLONG:
2036 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2037 "new name of at least one dataset is longer than "
2038 "the maximum allowable length"));
2039 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2040 break;
2041 default:
2042 (void) zpool_standard_error(hdl, error, desc);
2043 zpool_explain_recover(hdl,
2044 newname ? origname : thename, -error, nv);
2045 break;
2046 }
2047
2048 nvlist_free(nv);
2049 ret = -1;
2050 } else {
2051 zpool_handle_t *zhp;
2052
2053 /*
2054 * This should never fail, but play it safe anyway.
2055 */
2056 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2057 ret = -1;
2058 else if (zhp != NULL)
2059 zpool_close(zhp);
2060 if (policy.zlp_rewind &
2061 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2062 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2063 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2064 }
2065 nvlist_free(nv);
2066 return (0);
2067 }
2068
2069 return (ret);
2070 }
2071
2072 /*
2073 * Translate vdev names to guids. If a vdev_path is determined to be
2074 * unsuitable then a vd_errlist is allocated and the vdev path and errno
2075 * are added to it.
2076 */
2077 static int
2078 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2079 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2080 {
2081 nvlist_t *errlist = NULL;
2082 int error = 0;
2083
2084 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2085 elem = nvlist_next_nvpair(vds, elem)) {
2086 boolean_t spare, cache;
2087
2088 char *vd_path = nvpair_name(elem);
2089 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2090 NULL);
2091
2092 if ((tgt == NULL) || cache || spare) {
2093 if (errlist == NULL) {
2094 errlist = fnvlist_alloc();
2095 error = EINVAL;
2096 }
2097
2098 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2099 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2100 fnvlist_add_int64(errlist, vd_path, err);
2101 continue;
2102 }
2103
2104 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2105 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2106
2107 char msg[MAXNAMELEN];
2108 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2109 fnvlist_add_string(guids_to_paths, msg, vd_path);
2110 }
2111
2112 if (error != 0) {
2113 verify(errlist != NULL);
2114 if (vd_errlist != NULL)
2115 *vd_errlist = errlist;
2116 else
2117 fnvlist_free(errlist);
2118 }
2119
2120 return (error);
2121 }
2122
2123 static int
2124 xlate_init_err(int err)
2125 {
2126 switch (err) {
2127 case ENODEV:
2128 return (EZFS_NODEVICE);
2129 case EINVAL:
2130 case EROFS:
2131 return (EZFS_BADDEV);
2132 case EBUSY:
2133 return (EZFS_INITIALIZING);
2134 case ESRCH:
2135 return (EZFS_NO_INITIALIZE);
2136 }
2137 return (err);
2138 }
2139
2140 /*
2141  * Begin, suspend, or cancel the initialization (writing of a pattern to
2142  * all free blocks) for the given vdevs in the given pool.
2143 */
2144 int
2145 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2146 nvlist_t *vds, boolean_t wait)
2147 {
2148 int err;
2149
2150 nvlist_t *vdev_guids = fnvlist_alloc();
2151 nvlist_t *guids_to_paths = fnvlist_alloc();
2152 nvlist_t *vd_errlist = NULL;
2153 nvlist_t *errlist;
2154 nvpair_t *elem;
2155
2156 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2157 guids_to_paths, &vd_errlist);
2158
2159 if (err != 0) {
2160 verify(vd_errlist != NULL);
2161 goto list_errors;
2162 }
2163
2164 err = lzc_initialize(zhp->zpool_name, cmd_type,
2165 vdev_guids, &errlist);
2166
2167 if (err != 0) {
2168 if (errlist != NULL) {
2169 vd_errlist = fnvlist_lookup_nvlist(errlist,
2170 ZPOOL_INITIALIZE_VDEVS);
2171 goto list_errors;
2172 }
2173 (void) zpool_standard_error(zhp->zpool_hdl, err,
2174 dgettext(TEXT_DOMAIN, "operation failed"));
2175 goto out;
2176 }
2177
2178 if (wait) {
2179 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2180 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2181
2182 uint64_t guid = fnvpair_value_uint64(elem);
2183
2184 err = lzc_wait_tag(zhp->zpool_name,
2185 ZPOOL_WAIT_INITIALIZE, guid, NULL);
2186 if (err != 0) {
2187 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2188 err, dgettext(TEXT_DOMAIN, "error "
2189 "waiting for '%s' to initialize"),
2190 nvpair_name(elem));
2191
2192 goto out;
2193 }
2194 }
2195 }
2196 goto out;
2197
2198 list_errors:
2199 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2200 elem = nvlist_next_nvpair(vd_errlist, elem)) {
2201 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2202 char *path;
2203
2204 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2205 &path) != 0)
2206 path = nvpair_name(elem);
2207
2208 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2209 "cannot initialize '%s'", path);
2210 }
2211
2212 out:
2213 fnvlist_free(vdev_guids);
2214 fnvlist_free(guids_to_paths);
2215
2216 if (vd_errlist != NULL)
2217 fnvlist_free(vd_errlist);
2218
2219 return (err == 0 ? 0 : -1);
2220 }
2221
2222 int
2223 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2224 nvlist_t *vds)
2225 {
2226 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2227 }
2228
2229 int
2230 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2231 nvlist_t *vds)
2232 {
2233 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2234 }
2235
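/*
 * Illustrative sketch (not part of the original source): starting
 * initialization on one vdev and blocking until it completes. The pool
 * name "tank" and device path "/dev/sda" are hypothetical, and the
 * request nvlist is assumed to be keyed by vdev path, which is what
 * zpool_translate_vdev_guids() above expects.
 */
static int
example_initialize_and_wait(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *vds;
	int err;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);

	vds = fnvlist_alloc();
	fnvlist_add_boolean(vds, "/dev/sda");	/* one nvpair per vdev path */

	/* POOL_INITIALIZE_START begins the work; the _wait variant blocks. */
	err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);

	fnvlist_free(vds);
	zpool_close(zhp);
	return (err);
}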
2236 static int
2237 xlate_trim_err(int err)
2238 {
2239 switch (err) {
2240 case ENODEV:
2241 return (EZFS_NODEVICE);
2242 case EINVAL:
2243 case EROFS:
2244 return (EZFS_BADDEV);
2245 case EBUSY:
2246 return (EZFS_TRIMMING);
2247 case ESRCH:
2248 return (EZFS_NO_TRIM);
2249 case EOPNOTSUPP:
2250 return (EZFS_TRIM_NOTSUP);
2251 }
2252 return (err);
2253 }
2254
2255 static int
2256 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2257 {
2258 int err;
2259 nvpair_t *elem;
2260
2261 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2262 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2263
2264 uint64_t guid = fnvpair_value_uint64(elem);
2265
2266 err = lzc_wait_tag(zhp->zpool_name,
2267 ZPOOL_WAIT_TRIM, guid, NULL);
2268 if (err != 0) {
2269 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2270 err, dgettext(TEXT_DOMAIN, "error "
2271 "waiting to trim '%s'"), nvpair_name(elem));
2272
2273 return (err);
2274 }
2275 }
2276 return (0);
2277 }
2278
2279 /*
2280 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2281 * the given vdevs in the given pool.
2282 */
2283 int
2284 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2285 trimflags_t *trim_flags)
2286 {
2287 char msg[1024];
2288 int err;
2289
2290 nvlist_t *vdev_guids = fnvlist_alloc();
2291 nvlist_t *guids_to_paths = fnvlist_alloc();
2292 nvlist_t *vd_errlist = NULL;
2293 nvlist_t *errlist;
2294 nvpair_t *elem;
2295
2296 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2297 guids_to_paths, &vd_errlist);
2298 if (err == 0) {
2299 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2300 trim_flags->secure, vdev_guids, &errlist);
2301 if (err == 0) {
2302 if (trim_flags->wait)
2303 err = zpool_trim_wait(zhp, vdev_guids);
2304
2305 fnvlist_free(vdev_guids);
2306 fnvlist_free(guids_to_paths);
2307 return (err);
2308 }
2309
2310 if (errlist != NULL) {
2311 vd_errlist = fnvlist_lookup_nvlist(errlist,
2312 ZPOOL_TRIM_VDEVS);
2313 }
2314
2315 (void) snprintf(msg, sizeof (msg),
2316 dgettext(TEXT_DOMAIN, "operation failed"));
2317 } else {
2318 verify(vd_errlist != NULL);
2319 }
2320
2321 for (elem = nvlist_next_nvpair(vd_errlist, NULL);
2322 elem != NULL; elem = nvlist_next_nvpair(vd_errlist, elem)) {
2323 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2324 char *path;
2325
2326 /*
2327  * If only the pool was specified, and it was not a secure
2328  * trim, then suppress warnings for individual vdevs that
2329  * do not support trimming.
2330 */
2331 if (vd_error == EZFS_TRIM_NOTSUP &&
2332 trim_flags->fullpool &&
2333 !trim_flags->secure) {
2334 continue;
2335 }
2336
2337 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2338 &path) != 0)
2339 path = nvpair_name(elem);
2340
2341 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2342 "cannot trim '%s'", path);
2343 }
2344
2345 fnvlist_free(vdev_guids);
2346 fnvlist_free(guids_to_paths);
2347
2348 if (vd_errlist != NULL) {
2349 fnvlist_free(vd_errlist);
2350 return (-1);
2351 }
2352
2353 return (zpool_standard_error(zhp->zpool_hdl, err, msg));
2354 }
2355
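/*
 * Illustrative sketch (not part of the original source): issuing a
 * rate-limited TRIM of a single vdev. The flag fields mirror how
 * trim_flags is consumed above; the pool and device names are
 * hypothetical.
 */
static int
example_trim_vdev(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *vds;
	trimflags_t flags = { 0 };
	int err;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);

	vds = fnvlist_alloc();
	fnvlist_add_boolean(vds, "/dev/sda");

	flags.fullpool = B_FALSE;	/* a specific vdev was named */
	flags.secure = B_FALSE;		/* ordinary, not secure, TRIM */
	flags.wait = B_TRUE;		/* block until the TRIM finishes */
	flags.rate = 64ULL << 20;	/* throttle to 64 MiB/s */

	err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);

	fnvlist_free(vds);
	zpool_close(zhp);
	return (err);
}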
2356 /*
2357 * Scan the pool.
2358 */
2359 int
2360 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
2361 {
2362 zfs_cmd_t zc = {"\0"};
2363 char msg[1024];
2364 int err;
2365 libzfs_handle_t *hdl = zhp->zpool_hdl;
2366
2367 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2368 zc.zc_cookie = func;
2369 zc.zc_flags = cmd;
2370
2371 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2372 return (0);
2373
2374 err = errno;
2375
2376 /* ECANCELED on a scrub means we resumed a paused scrub */
2377 if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
2378 cmd == POOL_SCRUB_NORMAL)
2379 return (0);
2380
2381 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
2382 return (0);
2383
2384 if (func == POOL_SCAN_SCRUB) {
2385 if (cmd == POOL_SCRUB_PAUSE) {
2386 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2387 "cannot pause scrubbing %s"), zc.zc_name);
2388 } else {
2389 assert(cmd == POOL_SCRUB_NORMAL);
2390 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2391 "cannot scrub %s"), zc.zc_name);
2392 }
2393 } else if (func == POOL_SCAN_RESILVER) {
2394 assert(cmd == POOL_SCRUB_NORMAL);
2395 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2396 "cannot restart resilver on %s"), zc.zc_name);
2397 } else if (func == POOL_SCAN_NONE) {
2398 (void) snprintf(msg, sizeof (msg),
2399 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
2400 zc.zc_name);
2401 } else {
2402 assert(!"unexpected result");
2403 }
2404
2405 if (err == EBUSY) {
2406 nvlist_t *nvroot;
2407 pool_scan_stat_t *ps = NULL;
2408 uint_t psc;
2409
2410 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2411 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2412 (void) nvlist_lookup_uint64_array(nvroot,
2413 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2414 if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
2415 if (cmd == POOL_SCRUB_PAUSE)
2416 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
2417 else
2418 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
2419 } else {
2420 return (zfs_error(hdl, EZFS_RESILVERING, msg));
2421 }
2422 } else if (err == ENOENT) {
2423 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
2424 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2425 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
2426 } else {
2427 return (zpool_standard_error(hdl, err, msg));
2428 }
2429 }
2430
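/*
 * Illustrative sketch (not part of the original source): starting,
 * pausing, and resuming a scrub through zpool_scan(). Resuming a
 * paused scrub uses the same arguments as starting one.
 */
static int
example_scrub(zpool_handle_t *zhp, boolean_t pause)
{
	if (pause)
		return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE));

	/* Start a new scrub, or resume one that was previously paused. */
	return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL));
}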
2431 /*
2432  * Find a vdev that matches the search criteria specified. We use the
2433  * nvpair name to determine how we should look for the device.
2434  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2435  * spare, but FALSE if it's an INUSE spare.
2436 */
2437 static nvlist_t *
2438 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2439 boolean_t *l2cache, boolean_t *log)
2440 {
2441 uint_t c, children;
2442 nvlist_t **child;
2443 nvlist_t *ret;
2444 uint64_t is_log;
2445 char *srchkey;
2446 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2447
2448 /* Nothing to look for */
2449 if (search == NULL || pair == NULL)
2450 return (NULL);
2451
2452 /* Obtain the key we will use to search */
2453 srchkey = nvpair_name(pair);
2454
2455 switch (nvpair_type(pair)) {
2456 case DATA_TYPE_UINT64:
2457 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2458 uint64_t srchval, theguid;
2459
2460 verify(nvpair_value_uint64(pair, &srchval) == 0);
2461 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2462 &theguid) == 0);
2463 if (theguid == srchval)
2464 return (nv);
2465 }
2466 break;
2467
2468 case DATA_TYPE_STRING: {
2469 char *srchval, *val;
2470
2471 verify(nvpair_value_string(pair, &srchval) == 0);
2472 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2473 break;
2474
2475 /*
2476 * Search for the requested value. Special cases:
2477 *
2478 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2479 * "-part1", or "p1". The suffix is hidden from the user,
2480 * but included in the string, so this matches around it.
2481  * - ZPOOL_CONFIG_PATH for short names, zfs_strcmp_shortname()
2482 * is used to check all possible expanded paths.
2483 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2484 *
2485 * Otherwise, all other searches are simple string compares.
2486 */
2487 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2488 uint64_t wholedisk = 0;
2489
2490 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2491 &wholedisk);
2492 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2493 return (nv);
2494
2495 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2496 char *type, *idx, *end, *p;
2497 uint64_t id, vdev_id;
2498
2499 /*
2500 * Determine our vdev type, keeping in mind
2501 * that the srchval is composed of a type and
2502 * vdev id pair (i.e. mirror-4).
2503 */
2504 if ((type = strdup(srchval)) == NULL)
2505 return (NULL);
2506
2507 if ((p = strrchr(type, '-')) == NULL) {
2508 free(type);
2509 break;
2510 }
2511 idx = p + 1;
2512 *p = '\0';
2513
2514 /*
2515 * If the types don't match then keep looking.
2516 */
2517 if (strncmp(val, type, strlen(val)) != 0) {
2518 free(type);
2519 break;
2520 }
2521
2522 verify(zpool_vdev_is_interior(type));
2523 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2524 &id) == 0);
2525
2526 errno = 0;
2527 vdev_id = strtoull(idx, &end, 10);
2528
2529 free(type);
2530 if (errno != 0)
2531 return (NULL);
2532
2533 /*
2534 * Now verify that we have the correct vdev id.
2535 */
2536 if (vdev_id == id)
2537 return (nv);
2538 }
2539
2540 /*
2541 * Common case
2542 */
2543 if (strcmp(srchval, val) == 0)
2544 return (nv);
2545 break;
2546 }
2547
2548 default:
2549 break;
2550 }
2551
2552 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2553 &child, &children) != 0)
2554 return (NULL);
2555
2556 for (c = 0; c < children; c++) {
2557 if ((ret = vdev_to_nvlist_iter(child[c], search,
2558 avail_spare, l2cache, NULL)) != NULL) {
2559 /*
2560  * The 'is_log' value is only set for the top-level
2561  * vdev, not the leaf vdevs. So we always look up the
2562 * log device from the root of the vdev tree (where
2563 * 'log' is non-NULL).
2564 */
2565 if (log != NULL &&
2566 nvlist_lookup_uint64(child[c],
2567 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2568 is_log) {
2569 *log = B_TRUE;
2570 }
2571 return (ret);
2572 }
2573 }
2574
2575 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2576 &child, &children) == 0) {
2577 for (c = 0; c < children; c++) {
2578 if ((ret = vdev_to_nvlist_iter(child[c], search,
2579 avail_spare, l2cache, NULL)) != NULL) {
2580 *avail_spare = B_TRUE;
2581 return (ret);
2582 }
2583 }
2584 }
2585
2586 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2587 &child, &children) == 0) {
2588 for (c = 0; c < children; c++) {
2589 if ((ret = vdev_to_nvlist_iter(child[c], search,
2590 avail_spare, l2cache, NULL)) != NULL) {
2591 *l2cache = B_TRUE;
2592 return (ret);
2593 }
2594 }
2595 }
2596
2597 return (NULL);
2598 }
2599
2600 /*
2601 * Given a physical path or guid, find the associated vdev.
2602 */
2603 nvlist_t *
2604 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2605 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2606 {
2607 nvlist_t *search, *nvroot, *ret;
2608 uint64_t guid;
2609 char *end;
2610
2611 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2612
2613 guid = strtoull(ppath, &end, 0);
2614 if (guid != 0 && *end == '\0') {
2615 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2616 } else {
2617 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
2618 ppath) == 0);
2619 }
2620
2621 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2622 &nvroot) == 0);
2623
2624 *avail_spare = B_FALSE;
2625 *l2cache = B_FALSE;
2626 if (log != NULL)
2627 *log = B_FALSE;
2628 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2629 nvlist_free(search);
2630
2631 return (ret);
2632 }
2633
2634 /*
2635  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2636 */
2637 static boolean_t
2638 zpool_vdev_is_interior(const char *name)
2639 {
2640 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2641 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2642 strncmp(name,
2643 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2644 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2645 return (B_TRUE);
2646 return (B_FALSE);
2647 }
2648
2649 nvlist_t *
2650 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2651 boolean_t *l2cache, boolean_t *log)
2652 {
2653 char *end;
2654 nvlist_t *nvroot, *search, *ret;
2655 uint64_t guid;
2656
2657 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2658
2659 guid = strtoull(path, &end, 0);
2660 if (guid != 0 && *end == '\0') {
2661 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2662 } else if (zpool_vdev_is_interior(path)) {
2663 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2664 } else {
2665 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2666 }
2667
2668 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2669 &nvroot) == 0);
2670
2671 *avail_spare = B_FALSE;
2672 *l2cache = B_FALSE;
2673 if (log != NULL)
2674 *log = B_FALSE;
2675 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2676 nvlist_free(search);
2677
2678 return (ret);
2679 }
2680
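/*
 * Illustrative sketch (not part of the original source): resolving a
 * user-supplied vdev name to its config nvlist and GUID. The short
 * device name "sda" is hypothetical; a GUID string or a "mirror-0"
 * style interior name would be accepted as well.
 */
static int
example_lookup_vdev(zpool_handle_t *zhp, uint64_t *guidp)
{
	boolean_t avail_spare, l2cache, islog;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, "sda", &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (-1);	/* no such device in this pool */

	/* The returned nvlist is part of the pool config; do not free it. */
	*guidp = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	return (0);
}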
2681 static int
2682 vdev_is_online(nvlist_t *nv)
2683 {
2684 uint64_t ival;
2685
2686 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2687 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2688 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2689 return (0);
2690
2691 return (1);
2692 }
2693
2694 /*
2695  * Helper function for vdev_get_physpaths().
2696 */
2697 static int
2698 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2699 size_t *bytes_written)
2700 {
2701 size_t bytes_left, pos, rsz;
2702 char *tmppath;
2703 const char *format;
2704
2705 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2706 &tmppath) != 0)
2707 return (EZFS_NODEVICE);
2708
2709 pos = *bytes_written;
2710 bytes_left = physpath_size - pos;
2711 format = (pos == 0) ? "%s" : " %s";
2712
2713 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2714 *bytes_written += rsz;
2715
2716 if (rsz >= bytes_left) {
2717 /* if physpath was not copied properly, clear it */
2718 if (bytes_left != 0) {
2719 physpath[pos] = 0;
2720 }
2721 return (EZFS_NOSPC);
2722 }
2723 return (0);
2724 }
2725
2726 static int
2727 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2728 size_t *rsz, boolean_t is_spare)
2729 {
2730 char *type;
2731 int ret;
2732
2733 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2734 return (EZFS_INVALCONFIG);
2735
2736 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2737 /*
2738 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2739 * For a spare vdev, we only want to boot from the active
2740 * spare device.
2741 */
2742 if (is_spare) {
2743 uint64_t spare = 0;
2744 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2745 &spare);
2746 if (!spare)
2747 return (EZFS_INVALCONFIG);
2748 }
2749
2750 if (vdev_is_online(nv)) {
2751 if ((ret = vdev_get_one_physpath(nv, physpath,
2752 phypath_size, rsz)) != 0)
2753 return (ret);
2754 }
2755 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2756 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2757 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2758 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2759 nvlist_t **child;
2760 uint_t count;
2761 int i, ret;
2762
2763 if (nvlist_lookup_nvlist_array(nv,
2764 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2765 return (EZFS_INVALCONFIG);
2766
2767 for (i = 0; i < count; i++) {
2768 ret = vdev_get_physpaths(child[i], physpath,
2769 phypath_size, rsz, is_spare);
2770 if (ret == EZFS_NOSPC)
2771 return (ret);
2772 }
2773 }
2774
2775 return (EZFS_POOL_INVALARG);
2776 }
2777
2778 /*
2779 * Get phys_path for a root pool config.
2780 * Return 0 on success; non-zero on failure.
2781 */
2782 static int
2783 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2784 {
2785 size_t rsz;
2786 nvlist_t *vdev_root;
2787 nvlist_t **child;
2788 uint_t count;
2789 char *type;
2790
2791 rsz = 0;
2792
2793 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2794 &vdev_root) != 0)
2795 return (EZFS_INVALCONFIG);
2796
2797 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2798 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2799 &child, &count) != 0)
2800 return (EZFS_INVALCONFIG);
2801
2802 /*
2803 * root pool can only have a single top-level vdev.
2804 */
2805 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2806 return (EZFS_POOL_INVALARG);
2807
2808 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2809 B_FALSE);
2810
2811 /* No online devices */
2812 if (rsz == 0)
2813 return (EZFS_NODEVICE);
2814
2815 return (0);
2816 }
2817
2818 /*
2819 * Get phys_path for a root pool
2820 * Return 0 on success; non-zero on failure.
2821 */
2822 int
2823 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2824 {
2825 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2826 phypath_size));
2827 }
2828
2829 /*
2830 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2831 *
2832  * If is_spare, is_l2cache, or is_log is non-NULL, then store in it
2833  * whether the vdev is a spare, l2cache, or log device. If they're NULL then
2834 * ignore them.
2835 */
2836 static uint64_t
2837 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2838 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2839 {
2840 uint64_t guid;
2841 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2842 nvlist_t *tgt;
2843
2844 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2845 &log)) == NULL)
2846 return (0);
2847
2848 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2849 if (is_spare != NULL)
2850 *is_spare = spare;
2851 if (is_l2cache != NULL)
2852 *is_l2cache = l2cache;
2853 if (is_log != NULL)
2854 *is_log = log;
2855
2856 return (guid);
2857 }
2858
2859 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2860 uint64_t
2861 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2862 {
2863 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2864 }
2865
2866 /*
2867 * Bring the specified vdev online. The 'flags' parameter is a set of the
2868 * ZFS_ONLINE_* flags.
2869 */
2870 int
2871 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2872 vdev_state_t *newstate)
2873 {
2874 zfs_cmd_t zc = {"\0"};
2875 char msg[1024];
2876 char *pathname;
2877 nvlist_t *tgt;
2878 boolean_t avail_spare, l2cache, islog;
2879 libzfs_handle_t *hdl = zhp->zpool_hdl;
2880 int error;
2881
2882 if (flags & ZFS_ONLINE_EXPAND) {
2883 (void) snprintf(msg, sizeof (msg),
2884 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2885 } else {
2886 (void) snprintf(msg, sizeof (msg),
2887 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2888 }
2889
2890 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2891 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2892 &islog)) == NULL)
2893 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2894
2895 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2896
2897 if (avail_spare)
2898 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2899
2900 if ((flags & ZFS_ONLINE_EXPAND ||
2901 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2902 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2903 uint64_t wholedisk = 0;
2904
2905 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2906 &wholedisk);
2907
2908 /*
2909 * XXX - L2ARC 1.0 devices can't support expansion.
2910 */
2911 if (l2cache) {
2912 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2913 "cannot expand cache devices"));
2914 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2915 }
2916
2917 if (wholedisk) {
2918 const char *fullpath = path;
2919 char buf[MAXPATHLEN];
2920
2921 if (path[0] != '/') {
2922 error = zfs_resolve_shortname(path, buf,
2923 sizeof (buf));
2924 if (error != 0)
2925 return (zfs_error(hdl, EZFS_NODEVICE,
2926 msg));
2927
2928 fullpath = buf;
2929 }
2930
2931 error = zpool_relabel_disk(hdl, fullpath, msg);
2932 if (error != 0)
2933 return (error);
2934 }
2935 }
2936
2937 zc.zc_cookie = VDEV_STATE_ONLINE;
2938 zc.zc_obj = flags;
2939
2940 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2941 if (errno == EINVAL) {
2942 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2943 "from this pool into a new one. Use '%s' "
2944 "instead"), "zpool detach");
2945 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2946 }
2947 return (zpool_standard_error(hdl, errno, msg));
2948 }
2949
2950 *newstate = zc.zc_cookie;
2951 return (0);
2952 }
2953
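/*
 * Illustrative sketch (not part of the original source): bringing a
 * device back online and requesting expansion to any grown LUN size.
 * The device name is hypothetical.
 */
static int
example_online_and_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;
	int err;

	err = zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate);
	if (err == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) fprintf(stderr, "device online but not yet healthy\n");
	return (err);
}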
2954 /*
2955  * Take the specified vdev offline.
2956 */
2957 int
2958 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2959 {
2960 zfs_cmd_t zc = {"\0"};
2961 char msg[1024];
2962 nvlist_t *tgt;
2963 boolean_t avail_spare, l2cache;
2964 libzfs_handle_t *hdl = zhp->zpool_hdl;
2965
2966 (void) snprintf(msg, sizeof (msg),
2967 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2968
2969 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2970 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2971 NULL)) == NULL)
2972 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2973
2974 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2975
2976 if (avail_spare)
2977 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2978
2979 zc.zc_cookie = VDEV_STATE_OFFLINE;
2980 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2981
2982 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2983 return (0);
2984
2985 switch (errno) {
2986 case EBUSY:
2987
2988 /*
2989 * There are no other replicas of this device.
2990 */
2991 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2992
2993 case EEXIST:
2994 /*
2995  * The log device has unplayed logs.
2996 */
2997 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2998
2999 default:
3000 return (zpool_standard_error(hdl, errno, msg));
3001 }
3002 }
3003
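/*
 * Illustrative sketch (not part of the original source): temporarily
 * offlining a device, for example before pulling it for service.
 * Passing B_TRUE keeps the offline state from persisting across an
 * export/import or reboot.
 */
static int
example_offline_for_service(zpool_handle_t *zhp, const char *dev)
{
	return (zpool_vdev_offline(zhp, dev, B_TRUE));
}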
3004 /*
3005 * Mark the given vdev faulted.
3006 */
3007 int
3008 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3009 {
3010 zfs_cmd_t zc = {"\0"};
3011 char msg[1024];
3012 libzfs_handle_t *hdl = zhp->zpool_hdl;
3013
3014 (void) snprintf(msg, sizeof (msg),
3015 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3016
3017 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3018 zc.zc_guid = guid;
3019 zc.zc_cookie = VDEV_STATE_FAULTED;
3020 zc.zc_obj = aux;
3021
3022 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3023 return (0);
3024
3025 switch (errno) {
3026 case EBUSY:
3027
3028 /*
3029 * There are no other replicas of this device.
3030 */
3031 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3032
3033 default:
3034 return (zpool_standard_error(hdl, errno, msg));
3035 }
3036
3037 }
3038
3039 /*
3040 * Mark the given vdev degraded.
3041 */
3042 int
3043 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3044 {
3045 zfs_cmd_t zc = {"\0"};
3046 char msg[1024];
3047 libzfs_handle_t *hdl = zhp->zpool_hdl;
3048
3049 (void) snprintf(msg, sizeof (msg),
3050 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
3051
3052 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3053 zc.zc_guid = guid;
3054 zc.zc_cookie = VDEV_STATE_DEGRADED;
3055 zc.zc_obj = aux;
3056
3057 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3058 return (0);
3059
3060 return (zpool_standard_error(hdl, errno, msg));
3061 }
3062
3063 /*
3064 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3065 * a hot spare.
3066 */
3067 static boolean_t
3068 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3069 {
3070 nvlist_t **child;
3071 uint_t c, children;
3072 char *type;
3073
3074 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3075 &children) == 0) {
3076 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
3077 &type) == 0);
3078
3079 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
3080 children == 2 && child[which] == tgt)
3081 return (B_TRUE);
3082
3083 for (c = 0; c < children; c++)
3084 if (is_replacing_spare(child[c], tgt, which))
3085 return (B_TRUE);
3086 }
3087
3088 return (B_FALSE);
3089 }
3090
3091 /*
3092 * Attach new_disk (fully described by nvroot) to old_disk.
3093 * If 'replacing' is specified, the new disk will replace the old one.
3094 */
3095 int
3096 zpool_vdev_attach(zpool_handle_t *zhp,
3097 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
3098 {
3099 zfs_cmd_t zc = {"\0"};
3100 char msg[1024];
3101 int ret;
3102 nvlist_t *tgt;
3103 boolean_t avail_spare, l2cache, islog;
3104 uint64_t val;
3105 char *newname;
3106 nvlist_t **child;
3107 uint_t children;
3108 nvlist_t *config_root;
3109 libzfs_handle_t *hdl = zhp->zpool_hdl;
3110 boolean_t rootpool = zpool_is_bootable(zhp);
3111
3112 if (replacing)
3113 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3114 "cannot replace %s with %s"), old_disk, new_disk);
3115 else
3116 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3117 "cannot attach %s to %s"), new_disk, old_disk);
3118
3119 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3120 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3121 &islog)) == NULL)
3122 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3123
3124 if (avail_spare)
3125 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3126
3127 if (l2cache)
3128 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3129
3130 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3131 zc.zc_cookie = replacing;
3132
3133 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3134 &child, &children) != 0 || children != 1) {
3135 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3136 "new device must be a single disk"));
3137 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
3138 }
3139
3140 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3141 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
3142
3143 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3144 return (-1);
3145
3146 /*
3147 * If the target is a hot spare that has been swapped in, we can only
3148 * replace it with another hot spare.
3149 */
3150 if (replacing &&
3151 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3152 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3153 NULL) == NULL || !avail_spare) &&
3154 is_replacing_spare(config_root, tgt, 1)) {
3155 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3156 "can only be replaced by another hot spare"));
3157 free(newname);
3158 return (zfs_error(hdl, EZFS_BADTARGET, msg));
3159 }
3160
3161 free(newname);
3162
3163 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
3164 return (-1);
3165
3166 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3167
3168 zcmd_free_nvlists(&zc);
3169
3170 if (ret == 0) {
3171 if (rootpool) {
3172 /*
3173 * XXX need a better way to prevent user from
3174 * booting up a half-baked vdev.
3175 */
3176 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
3177 "sure to wait until resilver is done "
3178 "before rebooting.\n"));
3179 }
3180 return (0);
3181 }
3182
3183 switch (errno) {
3184 case ENOTSUP:
3185 /*
3186 * Can't attach to or replace this type of vdev.
3187 */
3188 if (replacing) {
3189 uint64_t version = zpool_get_prop_int(zhp,
3190 ZPOOL_PROP_VERSION, NULL);
3191
3192 if (islog)
3193 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3194 "cannot replace a log with a spare"));
3195 else if (version >= SPA_VERSION_MULTI_REPLACE)
3196 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3197 "already in replacing/spare config; wait "
3198 "for completion or use 'zpool detach'"));
3199 else
3200 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3201 "cannot replace a replacing device"));
3202 } else {
3203 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3204 "can only attach to mirrors and top-level "
3205 "disks"));
3206 }
3207 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3208 break;
3209
3210 case EINVAL:
3211 /*
3212 * The new device must be a single disk.
3213 */
3214 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3215 "new device must be a single disk"));
3216 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3217 break;
3218
3219 case EBUSY:
3220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
3221 "or device removal is in progress"),
3222 new_disk);
3223 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3224 break;
3225
3226 case EOVERFLOW:
3227 /*
3228 * The new device is too small.
3229 */
3230 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3231 "device is too small"));
3232 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3233 break;
3234
3235 case EDOM:
3236 /*
3237 * The new device has a different optimal sector size.
3238 */
3239 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3240 "new device has a different optimal sector size; use the "
3241 "option '-o ashift=N' to override the optimal size"));
3242 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3243 break;
3244
3245 case ENAMETOOLONG:
3246 /*
3247 * The resulting top-level vdev spec won't fit in the label.
3248 */
3249 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
3250 break;
3251
3252 default:
3253 (void) zpool_standard_error(hdl, errno, msg);
3254 }
3255
3256 return (-1);
3257 }
3258
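/*
 * Illustrative sketch (not part of the original source): attaching a
 * new device as a mirror of an existing one. Real callers (the zpool
 * command) build 'nvroot' with their vdev-spec parser and label the
 * disk first; this minimal hand-built tree only shows the shape that
 * zpool_vdev_attach() expects. The partition paths are hypothetical.
 */
static int
example_attach_mirror(zpool_handle_t *zhp)
{
	nvlist_t *nvroot = fnvlist_alloc();
	nvlist_t *disk = fnvlist_alloc();
	int err;

	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb1");
	fnvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL);

	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);

	/* replacing == 0 requests an attach (mirror), not a replace. */
	err = zpool_vdev_attach(zhp, "/dev/sda1", "/dev/sdb1", nvroot, 0);

	fnvlist_free(disk);
	fnvlist_free(nvroot);
	return (err);
}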
3259 /*
3260 * Detach the specified device.
3261 */
3262 int
3263 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3264 {
3265 zfs_cmd_t zc = {"\0"};
3266 char msg[1024];
3267 nvlist_t *tgt;
3268 boolean_t avail_spare, l2cache;
3269 libzfs_handle_t *hdl = zhp->zpool_hdl;
3270
3271 (void) snprintf(msg, sizeof (msg),
3272 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3273
3274 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3275 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3276 NULL)) == NULL)
3277 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3278
3279 if (avail_spare)
3280 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3281
3282 if (l2cache)
3283 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3284
3285 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3286
3287 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3288 return (0);
3289
3290 switch (errno) {
3291
3292 case ENOTSUP:
3293 /*
3294 * Can't detach from this type of vdev.
3295 */
3296 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3297 "applicable to mirror and replacing vdevs"));
3298 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3299 break;
3300
3301 case EBUSY:
3302 /*
3303 * There are no other replicas of this device.
3304 */
3305 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3306 break;
3307
3308 default:
3309 (void) zpool_standard_error(hdl, errno, msg);
3310 }
3311
3312 return (-1);
3313 }
3314
3315 /*
3316 * Find a mirror vdev in the source nvlist.
3317 *
3318 * The mchild array contains a list of disks in one of the top-level mirrors
3319 * of the source pool. The schild array contains a list of disks that the
3320 * user specified on the command line. We loop over the mchild array to
3321 * see if any entry in the schild array matches.
3322 *
3323 * If a disk in the mchild array is found in the schild array, we return
3324 * the index of that entry. Otherwise we return -1.
3325 */
3326 static int
3327 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3328 nvlist_t **schild, uint_t schildren)
3329 {
3330 uint_t mc;
3331
3332 for (mc = 0; mc < mchildren; mc++) {
3333 uint_t sc;
3334 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3335 mchild[mc], 0);
3336
3337 for (sc = 0; sc < schildren; sc++) {
3338 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3339 schild[sc], 0);
3340 boolean_t result = (strcmp(mpath, spath) == 0);
3341
3342 free(spath);
3343 if (result) {
3344 free(mpath);
3345 return (mc);
3346 }
3347 }
3348
3349 free(mpath);
3350 }
3351
3352 return (-1);
3353 }
3354
3355 /*
3356  * Split a mirror pool. If newroot points to NULL, then a new nvlist
3357 * is generated and it is the responsibility of the caller to free it.
3358 */
3359 int
3360 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3361 nvlist_t *props, splitflags_t flags)
3362 {
3363 zfs_cmd_t zc = {"\0"};
3364 char msg[1024];
3365 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3366 nvlist_t **varray = NULL, *zc_props = NULL;
3367 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3368 libzfs_handle_t *hdl = zhp->zpool_hdl;
3369 uint64_t vers, readonly = B_FALSE;
3370 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3371 int retval = 0;
3372
3373 (void) snprintf(msg, sizeof (msg),
3374 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3375
3376 if (!zpool_name_valid(hdl, B_FALSE, newname))
3377 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3378
3379 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3380 (void) fprintf(stderr, gettext("Internal error: unable to "
3381 "retrieve pool configuration\n"));
3382 return (-1);
3383 }
3384
3385 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3386 == 0);
3387 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3388
3389 if (props) {
3390 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3391 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3392 props, vers, flags, msg)) == NULL)
3393 return (-1);
3394 (void) nvlist_lookup_uint64(zc_props,
3395 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3396 if (readonly) {
3397 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3398 "property %s can only be set at import time"),
3399 zpool_prop_to_name(ZPOOL_PROP_READONLY));
3400 return (-1);
3401 }
3402 }
3403
3404 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3405 &children) != 0) {
3406 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3407 "Source pool is missing vdev tree"));
3408 nvlist_free(zc_props);
3409 return (-1);
3410 }
3411
3412 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3413 vcount = 0;
3414
3415 if (*newroot == NULL ||
3416 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3417 &newchild, &newchildren) != 0)
3418 newchildren = 0;
3419
3420 for (c = 0; c < children; c++) {
3421 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3422 char *type;
3423 nvlist_t **mchild, *vdev;
3424 uint_t mchildren;
3425 int entry;
3426
3427 /*
3428 * Unlike cache & spares, slogs are stored in the
3429 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3430 */
3431 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3432 &is_log);
3433 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3434 &is_hole);
3435 if (is_log || is_hole) {
3436 /*
3437 * Create a hole vdev and put it in the config.
3438 */
3439 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3440 goto out;
3441 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3442 VDEV_TYPE_HOLE) != 0)
3443 goto out;
3444 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3445 1) != 0)
3446 goto out;
3447 if (lastlog == 0)
3448 lastlog = vcount;
3449 varray[vcount++] = vdev;
3450 continue;
3451 }
3452 lastlog = 0;
3453 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3454 == 0);
3455
3456 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
3457 vdev = child[c];
3458 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3459 goto out;
3460 continue;
3461 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3462 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3463 "Source pool must be composed only of mirrors\n"));
3464 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3465 goto out;
3466 }
3467
3468 verify(nvlist_lookup_nvlist_array(child[c],
3469 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3470
3471 /* find or add an entry for this top-level vdev */
3472 if (newchildren > 0 &&
3473 (entry = find_vdev_entry(zhp, mchild, mchildren,
3474 newchild, newchildren)) >= 0) {
3475 /* We found a disk that the user specified. */
3476 vdev = mchild[entry];
3477 ++found;
3478 } else {
3479 /* User didn't specify a disk for this vdev. */
3480 vdev = mchild[mchildren - 1];
3481 }
3482
3483 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3484 goto out;
3485 }
3486
3487 /* did we find every disk the user specified? */
3488 if (found != newchildren) {
3489 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3490 "include at most one disk from each mirror"));
3491 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3492 goto out;
3493 }
3494
3495 /* Prepare the nvlist for populating. */
3496 if (*newroot == NULL) {
3497 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3498 goto out;
3499 freelist = B_TRUE;
3500 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3501 VDEV_TYPE_ROOT) != 0)
3502 goto out;
3503 } else {
3504 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3505 }
3506
3507 /* Add all the children we found */
3508 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3509 lastlog == 0 ? vcount : lastlog) != 0)
3510 goto out;
3511
3512 /*
3513 * If we're just doing a dry run, exit now with success.
3514 */
3515 if (flags.dryrun) {
3516 memory_err = B_FALSE;
3517 freelist = B_FALSE;
3518 goto out;
3519 }
3520
3521 /* now build up the config list & call the ioctl */
3522 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3523 goto out;
3524
3525 if (nvlist_add_nvlist(newconfig,
3526 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3527 nvlist_add_string(newconfig,
3528 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3529 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3530 goto out;
3531
3532 /*
3533 * The new pool is automatically part of the namespace unless we
3534 * explicitly export it.
3535 */
3536 if (!flags.import)
3537 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3538 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3539 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3540 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3541 goto out;
3542 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3543 goto out;
3544
3545 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3546 retval = zpool_standard_error(hdl, errno, msg);
3547 goto out;
3548 }
3549
3550 freelist = B_FALSE;
3551 memory_err = B_FALSE;
3552
3553 out:
3554 if (varray != NULL) {
3555 int v;
3556
3557 for (v = 0; v < vcount; v++)
3558 nvlist_free(varray[v]);
3559 free(varray);
3560 }
3561 zcmd_free_nvlists(&zc);
3562 nvlist_free(zc_props);
3563 nvlist_free(newconfig);
3564 if (freelist) {
3565 nvlist_free(*newroot);
3566 *newroot = NULL;
3567 }
3568
3569 if (retval != 0)
3570 return (retval);
3571
3572 if (memory_err)
3573 return (no_memory(hdl));
3574
3575 return (0);
3576 }
3577
3578 /*
3579 * Remove the given device.
3580 */
3581 int
3582 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3583 {
3584 zfs_cmd_t zc = {"\0"};
3585 char msg[1024];
3586 nvlist_t *tgt;
3587 boolean_t avail_spare, l2cache, islog;
3588 libzfs_handle_t *hdl = zhp->zpool_hdl;
3589 uint64_t version;
3590
3591 (void) snprintf(msg, sizeof (msg),
3592 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3593
3594 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3595 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3596 &islog)) == NULL)
3597 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3598
3599 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3600 if (islog && version < SPA_VERSION_HOLES) {
3601 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3602 "pool must be upgraded to support log removal"));
3603 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3604 }
3605
3606 if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
3607 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3608 "root pool can not have removed devices, "
3609 "because GRUB does not understand them"));
3610 return (zfs_error(hdl, EINVAL, msg));
3611 }
3612
3613 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3614
3615 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3616 return (0);
3617
3618 switch (errno) {
3619
3620 case EINVAL:
3621 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3622 "invalid config; all top-level vdevs must "
3623 "have the same sector size and not be raidz."));
3624 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3625 break;
3626
3627 case EBUSY:
3628 if (islog) {
3629 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3630 "Mount encrypted datasets to replay logs."));
3631 } else {
3632 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3633 "Pool busy; removal may already be in progress"));
3634 }
3635 (void) zfs_error(hdl, EZFS_BUSY, msg);
3636 break;
3637
3638 case EACCES:
3639 if (islog) {
3640 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3641 "Mount encrypted datasets to replay logs."));
3642 (void) zfs_error(hdl, EZFS_BUSY, msg);
3643 } else {
3644 (void) zpool_standard_error(hdl, errno, msg);
3645 }
3646 break;
3647
3648 default:
3649 (void) zpool_standard_error(hdl, errno, msg);
3650 }
3651 return (-1);
3652 }
3653
3654 int
3655 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3656 {
3657 zfs_cmd_t zc;
3658 char msg[1024];
3659 libzfs_handle_t *hdl = zhp->zpool_hdl;
3660
3661 (void) snprintf(msg, sizeof (msg),
3662 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3663
3664 bzero(&zc, sizeof (zc));
3665 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3666 zc.zc_cookie = 1;
3667
3668 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3669 return (0);
3670
3671 return (zpool_standard_error(hdl, errno, msg));
3672 }
3673
3674 int
3675 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3676 uint64_t *sizep)
3677 {
3678 char msg[1024];
3679 nvlist_t *tgt;
3680 boolean_t avail_spare, l2cache, islog;
3681 libzfs_handle_t *hdl = zhp->zpool_hdl;
3682
3683 (void) snprintf(msg, sizeof (msg),
3684 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3685 path);
3686
3687 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3688 &islog)) == NULL)
3689 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3690
3691 if (avail_spare || l2cache || islog) {
3692 *sizep = 0;
3693 return (0);
3694 }
3695
3696 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3698 "indirect size not available"));
3699 return (zfs_error(hdl, EINVAL, msg));
3700 }
3701 return (0);
3702 }
3703
3704 /*
3705 * Clear the errors for the pool, or the particular device if specified.
3706 */
3707 int
3708 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3709 {
3710 zfs_cmd_t zc = {"\0"};
3711 char msg[1024];
3712 nvlist_t *tgt;
3713 zpool_load_policy_t policy;
3714 boolean_t avail_spare, l2cache;
3715 libzfs_handle_t *hdl = zhp->zpool_hdl;
3716 nvlist_t *nvi = NULL;
3717 int error;
3718
3719 if (path)
3720 (void) snprintf(msg, sizeof (msg),
3721 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3722 path);
3723 else
3724 (void) snprintf(msg, sizeof (msg),
3725 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3726 zhp->zpool_name);
3727
3728 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3729 if (path) {
3730 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3731 &l2cache, NULL)) == NULL)
3732 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3733
3734 /*
3735 * Don't allow error clearing for hot spares. Do allow
3736 * error clearing for l2cache devices.
3737 */
3738 if (avail_spare)
3739 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3740
3741 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3742 &zc.zc_guid) == 0);
3743 }
3744
3745 zpool_get_load_policy(rewindnvl, &policy);
3746 zc.zc_cookie = policy.zlp_rewind;
3747
3748 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3749 return (-1);
3750
3751 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3752 return (-1);
3753
3754 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3755 errno == ENOMEM) {
3756 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3757 zcmd_free_nvlists(&zc);
3758 return (-1);
3759 }
3760 }
3761
3762 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
3763 errno != EPERM && errno != EACCES)) {
3764 if (policy.zlp_rewind &
3765 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3766 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3767 zpool_rewind_exclaim(hdl, zc.zc_name,
3768 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
3769 nvi);
3770 nvlist_free(nvi);
3771 }
3772 zcmd_free_nvlists(&zc);
3773 return (0);
3774 }
3775
3776 zcmd_free_nvlists(&zc);
3777 return (zpool_standard_error(hdl, errno, msg));
3778 }
3779
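/*
 * Illustrative sketch (not part of the original source): clearing error
 * counts for the whole pool or a single device. An empty policy nvlist
 * is assumed to fall back to the default (no rewind); passing NULL for
 * 'path' clears the entire pool.
 */
static int
example_clear_errors(zpool_handle_t *zhp, const char *path)
{
	nvlist_t *policy = fnvlist_alloc();
	int err;

	err = zpool_clear(zhp, path, policy);
	fnvlist_free(policy);
	return (err);
}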
3780 /*
3781 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3782 */
3783 int
3784 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3785 {
3786 zfs_cmd_t zc = {"\0"};
3787 char msg[1024];
3788 libzfs_handle_t *hdl = zhp->zpool_hdl;
3789
3790 (void) snprintf(msg, sizeof (msg),
3791 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3792 (u_longlong_t)guid);
3793
3794 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3795 zc.zc_guid = guid;
3796 zc.zc_cookie = ZPOOL_NO_REWIND;
3797
3798 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
3799 return (0);
3800
3801 return (zpool_standard_error(hdl, errno, msg));
3802 }
3803
3804 /*
3805 * Change the GUID for a pool.
3806 */
3807 int
3808 zpool_reguid(zpool_handle_t *zhp)
3809 {
3810 char msg[1024];
3811 libzfs_handle_t *hdl = zhp->zpool_hdl;
3812 zfs_cmd_t zc = {"\0"};
3813
3814 (void) snprintf(msg, sizeof (msg),
3815 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3816
3817 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3818 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3819 return (0);
3820
3821 return (zpool_standard_error(hdl, errno, msg));
3822 }
3823
3824 /*
3825 * Reopen the pool.
3826 */
3827 int
3828 zpool_reopen_one(zpool_handle_t *zhp, void *data)
3829 {
3830 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3831 const char *pool_name = zpool_get_name(zhp);
3832 boolean_t *scrub_restart = data;
3833 int error;
3834
3835 error = lzc_reopen(pool_name, *scrub_restart);
3836 if (error) {
3837 return (zpool_standard_error_fmt(hdl, error,
3838 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
3839 }
3840
3841 return (0);
3842 }
3843
3844 /* call into libzfs_core to execute the sync IOCTL per pool */
3845 int
3846 zpool_sync_one(zpool_handle_t *zhp, void *data)
3847 {
3848 int ret;
3849 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3850 const char *pool_name = zpool_get_name(zhp);
3851 boolean_t *force = data;
3852 nvlist_t *innvl = fnvlist_alloc();
3853
3854 fnvlist_add_boolean_value(innvl, "force", *force);
3855 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3856 nvlist_free(innvl);
3857 return (zpool_standard_error_fmt(hdl, ret,
3858 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3859 }
3860 nvlist_free(innvl);
3861
3862 return (0);
3863 }
3864
3865 #define PATH_BUF_LEN 64
3866
3867 /*
3868 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3869 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3870 * We also check if this is a whole disk, in which case we strip off the
3871 * trailing 's0' slice name.
3872 *
3873 * This routine is also responsible for identifying when disks have been
3874 * reconfigured in a new location. The kernel will have opened the device by
3875 * devid, but the path will still refer to the old location. To catch this, we
3876 * first do a path -> devid translation (which is fast for the common case). If
3877 * the devid matches, we're done. If not, we do a reverse devid -> path
3878 * translation and issue the appropriate ioctl() to update the path of the vdev.
3879 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3880 * of these checks.
3881 */
3882 char *
3883 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3884 int name_flags)
3885 {
3886 char *path, *type, *env;
3887 uint64_t value;
3888 char buf[PATH_BUF_LEN];
3889 char tmpbuf[PATH_BUF_LEN];
3890
3891 /*
3892 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
3893 * zpool name that will be displayed to the user.
3894 */
3895 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3896 if (zhp != NULL && strcmp(type, "root") == 0)
3897 return (zfs_strdup(hdl, zpool_get_name(zhp)));
3898
3899 env = getenv("ZPOOL_VDEV_NAME_PATH");
3900 if (env && (strtoul(env, NULL, 0) > 0 ||
3901 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3902 name_flags |= VDEV_NAME_PATH;
3903
3904 env = getenv("ZPOOL_VDEV_NAME_GUID");
3905 if (env && (strtoul(env, NULL, 0) > 0 ||
3906 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3907 name_flags |= VDEV_NAME_GUID;
3908
3909 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3910 if (env && (strtoul(env, NULL, 0) > 0 ||
3911 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3912 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3913
3914 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3915 name_flags & VDEV_NAME_GUID) {
3916 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3917 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3918 path = buf;
3919 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3920 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3921 char *rp = realpath(path, NULL);
3922 if (rp) {
3923 strlcpy(buf, rp, sizeof (buf));
3924 path = buf;
3925 free(rp);
3926 }
3927 }
3928
3929 /*
3930 * For a block device only use the name.
3931 */
3932 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3933 !(name_flags & VDEV_NAME_PATH)) {
3934 path = zfs_strip_path(path);
3935 }
3936
3937 /*
3938  * Remove the partition from the path if this is a whole disk.
3939 */
3940 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3941 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3942 return (zfs_strip_partition(path));
3943 }
3944 } else {
3945 path = type;
3946
3947 /*
3948 * If it's a raidz device, we need to stick in the parity level.
3949 */
3950 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3951 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3952 &value) == 0);
3953 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3954 (u_longlong_t)value);
3955 path = buf;
3956 }
3957
3958 /*
3959 * We identify each top-level vdev by using a <type-id>
3960 * naming convention.
3961 */
3962 if (name_flags & VDEV_NAME_TYPE_ID) {
3963 uint64_t id;
3964 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3965 &id) == 0);
3966 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3967 path, (u_longlong_t)id);
3968 path = tmpbuf;
3969 }
3970 }
3971
3972 return (zfs_strdup(hdl, path));
3973 }
3974
3975 static int
3976 zbookmark_mem_compare(const void *a, const void *b)
3977 {
3978 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3979 }
3980
3981 /*
3982 * Retrieve the persistent error log, uniquify the members, and return to the
3983 * caller.
3984 */
3985 int
3986 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3987 {
3988 zfs_cmd_t zc = {"\0"};
3989 libzfs_handle_t *hdl = zhp->zpool_hdl;
3990 uint64_t count;
3991 zbookmark_phys_t *zb = NULL;
3992 int i;
3993
3994 /*
3995 * Retrieve the raw error list from the kernel. If the number of errors
3996 * has increased, allocate more space and continue until we get the
3997 * entire list.
3998 */
3999 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
4000 &count) == 0);
4001 if (count == 0)
4002 return (0);
4003 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
4004 count * sizeof (zbookmark_phys_t));
4005 zc.zc_nvlist_dst_size = count;
4006 (void) strcpy(zc.zc_name, zhp->zpool_name);
4007 for (;;) {
4008 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4009 &zc) != 0) {
4010 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4011 if (errno == ENOMEM) {
4012 void *dst;
4013
4014 count = zc.zc_nvlist_dst_size;
4015 dst = zfs_alloc(zhp->zpool_hdl, count *
4016 sizeof (zbookmark_phys_t));
4017 zc.zc_nvlist_dst = (uintptr_t)dst;
4018 } else {
4019 return (zpool_standard_error_fmt(hdl, errno,
4020 dgettext(TEXT_DOMAIN, "errors: List of "
4021 "errors unavailable")));
4022 }
4023 } else {
4024 break;
4025 }
4026 }
4027
4028 /*
4029 * Sort the resulting bookmarks. This is a little confusing due to the
4030 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
4031 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4032 * _not_ copied as part of the process. So we point the start of our
4033  * array appropriately and decrement the total number of elements.
4034 */
4035 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
4036 zc.zc_nvlist_dst_size;
4037 count -= zc.zc_nvlist_dst_size;
4038
4039 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4040
4041 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4042
4043 /*
4044 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4045 */
4046 for (i = 0; i < count; i++) {
4047 nvlist_t *nv;
4048
4049 /* ignoring zb_blkid and zb_level for now */
4050 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4051 zb[i-1].zb_object == zb[i].zb_object)
4052 continue;
4053
4054 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4055 goto nomem;
4056 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4057 zb[i].zb_objset) != 0) {
4058 nvlist_free(nv);
4059 goto nomem;
4060 }
4061 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4062 zb[i].zb_object) != 0) {
4063 nvlist_free(nv);
4064 goto nomem;
4065 }
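/*
 * The pair name used below ("ejk") is an arbitrary placeholder; the list is
 * allocated without NV_UNIQUE_NAME above, and consumers are expected to walk
 * it with nvlist_next_nvpair() rather than look entries up by name.
 */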
4066 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4067 nvlist_free(nv);
4068 goto nomem;
4069 }
4070 nvlist_free(nv);
4071 }
4072
4073 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4074 return (0);
4075
4076 nomem:
4077 free((void *)(uintptr_t)zc.zc_nvlist_dst);
4078 return (no_memory(zhp->zpool_hdl));
4079 }
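/*
 * A minimal caller sketch (assuming an already-open zpool_handle_t 'zhp';
 * error handling elided): each entry in the returned list is an nvlist
 * holding the dataset and object numbers of one corrupted object, which
 * zpool_obj_to_path() below can turn into a printable path.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	char pathname[MAXPATHLEN * 2];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv = fnvpair_value_nvlist(elem);
 *			zpool_obj_to_path(zhp,
 *			    fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET),
 *			    fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT),
 *			    pathname, sizeof (pathname));
 *			(void) printf("%s\n", pathname);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */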
4080
4081 /*
4082 * Upgrade a ZFS pool to the latest on-disk version.
4083 */
4084 int
4085 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4086 {
4087 zfs_cmd_t zc = {"\0"};
4088 libzfs_handle_t *hdl = zhp->zpool_hdl;
4089
4090 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4091 zc.zc_cookie = new_version;
4092
4093 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4094 return (zpool_standard_error_fmt(hdl, errno,
4095 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4096 zhp->zpool_name));
4097 return (0);
4098 }
4099
4100 void
4101 zfs_save_arguments(int argc, char **argv, char *string, int len)
4102 {
4103 int i;
4104
4105 (void) strlcpy(string, basename(argv[0]), len);
4106 for (i = 1; i < argc; i++) {
4107 (void) strlcat(string, " ", len);
4108 (void) strlcat(string, argv[i], len);
4109 }
4110 }
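/*
 * For example, invoking a tool as "/sbin/zpool create tank mirror sda sdb"
 * would produce the string "zpool create tank mirror sda sdb" (truncated to
 * 'len' bytes if necessary), which callers typically hand to
 * zpool_log_history() below.
 */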
4111
4112 int
4113 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4114 {
4115 zfs_cmd_t zc = {"\0"};
4116 nvlist_t *args;
4117 int err;
4118
4119 args = fnvlist_alloc();
4120 fnvlist_add_string(args, "message", message);
4121 err = zcmd_write_src_nvlist(hdl, &zc, args);
4122 if (err == 0)
4123 err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4124 nvlist_free(args);
4125 zcmd_free_nvlists(&zc);
4126 return (err);
4127 }
4128
4129 /*
4130 * Perform ioctl to get some command history of a pool.
4131 *
4132 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4133 * logical offset of the history buffer to start reading from.
4134 *
4135 * Upon return, 'off' is the next logical offset to read from and
4136 * 'len' is the actual number of bytes read into 'buf'.
4137 */
4138 static int
4139 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4140 {
4141 zfs_cmd_t zc = {"\0"};
4142 libzfs_handle_t *hdl = zhp->zpool_hdl;
4143
4144 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4145
4146 zc.zc_history = (uint64_t)(uintptr_t)buf;
4147 zc.zc_history_len = *len;
4148 zc.zc_history_offset = *off;
4149
4150 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4151 switch (errno) {
4152 case EPERM:
4153 return (zfs_error_fmt(hdl, EZFS_PERM,
4154 dgettext(TEXT_DOMAIN,
4155 "cannot show history for pool '%s'"),
4156 zhp->zpool_name));
4157 case ENOENT:
4158 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4159 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4160 "'%s'"), zhp->zpool_name));
4161 case ENOTSUP:
4162 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4163 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4164 "'%s', pool must be upgraded"), zhp->zpool_name));
4165 default:
4166 return (zpool_standard_error_fmt(hdl, errno,
4167 dgettext(TEXT_DOMAIN,
4168 "cannot get history for '%s'"), zhp->zpool_name));
4169 }
4170 }
4171
4172 *len = zc.zc_history_len;
4173 *off = zc.zc_history_offset;
4174
4175 return (0);
4176 }
4177
4178 /*
4179 * Retrieve the command history of a pool.
4180 */
4181 int
4182 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4183 boolean_t *eof)
4184 {
4185 char *buf;
4186 int buflen = 128 * 1024;
4187 nvlist_t **records = NULL;
4188 uint_t numrecords = 0;
4189 int err, i;
4190 uint64_t start = *off;
4191
4192 buf = malloc(buflen);
4193 if (buf == NULL)
4194 return (ENOMEM);
4195 /* process about 1MB at a time */
4196 while (*off - start < 1024 * 1024) {
4197 uint64_t bytes_read = buflen;
4198 uint64_t leftover;
4199
4200 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4201 break;
4202
4203 /* if nothing else was read in, we're at EOF; just return */
4204 if (!bytes_read) {
4205 *eof = B_TRUE;
4206 break;
4207 }
4208
4209 if ((err = zpool_history_unpack(buf, bytes_read,
4210 &leftover, &records, &numrecords)) != 0)
4211 break;
4212 *off -= leftover;
4213 if (leftover == bytes_read) {
4214 /*
4215 * no progress was made because the buffer is not big
4216 * enough to hold this record; resize and retry.
4217 */
4218 buflen *= 2;
4219 free(buf);
4220 buf = malloc(buflen);
4221 if (buf == NULL)
4222 return (ENOMEM);
4223 }
4224 }
4225
4226 free(buf);
4227
4228 if (!err) {
4229 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4230 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4231 records, numrecords) == 0);
4232 }
4233 for (i = 0; i < numrecords; i++)
4234 nvlist_free(records[i]);
4235 free(records);
4236
4237 return (err);
4238 }
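/*
 * A minimal caller sketch (error handling elided): read the history in
 * chunks until 'eof' is set, then print the command string of each record
 * from the ZPOOL_HIST_RECORD array of the returned nvlist.
 *
 *	nvlist_t *nvhis;
 *	nvlist_t **records;
 *	uint_t numrecords;
 *	uint64_t off = 0;
 *	boolean_t eof = B_FALSE;
 *
 *	while (!eof) {
 *		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
 *			break;
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (uint_t i = 0; i < numrecords; i++) {
 *			char *cmd;
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */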
4239
4240 /*
4241 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4242 * If there is a new event available 'nvp' will contain a newly allocated
4243 * nvlist and 'dropped' will be set to the number of missed events since
4244 * the last call to this function. When 'nvp' is set to NULL it indicates
4245 * no new events are available. In either case the function returns 0 and
4246 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4247 * function will return a non-zero value. When the function is called in
4248 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4249 * it will not return until a new event is available.
4250 */
4251 int
4252 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4253 int *dropped, unsigned flags, int zevent_fd)
4254 {
4255 zfs_cmd_t zc = {"\0"};
4256 int error = 0;
4257
4258 *nvp = NULL;
4259 *dropped = 0;
4260 zc.zc_cleanup_fd = zevent_fd;
4261
4262 if (flags & ZEVENT_NONBLOCK)
4263 zc.zc_guid = ZEVENT_NONBLOCK;
4264
4265 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4266 return (-1);
4267
4268 retry:
4269 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4270 switch (errno) {
4271 case ESHUTDOWN:
4272 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4273 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4274 goto out;
4275 case ENOENT:
4276 /* Blocking error case should not occur */
4277 if (!(flags & ZEVENT_NONBLOCK))
4278 error = zpool_standard_error_fmt(hdl, errno,
4279 dgettext(TEXT_DOMAIN, "cannot get event"));
4280
4281 goto out;
4282 case ENOMEM:
4283 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4284 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4285 dgettext(TEXT_DOMAIN, "cannot get event"));
4286 goto out;
4287 } else {
4288 goto retry;
4289 }
4290 default:
4291 error = zpool_standard_error_fmt(hdl, errno,
4292 dgettext(TEXT_DOMAIN, "cannot get event"));
4293 goto out;
4294 }
4295 }
4296
4297 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4298 if (error != 0)
4299 goto out;
4300
4301 *dropped = (int)zc.zc_cookie;
4302 out:
4303 zcmd_free_nvlists(&zc);
4304
4305 return (error);
4306 }
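/*
 * A minimal consumer sketch (assuming an open libzfs_handle_t 'hdl'; error
 * handling elided): open the event device, then drain pending events.  With
 * ZEVENT_NONBLOCK the loop ends once 'nv' comes back NULL; without it,
 * zpool_events_next() blocks until another event arrives.
 *
 *	int zevent_fd = open(ZFS_DEV, O_RDWR);
 *	nvlist_t *nv;
 *	int dropped;
 *
 *	for (;;) {
 *		if (zpool_events_next(hdl, &nv, &dropped,
 *		    ZEVENT_NONBLOCK, zevent_fd) != 0 || nv == NULL)
 *			break;
 *		if (dropped > 0)
 *			(void) printf("missed %d events\n", dropped);
 *		dump_nvlist(nv, 8);
 *		nvlist_free(nv);
 *	}
 *	(void) close(zevent_fd);
 */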
4307
4308 /*
4309 * Clear all events.
4310 */
4311 int
4312 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4313 {
4314 zfs_cmd_t zc = {"\0"};
4315 char msg[1024];
4316
4317 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4318 "cannot clear events"));
4319
4320 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4321 return (zpool_standard_error_fmt(hdl, errno, msg));
4322
4323 if (count != NULL)
4324 *count = (int)zc.zc_cookie; /* # of events cleared */
4325
4326 return (0);
4327 }
4328
4329 /*
4330 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4331 * the passed zevent_fd file descriptor. On success zero is returned,
4332 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4333 */
4334 int
4335 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4336 {
4337 zfs_cmd_t zc = {"\0"};
4338 int error = 0;
4339
4340 zc.zc_guid = eid;
4341 zc.zc_cleanup_fd = zevent_fd;
4342
4343 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4344 switch (errno) {
4345 case ENOENT:
4346 error = zfs_error_fmt(hdl, EZFS_NOENT,
4347 dgettext(TEXT_DOMAIN, "cannot get event"));
4348 break;
4349
4350 case ENOMEM:
4351 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4352 dgettext(TEXT_DOMAIN, "cannot get event"));
4353 break;
4354
4355 default:
4356 error = zpool_standard_error_fmt(hdl, errno,
4357 dgettext(TEXT_DOMAIN, "cannot get event"));
4358 break;
4359 }
4360 }
4361
4362 return (error);
4363 }
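/*
 * For example, a consumer that wants to replay the full event log on the
 * descriptor used above might first rewind it (a sketch, assuming the same
 * 'hdl' and 'zevent_fd'):
 *
 *	if (zpool_events_seek(hdl, ZEVENT_SEEK_START, zevent_fd) != 0)
 *		return (-1);
 */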
4364
4365 void
4366 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4367 char *pathname, size_t len)
4368 {
4369 zfs_cmd_t zc = {"\0"};
4370 boolean_t mounted = B_FALSE;
4371 char *mntpnt = NULL;
4372 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4373
4374 if (dsobj == 0) {
4375 /* special case for the MOS */
4376 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4377 (longlong_t)obj);
4378 return;
4379 }
4380
4381 /* get the dataset's name */
4382 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4383 zc.zc_obj = dsobj;
4384 if (zfs_ioctl(zhp->zpool_hdl,
4385 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4386 /* just write out a path of two object numbers */
4387 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4388 (longlong_t)dsobj, (longlong_t)obj);
4389 return;
4390 }
4391 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4392
4393 /* find out if the dataset is mounted */
4394 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4395
4396 /* get the corrupted object's path */
4397 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4398 zc.zc_obj = obj;
4399 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
4400 &zc) == 0) {
4401 if (mounted) {
4402 (void) snprintf(pathname, len, "%s%s", mntpnt,
4403 zc.zc_value);
4404 } else {
4405 (void) snprintf(pathname, len, "%s:%s",
4406 dsname, zc.zc_value);
4407 }
4408 } else {
4409 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4410 (longlong_t)obj);
4411 }
4412 free(mntpnt);
4413 }
4414
4415 /*
4416 * Wait while the specified activity is in progress in the pool.
4417 */
4418 int
4419 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
4420 {
4421 boolean_t missing;
4422
4423 int error = zpool_wait_status(zhp, activity, &missing, NULL);
4424
4425 if (missing) {
4426 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
4427 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4428 zhp->zpool_name);
4429 return (ENOENT);
4430 } else {
4431 return (error);
4432 }
4433 }
4434
4435 /*
4436 * Wait for the given activity and return the status of the wait (whether or not
4437 * any waiting was done) in the 'waited' parameter. Non-existent pools are
4438 * reported via the 'missing' parameter, rather than by printing an error
4439 * message. This is convenient when this function is called in a loop over a
4440 * long period of time (as it is, for example, by zpool's wait cmd). In that
4441 * scenario, a pool being exported or destroyed should be considered a normal
4442 * event, so we don't want to print an error when we find that the pool doesn't
4443 * exist.
4444 */
4445 int
4446 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
4447 boolean_t *missing, boolean_t *waited)
4448 {
4449 int error = lzc_wait(zhp->zpool_name, activity, waited);
4450 *missing = (error == ENOENT);
4451 if (*missing)
4452 return (0);
4453
4454 if (error != 0) {
4455 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4456 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4457 zhp->zpool_name);
4458 }
4459
4460 return (error);
4461 }
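/*
 * A minimal polling sketch along the lines of what 'zpool wait' does
 * (timing and error handling elided): keep calling zpool_wait_status()
 * until no more waiting was done or the pool disappears.
 *
 *	boolean_t missing = B_FALSE, waited = B_TRUE;
 *
 *	while (waited) {
 *		if (zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB,
 *		    &missing, &waited) != 0 || missing)
 *			break;
 *	}
 */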
4462
4463 int
4464 zpool_set_bootenv(zpool_handle_t *zhp, const char *envmap)
4465 {
4466 int error = lzc_set_bootenv(zhp->zpool_name, envmap);
4467 if (error != 0) {
4468 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4469 dgettext(TEXT_DOMAIN,
4470 "error setting bootenv in pool '%s'"), zhp->zpool_name);
4471 }
4472
4473 return (error);
4474 }
4475
4476 int
4477 zpool_get_bootenv(zpool_handle_t *zhp, char *outbuf, size_t size, off_t offset)
4478 {
4479 nvlist_t *nvl = NULL;
4480 int error = lzc_get_bootenv(zhp->zpool_name, &nvl);
4481 if (error != 0) {
4482 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4483 dgettext(TEXT_DOMAIN,
4484 "error getting bootenv in pool '%s'"), zhp->zpool_name);
4485 return (-1);
4486 }
4487 char *envmap = fnvlist_lookup_string(nvl, "envmap");
4488 if (offset >= strlen(envmap)) {
4489 fnvlist_free(nvl);
4490 return (0);
4491 }
4492
4493 strlcpy(outbuf, envmap + offset, size);
4494 int bytes = MIN(strlen(envmap + offset), size);
4495 fnvlist_free(nvl);
4496 return (bytes);
4497 }
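/*
 * A minimal round-trip sketch (error handling elided; the envmap string and
 * buffer size here are arbitrary examples): store a boot environment string
 * in the pool labels, then read it back from offset 0.
 *
 *	char buf[8192];
 *
 *	if (zpool_set_bootenv(zhp, "kernel=/boot/vmlinuz") == 0) {
 *		int n = zpool_get_bootenv(zhp, buf, sizeof (buf), 0);
 *		if (n > 0)
 *			(void) printf("bootenv: %s\n", buf);
 *	}
 */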