/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
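
/*
 * Illustrative sketch (not part of the original file): reading numeric
 * pool properties.  Assumes "zhp" is a handle obtained from zpool_open();
 * error handling omitted for brevity.
 *
 *	uint64_t guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
 *	zprop_source_t src;
 *	uint64_t vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	if (src == ZPROP_SRC_DEFAULT)
 *		printf("version %llu (default)\n", (u_longlong_t)vers);
 */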

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * API compatibility wrapper around zpool_get_prop_literal
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	return (zpool_get_prop_literal(zhp, prop, buf, len, srctype, B_FALSE));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
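
/*
 * Illustrative sketch (not part of the original file): fetching a
 * formatted property value.  With literal == B_FALSE, numeric values are
 * humanized by zfs_nicenum(); pass B_TRUE to get exact numbers.  Assumes
 * a valid "zhp"; errors ignored.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_FREE, buf, sizeof (buf),
 *	    NULL) == 0)
 *		printf("free: %s\n", buf);	// e.g. "1.2T"
 */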

/*
 * Check that the bootfs name begins with the name of the pool it is
 * being set on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the same pool it is
			 * being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
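
/*
 * Illustrative sketch (not part of the original file): setting a pool
 * property by name.  Property names and values are plain strings here;
 * validation and conversion happen in zpool_valid_proplist() above.
 * Assumes a valid "zhp" and its libzfs handle "hdl".
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */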

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
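
/*
 * Illustrative sketch (not part of the original file): querying a feature
 * flag's state.  The propname must be "feature@<name>" (or
 * "unsupported@<guid>"); the result is one of "disabled", "enabled" or
 * "active".  Assumes a valid "zhp".
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		printf("async_destroy: %s\n", state);
 */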

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
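
/*
 * Worked example of the comment above: at the common 512-byte sector size,
 * NEW_START_BLOCK is 2048 sectors * 512 B = 1 MiB, so the slice starts on
 * a 1 MiB boundary; PARTITION_END_ALIGNMENT rounds the end the same way.
 */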

/*
 * Validate the given pool name, optionally reporting an extended error
 * message through 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
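
/*
 * Illustrative sketch (not part of the original file): pre-validating a
 * pool name before create.  Passing isopen == B_FALSE applies the extended
 * reserved-name checks ("mirror", "raidz", "spare", "log") described above,
 * so a name like "raidz1pool" is rejected.
 *
 *	if (!zpool_name_valid(hdl, B_FALSE, "raidz1pool"))
 *		fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */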

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}
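
/*
 * Illustrative sketch (not part of the original file): the typical handle
 * lifecycle.  zpool_open() fails on FAULTED pools; use zpool_open_canfail()
 * to inspect those.  Assumes an initialized libzfs_handle_t "hdl".
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		printf("%s: %s\n", zpool_get_name(zhp),
 *		    zpool_pool_state_to_name(zpool_get_state(zhp)));
 *		zpool_close(zhp);
 *	}
 */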

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is part
			 * of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
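
/*
 * Illustrative sketch (not part of the original file): a minimal vdev tree
 * and pool creation.  The nvlist layout (ZPOOL_CONFIG_TYPE,
 * ZPOOL_CONFIG_CHILDREN, ...) mirrors what zpool(8) builds; the device
 * path is a placeholder and error checks are omitted.  A real caller would
 * normally set additional leaf-vdev fields as well.
 *
 *	nvlist_t *nvroot, *disk;
 *	nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *	nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *	(void) zpool_create(hdl, "tank", nvroot, NULL, NULL);
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */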

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

#if defined(__sun__) || defined(__sun)
	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s], 0));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
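
/*
 * Illustrative sketch (not part of the original file): exporting a pool,
 * falling back to a forced export.  The log string is free-form text
 * recorded in the pool history via zc_history above; NULL is accepted.
 *
 *	if (zpool_export(zhp, B_FALSE, "export tank") != 0)
 *		(void) zpool_export_force(zhp, "export tank");
 */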

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}
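
/*
 * Illustrative sketch (not part of the original file): importing a
 * discovered pool under an alternate root.  "config" would normally come
 * from zpool_find_import() (implemented elsewhere in libzfs); passing
 * NULL as newname keeps the original pool name.
 *
 *	(void) zpool_import(hdl, config, NULL, "/mnt/recovery");
 */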

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
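
/*
 * Illustrative sketch (not part of the original file): starting and
 * cancelling a scrub.  EBUSY from the ioctl is mapped above to
 * EZFS_SCRUBBING or EZFS_RESILVERING depending on the in-progress scan.
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);	// start a scrub
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);		// cancel it
 */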

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
1999 static nvlist_t *
2000 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2001 boolean_t *l2cache, boolean_t *log)
2002 {
2003 uint_t c, children;
2004 nvlist_t **child;
2005 nvlist_t *ret;
2006 uint64_t is_log;
2007 char *srchkey;
2008 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2009
2010 /* Nothing to look for */
2011 if (search == NULL || pair == NULL)
2012 return (NULL);
2013
2014 /* Obtain the key we will use to search */
2015 srchkey = nvpair_name(pair);
2016
2017 switch (nvpair_type(pair)) {
2018 case DATA_TYPE_UINT64:
2019 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2020 uint64_t srchval, theguid;
2021
2022 verify(nvpair_value_uint64(pair, &srchval) == 0);
2023 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2024 &theguid) == 0);
2025 if (theguid == srchval)
2026 return (nv);
2027 }
2028 break;
2029
2030 case DATA_TYPE_STRING: {
2031 char *srchval, *val;
2032
2033 verify(nvpair_value_string(pair, &srchval) == 0);
2034 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2035 break;
2036
2037 /*
2038 * Search for the requested value. Special cases:
2039 *
2040 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2041 * "-part1", or "p1". The suffix is hidden from the user,
2042 * but included in the string, so this matches around it.
2043 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
2044 * is used to check all possible expanded paths.
2045 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2046 *
2047 * Otherwise, all other searches are simple string compares.
2048 */
2049 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2050 uint64_t wholedisk = 0;
2051
2052 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2053 &wholedisk);
2054 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2055 return (nv);
2056
2057 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2058 char *type, *idx, *end, *p;
2059 uint64_t id, vdev_id;
2060
2061 /*
2062 * Determine our vdev type, keeping in mind
2063 * that the srchval is composed of a type and
2064 * vdev id pair (e.g. mirror-4).
2065 */
2066 if ((type = strdup(srchval)) == NULL)
2067 return (NULL);
2068
2069 if ((p = strrchr(type, '-')) == NULL) {
2070 free(type);
2071 break;
2072 }
2073 idx = p + 1;
2074 *p = '\0';
2075
2076 /*
2077 * If the types don't match then keep looking.
2078 */
2079 if (strncmp(val, type, strlen(val)) != 0) {
2080 free(type);
2081 break;
2082 }
2083
2084 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2085 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2086 strncmp(type, VDEV_TYPE_MIRROR,
2087 strlen(VDEV_TYPE_MIRROR)) == 0);
2088 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2089 &id) == 0);
2090
2091 errno = 0;
2092 vdev_id = strtoull(idx, &end, 10);
2093
2094 free(type);
2095 if (errno != 0)
2096 return (NULL);
2097
2098 /*
2099 * Now verify that we have the correct vdev id.
2100 */
2101 if (vdev_id == id)
2102 return (nv);
2103 }
2104
2105 /*
2106 * Common case
2107 */
2108 if (strcmp(srchval, val) == 0)
2109 return (nv);
2110 break;
2111 }
2112
2113 default:
2114 break;
2115 }
2116
2117 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2118 &child, &children) != 0)
2119 return (NULL);
2120
2121 for (c = 0; c < children; c++) {
2122 if ((ret = vdev_to_nvlist_iter(child[c], search,
2123 avail_spare, l2cache, NULL)) != NULL) {
2124 /*
2125 * The 'is_log' value is only set for the toplevel
2126 * vdev, not the leaf vdevs. So we always look up the
2127 * log device from the root of the vdev tree (where
2128 * 'log' is non-NULL).
2129 */
2130 if (log != NULL &&
2131 nvlist_lookup_uint64(child[c],
2132 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2133 is_log) {
2134 *log = B_TRUE;
2135 }
2136 return (ret);
2137 }
2138 }
2139
2140 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2141 &child, &children) == 0) {
2142 for (c = 0; c < children; c++) {
2143 if ((ret = vdev_to_nvlist_iter(child[c], search,
2144 avail_spare, l2cache, NULL)) != NULL) {
2145 *avail_spare = B_TRUE;
2146 return (ret);
2147 }
2148 }
2149 }
2150
2151 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2152 &child, &children) == 0) {
2153 for (c = 0; c < children; c++) {
2154 if ((ret = vdev_to_nvlist_iter(child[c], search,
2155 avail_spare, l2cache, NULL)) != NULL) {
2156 *l2cache = B_TRUE;
2157 return (ret);
2158 }
2159 }
2160 }
2161
2162 return (NULL);
2163 }
2164
2165 /*
2166 * Given a physical path (minus the "/devices" prefix), find the
2167 * associated vdev.
2168 */
2169 nvlist_t *
2170 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2171 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2172 {
2173 nvlist_t *search, *nvroot, *ret;
2174
2175 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2176 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2177
2178 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2179 &nvroot) == 0);
2180
2181 *avail_spare = B_FALSE;
2182 *l2cache = B_FALSE;
2183 if (log != NULL)
2184 *log = B_FALSE;
2185 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2186 nvlist_free(search);
2187
2188 return (ret);
2189 }
2190
2191 /*
2192 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2193 */
2194 boolean_t
2195 zpool_vdev_is_interior(const char *name)
2196 {
2197 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2198 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2199 return (B_TRUE);
2200 return (B_FALSE);
2201 }
2202
2203 nvlist_t *
2204 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2205 boolean_t *l2cache, boolean_t *log)
2206 {
2207 char *end;
2208 nvlist_t *nvroot, *search, *ret;
2209 uint64_t guid;
2210
2211 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2212
2213 guid = strtoull(path, &end, 0);
2214 if (guid != 0 && *end == '\0') {
2215 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2216 } else if (zpool_vdev_is_interior(path)) {
2217 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2218 } else {
2219 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2220 }
2221
2222 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2223 &nvroot) == 0);
2224
2225 *avail_spare = B_FALSE;
2226 *l2cache = B_FALSE;
2227 if (log != NULL)
2228 *log = B_FALSE;
2229 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2230 nvlist_free(search);
2231
2232 return (ret);
2233 }
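
/*
 * Example (illustrative sketch, not part of libzfs): the lookup
 * spellings zpool_find_vdev() accepts -- a vdev guid, an interior
 * <type-id> name such as "mirror-0", or a device path.  The device
 * names used here are hypothetical.
 */
static void
example_find_vdev(zpool_handle_t *zhp)
{
	boolean_t spare, l2cache, log;
	nvlist_t *tgt;

	/* By device path; short names such as "sda" also resolve. */
	tgt = zpool_find_vdev(zhp, "/dev/sda", &spare, &l2cache, &log);

	/* By top-level <type-id> name. */
	if (tgt == NULL)
		tgt = zpool_find_vdev(zhp, "mirror-0", &spare, &l2cache,
		    &log);

	if (tgt != NULL && spare)
		(void) printf("device is an AVAIL hot spare\n");
}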
2234
2235 static int
2236 vdev_online(nvlist_t *nv)
2237 {
2238 uint64_t ival;
2239
2240 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2241 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2242 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2243 return (0);
2244
2245 return (1);
2246 }
2247
2248 /*
2249 * Helper function for zpool_get_physpaths().
2250 */
2251 static int
2252 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2253 size_t *bytes_written)
2254 {
2255 size_t bytes_left, pos, rsz;
2256 char *tmppath;
2257 const char *format;
2258
2259 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2260 &tmppath) != 0)
2261 return (EZFS_NODEVICE);
2262
2263 pos = *bytes_written;
2264 bytes_left = physpath_size - pos;
2265 format = (pos == 0) ? "%s" : " %s";
2266
2267 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2268 *bytes_written += rsz;
2269
2270 if (rsz >= bytes_left) {
2271 /* if physpath was not copied properly, clear it */
2272 if (bytes_left != 0) {
2273 physpath[pos] = 0;
2274 }
2275 return (EZFS_NOSPC);
2276 }
2277 return (0);
2278 }
2279
2280 static int
2281 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2282 size_t *rsz, boolean_t is_spare)
2283 {
2284 char *type;
2285 int ret;
2286
2287 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2288 return (EZFS_INVALCONFIG);
2289
2290 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2291 /*
2292 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2293 * For a spare vdev, we only want to boot from the active
2294 * spare device.
2295 */
2296 if (is_spare) {
2297 uint64_t spare = 0;
2298 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2299 &spare);
2300 if (!spare)
2301 return (EZFS_INVALCONFIG);
2302 }
2303
2304 if (vdev_online(nv)) {
2305 if ((ret = vdev_get_one_physpath(nv, physpath,
2306 phypath_size, rsz)) != 0)
2307 return (ret);
2308 }
2309 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2310 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2311 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2312 nvlist_t **child;
2313 uint_t count;
2314 int i, ret;
2315
2316 if (nvlist_lookup_nvlist_array(nv,
2317 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2318 return (EZFS_INVALCONFIG);
2319
2320 for (i = 0; i < count; i++) {
2321 ret = vdev_get_physpaths(child[i], physpath,
2322 phypath_size, rsz, is_spare);
2323 if (ret == EZFS_NOSPC)
2324 return (ret);
2325 }
2326 }
2327
2328 return (EZFS_POOL_INVALARG);
2329 }
2330
2331 /*
2332 * Get phys_path for a root pool config.
2333 * Return 0 on success; non-zero on failure.
2334 */
2335 static int
2336 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2337 {
2338 size_t rsz;
2339 nvlist_t *vdev_root;
2340 nvlist_t **child;
2341 uint_t count;
2342 char *type;
2343
2344 rsz = 0;
2345
2346 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2347 &vdev_root) != 0)
2348 return (EZFS_INVALCONFIG);
2349
2350 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2351 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2352 &child, &count) != 0)
2353 return (EZFS_INVALCONFIG);
2354
2355 #if defined(__sun__) || defined(__sun)
2356 /*
2357 * root pool can not have EFI labeled disks and can only have
2358 * a single top-level vdev.
2359 */
2360 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2361 pool_uses_efi(vdev_root))
2362 return (EZFS_POOL_INVALARG);
2363 #endif
2364
2365 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2366 B_FALSE);
2367
2368 /* No online devices */
2369 if (rsz == 0)
2370 return (EZFS_NODEVICE);
2371
2372 return (0);
2373 }
2374
2375 /*
2376 * Get phys_path for a root pool
2377 * Return 0 on success; non-zero on failure.
2378 */
2379 int
2380 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2381 {
2382 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2383 phypath_size));
2384 }
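
/*
 * Example (illustrative sketch, not part of libzfs): fetching the
 * space-separated phys_path list for a bootable pool.  Only a return
 * value of 0 means the buffer contents are valid.
 */
static void
example_physpath(zpool_handle_t *zhp)
{
	char physpath[MAXPATHLEN];

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("boot device paths: %s\n", physpath);
}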
2385
2386 /*
2387 * If the device has been dynamically expanded then we need to relabel
2388 * the disk to use the new unallocated space.
2389 */
2390 static int
2391 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2392 {
2393 int fd, error;
2394
2395 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2396 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2397 "relabel '%s': unable to open device: %d"), path, errno);
2398 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2399 }
2400
2401 /*
2402 * It's possible that we might encounter an error if the device
2403 * does not have any unallocated space left. If so, we simply
2404 * ignore that error and continue on.
2405 *
2406 * Also, we don't call efi_rescan() - that would just return EBUSY.
2407 * The module will do it for us in vdev_disk_open().
2408 */
2409 error = efi_use_whole_disk(fd);
2410 (void) close(fd);
2411 if (error && error != VT_ENOSPC) {
2412 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2413 "relabel '%s': unable to read disk capacity"), path);
2414 return (zfs_error(hdl, EZFS_NOCAP, msg));
2415 }
2416 return (0);
2417 }
2418
2419 /*
2420 * Bring the specified vdev online. The 'flags' parameter is a set of the
2421 * ZFS_ONLINE_* flags.
2422 */
2423 int
2424 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2425 vdev_state_t *newstate)
2426 {
2427 zfs_cmd_t zc = {"\0"};
2428 char msg[1024];
2429 nvlist_t *tgt;
2430 boolean_t avail_spare, l2cache, islog;
2431 libzfs_handle_t *hdl = zhp->zpool_hdl;
2432 int error;
2433
2434 if (flags & ZFS_ONLINE_EXPAND) {
2435 (void) snprintf(msg, sizeof (msg),
2436 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2437 } else {
2438 (void) snprintf(msg, sizeof (msg),
2439 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2440 }
2441
2442 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2443 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2444 &islog)) == NULL)
2445 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2446
2447 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2448
2449 if (avail_spare)
2450 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2451
2452 if (flags & ZFS_ONLINE_EXPAND ||
2453 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2454 uint64_t wholedisk = 0;
2455
2456 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2457 &wholedisk);
2458
2459 /*
2460 * XXX - L2ARC 1.0 devices can't support expansion.
2461 */
2462 if (l2cache) {
2463 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2464 "cannot expand cache devices"));
2465 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2466 }
2467
2468 if (wholedisk) {
2469 const char *fullpath = path;
2470 char buf[MAXPATHLEN];
2471
2472 if (path[0] != '/') {
2473 error = zfs_resolve_shortname(path, buf,
2474 sizeof (buf));
2475 if (error != 0)
2476 return (zfs_error(hdl, EZFS_NODEVICE,
2477 msg));
2478
2479 fullpath = buf;
2480 }
2481
2482 error = zpool_relabel_disk(hdl, fullpath, msg);
2483 if (error != 0)
2484 return (error);
2485 }
2486 }
2487
2488 zc.zc_cookie = VDEV_STATE_ONLINE;
2489 zc.zc_obj = flags;
2490
2491 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2492 if (errno == EINVAL) {
2493 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2494 "from this pool into a new one. Use '%s' "
2495 "instead"), "zpool detach");
2496 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2497 }
2498 return (zpool_standard_error(hdl, errno, msg));
2499 }
2500
2501 *newstate = zc.zc_cookie;
2502 return (0);
2503 }
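
/*
 * Example (illustrative sketch, not part of libzfs): onlining a device
 * with expansion requested and confirming it came back healthy.  The
 * device name "sda" is hypothetical.
 */
static int
example_online_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;

	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate) != 0)
		return (-1);

	return (newstate == VDEV_STATE_HEALTHY ? 0 : -1);
}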
2504
2505 /*
2506 * Take the specified vdev offline
2507 */
2508 int
2509 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2510 {
2511 zfs_cmd_t zc = {"\0"};
2512 char msg[1024];
2513 nvlist_t *tgt;
2514 boolean_t avail_spare, l2cache;
2515 libzfs_handle_t *hdl = zhp->zpool_hdl;
2516
2517 (void) snprintf(msg, sizeof (msg),
2518 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2519
2520 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2521 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2522 NULL)) == NULL)
2523 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2524
2525 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2526
2527 if (avail_spare)
2528 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2529
2530 zc.zc_cookie = VDEV_STATE_OFFLINE;
2531 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2532
2533 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2534 return (0);
2535
2536 switch (errno) {
2537 case EBUSY:
2538
2539 /*
2540 * There are no other replicas of this device.
2541 */
2542 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2543
2544 case EEXIST:
2545 /*
2546 * The log device has unplayed logs
2547 */
2548 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2549
2550 default:
2551 return (zpool_standard_error(hdl, errno, msg));
2552 }
2553 }
2554
2555 /*
2556 * Mark the given vdev faulted.
2557 */
2558 int
2559 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2560 {
2561 zfs_cmd_t zc = {"\0"};
2562 char msg[1024];
2563 libzfs_handle_t *hdl = zhp->zpool_hdl;
2564
2565 (void) snprintf(msg, sizeof (msg),
2566 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2567
2568 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2569 zc.zc_guid = guid;
2570 zc.zc_cookie = VDEV_STATE_FAULTED;
2571 zc.zc_obj = aux;
2572
2573 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2574 return (0);
2575
2576 switch (errno) {
2577 case EBUSY:
2578
2579 /*
2580 * There are no other replicas of this device.
2581 */
2582 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2583
2584 default:
2585 return (zpool_standard_error(hdl, errno, msg));
2586 }
2587
2588 }
2589
2590 /*
2591 * Mark the given vdev degraded.
2592 */
2593 int
2594 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2595 {
2596 zfs_cmd_t zc = {"\0"};
2597 char msg[1024];
2598 libzfs_handle_t *hdl = zhp->zpool_hdl;
2599
2600 (void) snprintf(msg, sizeof (msg),
2601 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2602
2603 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2604 zc.zc_guid = guid;
2605 zc.zc_cookie = VDEV_STATE_DEGRADED;
2606 zc.zc_obj = aux;
2607
2608 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2609 return (0);
2610
2611 return (zpool_standard_error(hdl, errno, msg));
2612 }
2613
2614 /*
2615 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2616 * a hot spare.
2617 */
2618 static boolean_t
2619 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2620 {
2621 nvlist_t **child;
2622 uint_t c, children;
2623 char *type;
2624
2625 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2626 &children) == 0) {
2627 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2628 &type) == 0);
2629
2630 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2631 children == 2 && child[which] == tgt)
2632 return (B_TRUE);
2633
2634 for (c = 0; c < children; c++)
2635 if (is_replacing_spare(child[c], tgt, which))
2636 return (B_TRUE);
2637 }
2638
2639 return (B_FALSE);
2640 }
2641
2642 /*
2643 * Attach new_disk (fully described by nvroot) to old_disk.
2644 * If 'replacing' is specified, the new disk will replace the old one.
2645 */
2646 int
2647 zpool_vdev_attach(zpool_handle_t *zhp,
2648 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2649 {
2650 zfs_cmd_t zc = {"\0"};
2651 char msg[1024];
2652 int ret;
2653 nvlist_t *tgt;
2654 boolean_t avail_spare, l2cache, islog;
2655 uint64_t val;
2656 char *newname;
2657 nvlist_t **child;
2658 uint_t children;
2659 nvlist_t *config_root;
2660 libzfs_handle_t *hdl = zhp->zpool_hdl;
2661 boolean_t rootpool = zpool_is_bootable(zhp);
2662
2663 if (replacing)
2664 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2665 "cannot replace %s with %s"), old_disk, new_disk);
2666 else
2667 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2668 "cannot attach %s to %s"), new_disk, old_disk);
2669
2670 #if defined(__sun__) || defined(__sun)
2671 /*
2672 * If this is a root pool, make sure that we're not attaching an
2673 * EFI labeled device.
2674 */
2675 if (rootpool && pool_uses_efi(nvroot)) {
2676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2677 "EFI labeled devices are not supported on root pools."));
2678 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2679 }
2680 #endif
2681
2682 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2683 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2684 &islog)) == 0)
2685 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2686
2687 if (avail_spare)
2688 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2689
2690 if (l2cache)
2691 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2692
2693 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2694 zc.zc_cookie = replacing;
2695
2696 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2697 &child, &children) != 0 || children != 1) {
2698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2699 "new device must be a single disk"));
2700 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2701 }
2702
2703 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2704 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2705
2706 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2707 return (-1);
2708
2709 /*
2710 * If the target is a hot spare that has been swapped in, we can only
2711 * replace it with another hot spare.
2712 */
2713 if (replacing &&
2714 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2715 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2716 NULL) == NULL || !avail_spare) &&
2717 is_replacing_spare(config_root, tgt, 1)) {
2718 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2719 "can only be replaced by another hot spare"));
2720 free(newname);
2721 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2722 }
2723
2724 free(newname);
2725
2726 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2727 return (-1);
2728
2729 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2730
2731 zcmd_free_nvlists(&zc);
2732
2733 if (ret == 0) {
2734 if (rootpool) {
2735 /*
2736 * XXX need a better way to prevent user from
2737 * booting up a half-baked vdev.
2738 */
2739 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2740 "sure to wait until resilver is done "
2741 "before rebooting.\n"));
2742 }
2743 return (0);
2744 }
2745
2746 switch (errno) {
2747 case ENOTSUP:
2748 /*
2749 * Can't attach to or replace this type of vdev.
2750 */
2751 if (replacing) {
2752 uint64_t version = zpool_get_prop_int(zhp,
2753 ZPOOL_PROP_VERSION, NULL);
2754
2755 if (islog)
2756 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2757 "cannot replace a log with a spare"));
2758 else if (version >= SPA_VERSION_MULTI_REPLACE)
2759 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2760 "already in replacing/spare config; wait "
2761 "for completion or use 'zpool detach'"));
2762 else
2763 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2764 "cannot replace a replacing device"));
2765 } else {
2766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2767 "can only attach to mirrors and top-level "
2768 "disks"));
2769 }
2770 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2771 break;
2772
2773 case EINVAL:
2774 /*
2775 * The new device must be a single disk.
2776 */
2777 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2778 "new device must be a single disk"));
2779 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2780 break;
2781
2782 case EBUSY:
2783 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2784 new_disk);
2785 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2786 break;
2787
2788 case EOVERFLOW:
2789 /*
2790 * The new device is too small.
2791 */
2792 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2793 "device is too small"));
2794 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2795 break;
2796
2797 case EDOM:
2798 /*
2799 * The new device has a different optimal sector size.
2800 */
2801 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2802 "new device has a different optimal sector size; use the "
2803 "option '-o ashift=N' to override the optimal size"));
2804 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2805 break;
2806
2807 case ENAMETOOLONG:
2808 /*
2809 * The resulting top-level vdev spec won't fit in the label.
2810 */
2811 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2812 break;
2813
2814 default:
2815 (void) zpool_standard_error(hdl, errno, msg);
2816 }
2817
2818 return (-1);
2819 }
2820
2821 /*
2822 * Detach the specified device.
2823 */
2824 int
2825 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2826 {
2827 zfs_cmd_t zc = {"\0"};
2828 char msg[1024];
2829 nvlist_t *tgt;
2830 boolean_t avail_spare, l2cache;
2831 libzfs_handle_t *hdl = zhp->zpool_hdl;
2832
2833 (void) snprintf(msg, sizeof (msg),
2834 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2835
2836 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2837 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2838 NULL)) == 0)
2839 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2840
2841 if (avail_spare)
2842 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2843
2844 if (l2cache)
2845 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2846
2847 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2848
2849 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2850 return (0);
2851
2852 switch (errno) {
2853
2854 case ENOTSUP:
2855 /*
2856 * Can't detach from this type of vdev.
2857 */
2858 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2859 "applicable to mirror and replacing vdevs"));
2860 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2861 break;
2862
2863 case EBUSY:
2864 /*
2865 * There are no other replicas of this device.
2866 */
2867 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2868 break;
2869
2870 default:
2871 (void) zpool_standard_error(hdl, errno, msg);
2872 }
2873
2874 return (-1);
2875 }
2876
2877 /*
2878 * Find a mirror vdev in the source nvlist.
2879 *
2880 * The mchild array contains a list of disks in one of the top-level mirrors
2881 * of the source pool. The schild array contains a list of disks that the
2882 * user specified on the command line. We loop over the mchild array to
2883 * see if any entry in the schild array matches.
2884 *
2885 * If a disk in the mchild array is found in the schild array, we return
2886 * the index of that entry. Otherwise we return -1.
2887 */
2888 static int
2889 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2890 nvlist_t **schild, uint_t schildren)
2891 {
2892 uint_t mc;
2893
2894 for (mc = 0; mc < mchildren; mc++) {
2895 uint_t sc;
2896 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2897 mchild[mc], 0);
2898
2899 for (sc = 0; sc < schildren; sc++) {
2900 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2901 schild[sc], 0);
2902 boolean_t result = (strcmp(mpath, spath) == 0);
2903
2904 free(spath);
2905 if (result) {
2906 free(mpath);
2907 return (mc);
2908 }
2909 }
2910
2911 free(mpath);
2912 }
2913
2914 return (-1);
2915 }
2916
2917 /*
2918 * Split a mirror pool. If '*newroot' is NULL, a new nvlist is
2919 * generated and it is the responsibility of the caller to free it.
2920 */
2921 int
2922 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2923 nvlist_t *props, splitflags_t flags)
2924 {
2925 zfs_cmd_t zc = {"\0"};
2926 char msg[1024];
2927 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2928 nvlist_t **varray = NULL, *zc_props = NULL;
2929 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2930 libzfs_handle_t *hdl = zhp->zpool_hdl;
2931 uint64_t vers;
2932 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2933 int retval = 0;
2934
2935 (void) snprintf(msg, sizeof (msg),
2936 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2937
2938 if (!zpool_name_valid(hdl, B_FALSE, newname))
2939 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2940
2941 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2942 (void) fprintf(stderr, gettext("Internal error: unable to "
2943 "retrieve pool configuration\n"));
2944 return (-1);
2945 }
2946
2947 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2948 == 0);
2949 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2950
2951 if (props) {
2952 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2953 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2954 props, vers, flags, msg)) == NULL)
2955 return (-1);
2956 }
2957
2958 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2959 &children) != 0) {
2960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2961 "Source pool is missing vdev tree"));
2962 if (zc_props)
2963 nvlist_free(zc_props);
2964 return (-1);
2965 }
2966
2967 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2968 vcount = 0;
2969
2970 if (*newroot == NULL ||
2971 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2972 &newchild, &newchildren) != 0)
2973 newchildren = 0;
2974
2975 for (c = 0; c < children; c++) {
2976 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2977 char *type;
2978 nvlist_t **mchild, *vdev;
2979 uint_t mchildren;
2980 int entry;
2981
2982 /*
2983 * Unlike cache & spares, slogs are stored in the
2984 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2985 */
2986 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2987 &is_log);
2988 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2989 &is_hole);
2990 if (is_log || is_hole) {
2991 /*
2992 * Create a hole vdev and put it in the config.
2993 */
2994 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2995 goto out;
2996 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2997 VDEV_TYPE_HOLE) != 0)
2998 goto out;
2999 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3000 1) != 0)
3001 goto out;
3002 if (lastlog == 0)
3003 lastlog = vcount;
3004 varray[vcount++] = vdev;
3005 continue;
3006 }
3007 lastlog = 0;
3008 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3009 == 0);
3010 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3011 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3012 "Source pool must be composed only of mirrors\n"));
3013 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3014 goto out;
3015 }
3016
3017 verify(nvlist_lookup_nvlist_array(child[c],
3018 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3019
3020 /* find or add an entry for this top-level vdev */
3021 if (newchildren > 0 &&
3022 (entry = find_vdev_entry(zhp, mchild, mchildren,
3023 newchild, newchildren)) >= 0) {
3024 /* We found a disk that the user specified. */
3025 vdev = mchild[entry];
3026 ++found;
3027 } else {
3028 /* User didn't specify a disk for this vdev. */
3029 vdev = mchild[mchildren - 1];
3030 }
3031
3032 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3033 goto out;
3034 }
3035
3036 /* did we find every disk the user specified? */
3037 if (found != newchildren) {
3038 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3039 "include at most one disk from each mirror"));
3040 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3041 goto out;
3042 }
3043
3044 /* Prepare the nvlist for populating. */
3045 if (*newroot == NULL) {
3046 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3047 goto out;
3048 freelist = B_TRUE;
3049 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3050 VDEV_TYPE_ROOT) != 0)
3051 goto out;
3052 } else {
3053 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3054 }
3055
3056 /* Add all the children we found */
3057 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3058 lastlog == 0 ? vcount : lastlog) != 0)
3059 goto out;
3060
3061 /*
3062 * If we're just doing a dry run, exit now with success.
3063 */
3064 if (flags.dryrun) {
3065 memory_err = B_FALSE;
3066 freelist = B_FALSE;
3067 goto out;
3068 }
3069
3070 /* now build up the config list & call the ioctl */
3071 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3072 goto out;
3073
3074 if (nvlist_add_nvlist(newconfig,
3075 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3076 nvlist_add_string(newconfig,
3077 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3078 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3079 goto out;
3080
3081 /*
3082 * The new pool is automatically part of the namespace unless we
3083 * explicitly export it.
3084 */
3085 if (!flags.import)
3086 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3087 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3088 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3089 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3090 goto out;
3091 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3092 goto out;
3093
3094 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3095 retval = zpool_standard_error(hdl, errno, msg);
3096 goto out;
3097 }
3098
3099 freelist = B_FALSE;
3100 memory_err = B_FALSE;
3101
3102 out:
3103 if (varray != NULL) {
3104 int v;
3105
3106 for (v = 0; v < vcount; v++)
3107 nvlist_free(varray[v]);
3108 free(varray);
3109 }
3110 zcmd_free_nvlists(&zc);
3111 if (zc_props)
3112 nvlist_free(zc_props);
3113 if (newconfig)
3114 nvlist_free(newconfig);
3115 if (freelist) {
3116 nvlist_free(*newroot);
3117 *newroot = NULL;
3118 }
3119
3120 if (retval != 0)
3121 return (retval);
3122
3123 if (memory_err)
3124 return (no_memory(hdl));
3125
3126 return (0);
3127 }
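
/*
 * Example (illustrative sketch, not part of libzfs): a dry-run split.
 * With flags.dryrun set, the function stops before the ioctl and hands
 * back the would-be vdev tree in *newroot for the caller to inspect
 * and free.  The new pool name "tank2" is hypothetical.
 */
static int
example_split_dryrun(zpool_handle_t *zhp)
{
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;
	int err;

	flags.dryrun = 1;
	err = zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags);
	if (err == 0)
		nvlist_free(newroot);
	return (err);
}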
3128
3129 /*
3130 * Remove the given device. Currently, this is supported only for hot spares
3131 * and level 2 cache devices.
3132 */
3133 int
3134 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3135 {
3136 zfs_cmd_t zc = {"\0"};
3137 char msg[1024];
3138 nvlist_t *tgt;
3139 boolean_t avail_spare, l2cache, islog;
3140 libzfs_handle_t *hdl = zhp->zpool_hdl;
3141 uint64_t version;
3142
3143 (void) snprintf(msg, sizeof (msg),
3144 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3145
3146 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3147 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3148 &islog)) == 0)
3149 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3150 /*
3151 * XXX - this should just go away.
3152 */
3153 if (!avail_spare && !l2cache && !islog) {
3154 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3155 "only inactive hot spares, cache, top-level, "
3156 "or log devices can be removed"));
3157 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3158 }
3159
3160 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3161 if (islog && version < SPA_VERSION_HOLES) {
3162 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3163 "pool must be upgrade to support log removal"));
3164 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3165 }
3166
3167 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3168
3169 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3170 return (0);
3171
3172 return (zpool_standard_error(hdl, errno, msg));
3173 }
3174
3175 /*
3176 * Clear the errors for the pool, or the particular device if specified.
3177 */
3178 int
3179 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3180 {
3181 zfs_cmd_t zc = {"\0"};
3182 char msg[1024];
3183 nvlist_t *tgt;
3184 zpool_rewind_policy_t policy;
3185 boolean_t avail_spare, l2cache;
3186 libzfs_handle_t *hdl = zhp->zpool_hdl;
3187 nvlist_t *nvi = NULL;
3188 int error;
3189
3190 if (path)
3191 (void) snprintf(msg, sizeof (msg),
3192 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3193 path);
3194 else
3195 (void) snprintf(msg, sizeof (msg),
3196 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3197 zhp->zpool_name);
3198
3199 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3200 if (path) {
3201 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3202 &l2cache, NULL)) == 0)
3203 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3204
3205 /*
3206 * Don't allow error clearing for hot spares. Do allow
3207 * error clearing for l2cache devices.
3208 */
3209 if (avail_spare)
3210 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3211
3212 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3213 &zc.zc_guid) == 0);
3214 }
3215
3216 zpool_get_rewind_policy(rewindnvl, &policy);
3217 zc.zc_cookie = policy.zrp_request;
3218
3219 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3220 return (-1);
3221
3222 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3223 return (-1);
3224
3225 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3226 errno == ENOMEM) {
3227 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3228 zcmd_free_nvlists(&zc);
3229 return (-1);
3230 }
3231 }
3232
3233 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3234 errno != EPERM && errno != EACCES)) {
3235 if (policy.zrp_request &
3236 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3237 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3238 zpool_rewind_exclaim(hdl, zc.zc_name,
3239 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3240 nvi);
3241 nvlist_free(nvi);
3242 }
3243 zcmd_free_nvlists(&zc);
3244 return (0);
3245 }
3246
3247 zcmd_free_nvlists(&zc);
3248 return (zpool_standard_error(hdl, errno, msg));
3249 }
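
/*
 * Example (illustrative sketch, not part of libzfs): clearing all
 * errors on a pool with an empty (i.e. default, no-rewind) policy
 * nvlist, the common `zpool clear <pool>` case.
 */
static int
example_clear_pool(zpool_handle_t *zhp)
{
	nvlist_t *policy;
	int err;

	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0)
		return (-1);
	err = zpool_clear(zhp, NULL, policy);
	nvlist_free(policy);
	return (err);
}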
3250
3251 /*
3252 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3253 */
3254 int
3255 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3256 {
3257 zfs_cmd_t zc = {"\0"};
3258 char msg[1024];
3259 libzfs_handle_t *hdl = zhp->zpool_hdl;
3260
3261 (void) snprintf(msg, sizeof (msg),
3262 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3263 (u_longlong_t)guid);
3264
3265 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3266 zc.zc_guid = guid;
3267 zc.zc_cookie = ZPOOL_NO_REWIND;
3268
3269 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3270 return (0);
3271
3272 return (zpool_standard_error(hdl, errno, msg));
3273 }
3274
3275 /*
3276 * Change the GUID for a pool.
3277 */
3278 int
3279 zpool_reguid(zpool_handle_t *zhp)
3280 {
3281 char msg[1024];
3282 libzfs_handle_t *hdl = zhp->zpool_hdl;
3283 zfs_cmd_t zc = {"\0"};
3284
3285 (void) snprintf(msg, sizeof (msg),
3286 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3287
3288 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3289 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3290 return (0);
3291
3292 return (zpool_standard_error(hdl, errno, msg));
3293 }
3294
3295 /*
3296 * Reopen the pool.
3297 */
3298 int
3299 zpool_reopen(zpool_handle_t *zhp)
3300 {
3301 zfs_cmd_t zc = {"\0"};
3302 char msg[1024];
3303 libzfs_handle_t *hdl = zhp->zpool_hdl;
3304
3305 (void) snprintf(msg, sizeof (msg),
3306 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3307 zhp->zpool_name);
3308
3309 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3310 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3311 return (0);
3312 return (zpool_standard_error(hdl, errno, msg));
3313 }
3314
3315 #if defined(__sun__) || defined(__sun)
3316 /*
3317 * Convert from a devid string to a path.
3318 */
3319 static char *
3320 devid_to_path(char *devid_str)
3321 {
3322 ddi_devid_t devid;
3323 char *minor;
3324 char *path;
3325 devid_nmlist_t *list = NULL;
3326 int ret;
3327
3328 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3329 return (NULL);
3330
3331 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3332
3333 devid_str_free(minor);
3334 devid_free(devid);
3335
3336 if (ret != 0)
3337 return (NULL);
3338
3339 /*
3340 * In case the strdup() fails, we will just return NULL below.
3341 */
3342 path = strdup(list[0].devname);
3343
3344 devid_free_nmlist(list);
3345
3346 return (path);
3347 }
3348
3349 /*
3350 * Convert from a path to a devid string.
3351 */
3352 static char *
3353 path_to_devid(const char *path)
3354 {
3355 int fd;
3356 ddi_devid_t devid;
3357 char *minor, *ret;
3358
3359 if ((fd = open(path, O_RDONLY)) < 0)
3360 return (NULL);
3361
3362 minor = NULL;
3363 ret = NULL;
3364 if (devid_get(fd, &devid) == 0) {
3365 if (devid_get_minor_name(fd, &minor) == 0)
3366 ret = devid_str_encode(devid, minor);
3367 if (minor != NULL)
3368 devid_str_free(minor);
3369 devid_free(devid);
3370 }
3371 (void) close(fd);
3372
3373 return (ret);
3374 }
3375
3376 /*
3377 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3378 * ignore any failure here, since a common case is for an unprivileged user to
3379 * type 'zpool status', and we'll display the correct information anyway.
3380 */
3381 static void
3382 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3383 {
3384 zfs_cmd_t zc = {"\0"};
3385
3386 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3387 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3388 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3389 &zc.zc_guid) == 0);
3390
3391 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3392 }
3393 #endif /* sun */
3394
3395 /*
3396 * Remove partition suffix from a vdev path. Partition suffixes may take three
3397 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3398 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3399 * third case only occurs when preceded by a string matching the regular
3400 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3401 */
3402 static char *
3403 strip_partition(libzfs_handle_t *hdl, char *path)
3404 {
3405 char *tmp = zfs_strdup(hdl, path);
3406 char *part = NULL, *d = NULL;
3407
3408 if ((part = strstr(tmp, "-part")) && part != tmp) {
3409 d = part + 5;
3410 } else if ((part = strrchr(tmp, 'p')) &&
3411 part > tmp + 1 && isdigit(*(part-1))) {
3412 d = part + 1;
3413 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3414 tmp[1] == 'd') {
3415 for (d = &tmp[2]; isalpha(*d); part = ++d);
3416 } else if (strncmp("xvd", tmp, 3) == 0) {
3417 for (d = &tmp[3]; isalpha(*d); part = ++d);
3418 }
3419 if (part && d && *d != '\0') {
3420 for (; isdigit(*d); d++);
3421 if (*d == '\0')
3422 *part = '\0';
3423 }
3424 return (tmp);
3425 }
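
/*
 * Example (illustrative sketch, not part of libzfs): the suffix forms
 * strip_partition() handles.  Each call returns an allocated copy that
 * the caller must free; names carrying no partition suffix are
 * returned unchanged.
 */
static void
example_strip_partition(libzfs_handle_t *hdl)
{
	char *names[] = { "sdb1", "md126p2", "nvme-disk-part3", "vdc" };
	int i;

	for (i = 0; i < 4; i++) {
		char *stripped = strip_partition(hdl, names[i]);

		/* yields "sdb", "md126", "nvme-disk", "vdc" */
		(void) printf("%s -> %s\n", names[i], stripped);
		free(stripped);
	}
}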
3426
3427 #define PATH_BUF_LEN 64
3428
3429 /*
3430 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3431 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3432 * We also check if this is a whole disk, in which case we strip off the
3433 * trailing 's0' slice name.
3434 *
3435 * This routine is also responsible for identifying when disks have been
3436 * reconfigured in a new location. The kernel will have opened the device by
3437 * devid, but the path will still refer to the old location. To catch this, we
3438 * first do a path -> devid translation (which is fast for the common case). If
3439 * the devid matches, we're done. If not, we do a reverse devid -> path
3440 * translation and issue the appropriate ioctl() to update the path of the vdev.
3441 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3442 * of these checks.
3443 */
3444 char *
3445 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3446 int name_flags)
3447 {
3448 char *path, *type, *env;
3449 uint64_t value;
3450 char buf[PATH_BUF_LEN];
3451 char tmpbuf[PATH_BUF_LEN];
3452
3453 env = getenv("ZPOOL_VDEV_NAME_PATH");
3454 if (env && (strtoul(env, NULL, 0) > 0 ||
3455 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3456 name_flags |= VDEV_NAME_PATH;
3457
3458 env = getenv("ZPOOL_VDEV_NAME_GUID");
3459 if (env && (strtoul(env, NULL, 0) > 0 ||
3460 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3461 name_flags |= VDEV_NAME_GUID;
3462
3463 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3464 if (env && (strtoul(env, NULL, 0) > 0 ||
3465 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3466 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3467
3468 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3469 name_flags & VDEV_NAME_GUID) {
3470 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3471 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3472 path = buf;
3473 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3474 #if defined(__sun__) || defined(__sun)
3475 /*
3476 * Live VDEV path updates to a kernel VDEV during a
3477 * zpool_vdev_name lookup are not supported on Linux.
3478 */
3479 char *devid;
3480 vdev_stat_t *vs;
3481 uint_t vsc;
3482
3483 /*
3484 * If the device is dead (faulted, offline, etc) then don't
3485 * bother opening it. Otherwise we may be forcing the user to
3486 * open a misbehaving device, which can have undesirable
3487 * effects.
3488 */
3489 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3490 (uint64_t **)&vs, &vsc) != 0 ||
3491 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3492 zhp != NULL &&
3493 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3494 /*
3495 * Determine if the current path is correct.
3496 */
3497 char *newdevid = path_to_devid(path);
3498
3499 if (newdevid == NULL ||
3500 strcmp(devid, newdevid) != 0) {
3501 char *newpath;
3502
3503 if ((newpath = devid_to_path(devid)) != NULL) {
3504 /*
3505 * Update the path appropriately.
3506 */
3507 set_path(zhp, nv, newpath);
3508 if (nvlist_add_string(nv,
3509 ZPOOL_CONFIG_PATH, newpath) == 0)
3510 verify(nvlist_lookup_string(nv,
3511 ZPOOL_CONFIG_PATH,
3512 &path) == 0);
3513 free(newpath);
3514 }
3515 }
3516
3517 if (newdevid)
3518 devid_str_free(newdevid);
3519 }
3520 #endif /* sun */
3521
3522 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3523 char *rp = realpath(path, NULL);
3524 if (rp) {
3525 strlcpy(buf, rp, sizeof (buf));
3526 path = buf;
3527 free(rp);
3528 }
3529 }
3530
3531 /*
3532 * For a block device only use the name.
3533 */
3534 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3535 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3536 !(name_flags & VDEV_NAME_PATH)) {
3537 path = strrchr(path, '/');
3538 path++;
3539 }
3540
3541 /*
3542 * Remove the partition from the path if this is a whole disk.
3543 */
3544 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3545 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3546 return (strip_partition(hdl, path));
3547 }
3548 } else {
3549 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3550
3551 /*
3552 * If it's a raidz device, we need to stick in the parity level.
3553 */
3554 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3555
3556 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3557 &value) == 0);
3558 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3559 (u_longlong_t)value);
3560 path = buf;
3561 }
3562
3563 /*
3564 * We identify each top-level vdev by using a <type-id>
3565 * naming convention.
3566 */
3567 if (name_flags & VDEV_NAME_TYPE_ID) {
3568 uint64_t id;
3569
3570 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3571 &id) == 0);
3572 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3573 path, (u_longlong_t)id);
3574 path = tmpbuf;
3575 }
3576 }
3577
3578 return (zfs_strdup(hdl, path));
3579 }
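
/*
 * Example (illustrative sketch, not part of libzfs): requesting the
 * <type-id> form of a top-level vdev name, as `zpool status` prints
 * for interior vdevs.  'nv' is assumed to be a top-level vdev nvlist
 * from the pool's config.
 */
static void
example_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *name = zpool_vdev_name(hdl, zhp, nv, VDEV_NAME_TYPE_ID);

	if (name != NULL) {
		(void) printf("%s\n", name);	/* e.g. "raidz2-0" */
		free(name);
	}
}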
3580
3581 static int
3582 zbookmark_mem_compare(const void *a, const void *b)
3583 {
3584 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3585 }
3586
3587 /*
3588 * Retrieve the persistent error log, uniquify the members, and return to the
3589 * caller.
3590 */
3591 int
3592 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3593 {
3594 zfs_cmd_t zc = {"\0"};
3595 uint64_t count;
3596 zbookmark_phys_t *zb = NULL;
3597 int i;
3598
3599 /*
3600 * Retrieve the raw error list from the kernel. If the number of errors
3601 * has increased, allocate more space and continue until we get the
3602 * entire list.
3603 */
3604 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3605 &count) == 0);
3606 if (count == 0)
3607 return (0);
3608 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3609 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3610 return (-1);
3611 zc.zc_nvlist_dst_size = count;
3612 (void) strcpy(zc.zc_name, zhp->zpool_name);
3613 for (;;) {
3614 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3615 &zc) != 0) {
3616 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3617 if (errno == ENOMEM) {
3618 void *dst;
3619
3620 count = zc.zc_nvlist_dst_size;
3621 dst = zfs_alloc(zhp->zpool_hdl, count *
3622 sizeof (zbookmark_phys_t));
3623 if (dst == NULL)
3624 return (-1);
3625 zc.zc_nvlist_dst = (uintptr_t)dst;
3626 } else {
3627 return (-1);
3628 }
3629 } else {
3630 break;
3631 }
3632 }
3633
3634 /*
3635 * Sort the resulting bookmarks. This is a little confusing due to the
3636 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3637 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3638 * _not_ copied as part of the process. So we point the start of our
3639 * array appropriately and decrement the total number of elements.
3640 */
3641 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3642 zc.zc_nvlist_dst_size;
3643 count -= zc.zc_nvlist_dst_size;
3644
3645 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3646
3647 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3648
3649 /*
3650 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3651 */
3652 for (i = 0; i < count; i++) {
3653 nvlist_t *nv;
3654
3655 /* ignoring zb_blkid and zb_level for now */
3656 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3657 zb[i-1].zb_object == zb[i].zb_object)
3658 continue;
3659
3660 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3661 goto nomem;
3662 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3663 zb[i].zb_objset) != 0) {
3664 nvlist_free(nv);
3665 goto nomem;
3666 }
3667 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3668 zb[i].zb_object) != 0) {
3669 nvlist_free(nv);
3670 goto nomem;
3671 }
3672 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3673 nvlist_free(nv);
3674 goto nomem;
3675 }
3676 nvlist_free(nv);
3677 }
3678
3679 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3680 return (0);
3681
3682 nomem:
3683 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3684 return (no_memory(zhp->zpool_hdl));
3685 }
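
/*
 * Example (illustrative sketch, not part of libzfs): walking the
 * uniquified error log and resolving each entry to a pathname, roughly
 * what `zpool status -v` does with this data.
 */
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}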
3686
3687 /*
3688 * Upgrade a ZFS pool to the latest on-disk version.
3689 */
3690 int
3691 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3692 {
3693 zfs_cmd_t zc = {"\0"};
3694 libzfs_handle_t *hdl = zhp->zpool_hdl;
3695
3696 (void) strcpy(zc.zc_name, zhp->zpool_name);
3697 zc.zc_cookie = new_version;
3698
3699 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3700 return (zpool_standard_error_fmt(hdl, errno,
3701 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3702 zhp->zpool_name));
3703 return (0);
3704 }
3705
3706 void
3707 zfs_save_arguments(int argc, char **argv, char *string, int len)
3708 {
3709 int i;
3710
3711 (void) strlcpy(string, basename(argv[0]), len);
3712 for (i = 1; i < argc; i++) {
3713 (void) strlcat(string, " ", len);
3714 (void) strlcat(string, argv[i], len);
3715 }
3716 }
3717
3718 int
3719 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3720 {
3721 zfs_cmd_t zc = {"\0"};
3722 nvlist_t *args;
3723 int err;
3724
3725 args = fnvlist_alloc();
3726 fnvlist_add_string(args, "message", message);
3727 err = zcmd_write_src_nvlist(hdl, &zc, args);
3728 if (err == 0)
3729 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3730 nvlist_free(args);
3731 zcmd_free_nvlists(&zc);
3732 return (err);
3733 }
3734
3735 /*
3736 * Perform ioctl to get some command history of a pool.
3737 *
3738 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3739 * logical offset of the history buffer to start reading from.
3740 *
3741 * Upon return, 'off' is the next logical offset to read from and
3742 * 'len' is the actual amount of bytes read into 'buf'.
3743 */
3744 static int
3745 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3746 {
3747 zfs_cmd_t zc = {"\0"};
3748 libzfs_handle_t *hdl = zhp->zpool_hdl;
3749
3750 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3751
3752 zc.zc_history = (uint64_t)(uintptr_t)buf;
3753 zc.zc_history_len = *len;
3754 zc.zc_history_offset = *off;
3755
3756 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3757 switch (errno) {
3758 case EPERM:
3759 return (zfs_error_fmt(hdl, EZFS_PERM,
3760 dgettext(TEXT_DOMAIN,
3761 "cannot show history for pool '%s'"),
3762 zhp->zpool_name));
3763 case ENOENT:
3764 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3765 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3766 "'%s'"), zhp->zpool_name));
3767 case ENOTSUP:
3768 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3769 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3770 "'%s', pool must be upgraded"), zhp->zpool_name));
3771 default:
3772 return (zpool_standard_error_fmt(hdl, errno,
3773 dgettext(TEXT_DOMAIN,
3774 "cannot get history for '%s'"), zhp->zpool_name));
3775 }
3776 }
3777
3778 *len = zc.zc_history_len;
3779 *off = zc.zc_history_offset;
3780
3781 return (0);
3782 }
3783
3784 /*
3785 * Process the buffer of nvlists, unpacking and storing each nvlist record
3786 * into 'records'. 'leftover' is set to the number of bytes that weren't
3787 * processed as there wasn't a complete record.
3788 */
3789 int
3790 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3791 nvlist_t ***records, uint_t *numrecords)
3792 {
3793 uint64_t reclen;
3794 nvlist_t *nv;
3795 int i;
3796
3797 while (bytes_read > sizeof (reclen)) {
3798
3799 /* get length of packed record (stored as little endian) */
3800 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3801 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3802
3803 if (bytes_read < sizeof (reclen) + reclen)
3804 break;
3805
3806 /* unpack record */
3807 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3808 return (ENOMEM);
3809 bytes_read -= sizeof (reclen) + reclen;
3810 buf += sizeof (reclen) + reclen;
3811
3812 /* add record to nvlist array; grow it when the count nears a power of two */
3813 (*numrecords)++;
3814 if (ISP2(*numrecords + 1)) {
3815 nvlist_t **tmprecords = realloc(*records,
3816 *numrecords * 2 * sizeof (nvlist_t *));
3817 if (tmprecords == NULL) {
3818 nvlist_free(nv);
3819 (*numrecords)--;
3820 return (ENOMEM);
3821 }
3822 *records = tmprecords;
3823 }
3824 (*records)[*numrecords - 1] = nv;
3819 }
3820
3821 *leftover = bytes_read;
3822 return (0);
3823 }
3824
3825 /*
3826 * Retrieve the command history of a pool.
3827 */
3828 int
3829 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3830 {
3831 char *buf;
3832 int buflen = 128 * 1024;
3833 uint64_t off = 0;
3834 nvlist_t **records = NULL;
3835 uint_t numrecords = 0;
3836 int err, i;
3837
3838 buf = malloc(buflen);
3839 if (buf == NULL)
3840 return (ENOMEM);
3841 do {
3842 uint64_t bytes_read = buflen;
3843 uint64_t leftover;
3844
3845 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3846 break;
3847
3848 /* if nothing else was read in, we're at EOF, just return */
3849 if (!bytes_read)
3850 break;
3851
3852 if ((err = zpool_history_unpack(buf, bytes_read,
3853 &leftover, &records, &numrecords)) != 0)
3854 break;
3855 off -= leftover;
3856 if (leftover == bytes_read) {
3857 /*
3858 * no progress made, because buffer is not big enough
3859 * to hold this record; resize and retry.
3860 */
3861 buflen *= 2;
3862 free(buf);
3863 buf = malloc(buflen);
3864 if (buf == NULL)
3865 return (ENOMEM);
3866 }
3867
3868 /* CONSTCOND */
3869 } while (1);
3870
3871 free(buf);
3872
3873 if (!err) {
3874 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3875 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3876 records, numrecords) == 0);
3877 }
3878 for (i = 0; i < numrecords; i++)
3879 nvlist_free(records[i]);
3880 free(records);
3881
3882 return (err);
3883 }
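
/*
 * Example (illustrative sketch, not part of libzfs): dumping the
 * command history, similar in spirit to `zpool history`.  Records
 * written by internal events have no ZPOOL_HIST_CMD string and are
 * skipped here.
 */
static int
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis = NULL;
	nvlist_t **records;
	uint_t numrecords, i;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return (-1);

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		char *cmd;

		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
	return (0);
}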
3884
3885 /*
3886 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3887 * If there is a new event available 'nvp' will contain a newly allocated
3888 * nvlist and 'dropped' will be set to the number of missed events since
3889 * the last call to this function. When 'nvp' is set to NULL it indicates
3890 * no new events are available. In either case the function returns 0 and
3891 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3892 * function will return a non-zero value. When the function is called in
3893 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3894 * it will not return until a new event is available.
3895 */
3896 int
3897 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3898 int *dropped, unsigned flags, int zevent_fd)
3899 {
3900 zfs_cmd_t zc = {"\0"};
3901 int error = 0;
3902
3903 *nvp = NULL;
3904 *dropped = 0;
3905 zc.zc_cleanup_fd = zevent_fd;
3906
3907 if (flags & ZEVENT_NONBLOCK)
3908 zc.zc_guid = ZEVENT_NONBLOCK;
3909
3910 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3911 return (-1);
3912
3913 retry:
3914 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3915 switch (errno) {
3916 case ESHUTDOWN:
3917 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3918 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3919 goto out;
3920 case ENOENT:
3921 /* Blocking error case should not occur */
3922 if (!(flags & ZEVENT_NONBLOCK))
3923 error = zpool_standard_error_fmt(hdl, errno,
3924 dgettext(TEXT_DOMAIN, "cannot get event"));
3925
3926 goto out;
3927 case ENOMEM:
3928 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3929 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3930 dgettext(TEXT_DOMAIN, "cannot get event"));
3931 goto out;
3932 } else {
3933 goto retry;
3934 }
3935 default:
3936 error = zpool_standard_error_fmt(hdl, errno,
3937 dgettext(TEXT_DOMAIN, "cannot get event"));
3938 goto out;
3939 }
3940 }
3941
3942 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3943 if (error != 0)
3944 goto out;
3945
3946 *dropped = (int)zc.zc_cookie;
3947 out:
3948 zcmd_free_nvlists(&zc);
3949
3950 return (error);
3951 }
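
/*
 * Example (illustrative sketch, not part of libzfs): draining every
 * queued event without blocking, as a monitoring daemon might do on
 * startup.  'zevent_fd' is assumed to be an open descriptor for the
 * ZFS control device.
 */
static void
example_drain_events(libzfs_handle_t *hdl, int zevent_fd)
{
	nvlist_t *nvl;
	int dropped;

	for (;;) {
		if (zpool_events_next(hdl, &nvl, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0)
			break;
		if (nvl == NULL)	/* queue is empty */
			break;
		if (dropped > 0)
			(void) fprintf(stderr, "missed %d events\n", dropped);
		nvlist_free(nvl);
	}
}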
3952
3953 /*
3954 * Clear all events.
3955 */
3956 int
3957 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3958 {
3959 zfs_cmd_t zc = {"\0"};
3960 char msg[1024];
3961
3962 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3963 "cannot clear events"));
3964
3965 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3966 return (zpool_standard_error_fmt(hdl, errno, msg));
3967
3968 if (count != NULL)
3969 *count = (int)zc.zc_cookie; /* # of events cleared */
3970
3971 return (0);
3972 }
3973
3974 /*
3975 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3976 * the passed zevent_fd file handle. On success zero is returned,
3977 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3978 */
3979 int
3980 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3981 {
3982 zfs_cmd_t zc = {"\0"};
3983 int error = 0;
3984
3985 zc.zc_guid = eid;
3986 zc.zc_cleanup_fd = zevent_fd;
3987
3988 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3989 switch (errno) {
3990 case ENOENT:
3991 error = zfs_error_fmt(hdl, EZFS_NOENT,
3992 dgettext(TEXT_DOMAIN, "cannot get event"));
3993 break;
3994
3995 case ENOMEM:
3996 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3997 dgettext(TEXT_DOMAIN, "cannot get event"));
3998 break;
3999
4000 default:
4001 error = zpool_standard_error_fmt(hdl, errno,
4002 dgettext(TEXT_DOMAIN, "cannot get event"));
4003 break;
4004 }
4005 }
4006
4007 return (error);
4008 }
4009
4010 void
4011 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4012 char *pathname, size_t len)
4013 {
4014 zfs_cmd_t zc = {"\0"};
4015 boolean_t mounted = B_FALSE;
4016 char *mntpnt = NULL;
4017 char dsname[MAXNAMELEN];
4018
4019 if (dsobj == 0) {
4020 /* special case for the MOS */
4021 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4022 (longlong_t)obj);
4023 return;
4024 }
4025
4026 /* get the dataset's name */
4027 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4028 zc.zc_obj = dsobj;
4029 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4030 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4031 /* just write out a path of two object numbers */
4032 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4033 (longlong_t)dsobj, (longlong_t)obj);
4034 return;
4035 }
4036 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4037
4038 /* find out if the dataset is mounted */
4039 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4040
4041 /* get the corrupted object's path */
4042 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4043 zc.zc_obj = obj;
4044 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4045 &zc) == 0) {
4046 if (mounted) {
4047 (void) snprintf(pathname, len, "%s%s", mntpnt,
4048 zc.zc_value);
4049 } else {
4050 (void) snprintf(pathname, len, "%s:%s",
4051 dsname, zc.zc_value);
4052 }
4053 } else {
4054 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4055 (longlong_t)obj);
4056 }
4057 free(mntpnt);
4058 }
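/*
 * Example (illustrative sketch): resolving a damaged object to a
 * user-visible path, in the style of "zpool status -v". The dsobj/obj
 * pair would normally come from the pool's persistent error log; the
 * function name is hypothetical.
 */
#if 0
static void
example_print_error_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj)
{
	char pathname[MAXPATHLEN];

	/* Falls back to "<dataset>:<0xobj>" when no path can be found. */
	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
	(void) printf("%s\n", pathname);
}
#endif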
4059
4060 /*
4061 * Read the EFI label from the config; if a label does not exist,
4062 * pass the error back to the caller. If the caller has passed a non-NULL
4063 * diskaddr argument then we set it to the starting address of the EFI
4064 * partition.
4065 */
4066 static int
4067 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4068 {
4069 char *path;
4070 int fd;
4071 char diskname[MAXPATHLEN];
4072 int err = -1;
4073
4074 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4075 return (err);
4076
4077 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4078 strrchr(path, '/'));
4079 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
4080 struct dk_gpt *vtoc;
4081
4082 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4083 if (sb != NULL)
4084 *sb = vtoc->efi_parts[0].p_start;
4085 efi_free(vtoc);
4086 }
4087 (void) close(fd);
4088 }
4089 return (err);
4090 }
4091
4092 /*
4093 * Determine where a partition starts on a disk in the current
4094 * configuration.
4095 */
4096 static diskaddr_t
4097 find_start_block(nvlist_t *config)
4098 {
4099 nvlist_t **child;
4100 uint_t c, children;
4101 diskaddr_t sb = MAXOFFSET_T;
4102 uint64_t wholedisk;
4103
4104 if (nvlist_lookup_nvlist_array(config,
4105 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4106 if (nvlist_lookup_uint64(config,
4107 ZPOOL_CONFIG_WHOLE_DISK,
4108 &wholedisk) != 0 || !wholedisk) {
4109 return (MAXOFFSET_T);
4110 }
4111 if (read_efi_label(config, &sb) < 0)
4112 sb = MAXOFFSET_T;
4113 return (sb);
4114 }
4115
4116 for (c = 0; c < children; c++) {
4117 sb = find_start_block(child[c]);
4118 if (sb != MAXOFFSET_T) {
4119 return (sb);
4120 }
4121 }
4122 return (MAXOFFSET_T);
4123 }
4124
4125 static int
4126 zpool_label_disk_check(char *path)
4127 {
4128 struct dk_gpt *vtoc;
4129 int fd, err;
4130
4131 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4132 return (errno);
4133
4134 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4135 (void) close(fd);
4136 return (err);
4137 }
4138
4139 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4140 efi_free(vtoc);
4141 (void) close(fd);
4142 return (EIDRM);
4143 }
4144
4145 efi_free(vtoc);
4146 (void) close(fd);
4147 return (0);
4148 }
4149
4150 /*
4151 * Generate a unique partition name for the ZFS member. Partitions must
4152 * have unique names to ensure udev will be able to create symlinks under
4153 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4154 * of the form zfs-<unique-id>, matching the snprintf() below.
4155 */
4156 static void
4157 zpool_label_name(char *label_name, int label_size)
4158 {
4159 uint64_t id = 0;
4160 int fd;
4161
4162 fd = open("/dev/urandom", O_RDONLY);
4163 if (fd >= 0) {	/* 0 is a valid descriptor */
4164 if (read(fd, &id, sizeof (id)) != sizeof (id))
4165 id = 0;
4166
4167 close(fd);
4168 }
4169
4170 if (id == 0)
4171 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4172
4173 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t) id);
4174 }
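/*
 * Example (illustrative sketch, hypothetical output): the generated
 * names look like "zfs-1a2b3c4d5e6f7081"; sixteen hex digits fit
 * comfortably within EFI_PART_NAME_LEN while being effectively unique.
 */
#if 0
static void
example_label_name(void)
{
	char name[EFI_PART_NAME_LEN];

	zpool_label_name(name, sizeof (name));
	(void) printf("partition name: %s\n", name);
}
#endif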
4175
4176 /*
4177 * Label an individual disk. The name provided is the short name,
4178 * stripped of any leading /dev path.
4179 */
4180 int
4181 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4182 {
4183 char path[MAXPATHLEN];
4184 struct dk_gpt *vtoc;
4185 int rval, fd;
4186 size_t resv = EFI_MIN_RESV_SIZE;
4187 uint64_t slice_size;
4188 diskaddr_t start_block;
4189 char errbuf[1024];
4190
4191 /* prepare an error message just in case */
4192 (void) snprintf(errbuf, sizeof (errbuf),
4193 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4194
4195 if (zhp) {
4196 nvlist_t *nvroot;
4197
4198 #if defined(__sun__) || defined(__sun)
4199 if (zpool_is_bootable(zhp)) {
4200 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4201 "EFI labeled devices are not supported on root "
4202 "pools."));
4203 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
4204 }
4205 #endif
4206
4207 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4208 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4209
4210 if (zhp->zpool_start_block == 0)
4211 start_block = find_start_block(nvroot);
4212 else
4213 start_block = zhp->zpool_start_block;
4214 zhp->zpool_start_block = start_block;
4215 } else {
4216 /* new pool */
4217 start_block = NEW_START_BLOCK;
4218 }
4219
4220 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4221
4222 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
4223 /*
4224 * This shouldn't happen. We've long since verified that this
4225 * is a valid device.
4226 */
4227 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4228 "label '%s': unable to open device: %d"), path, errno);
4229 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4230 }
4231
4232 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4233 /*
4234 * The only way this can fail is if we run out of memory, or we
4235 * were unable to read the disk's capacity.
4236 */
4237 if (errno == ENOMEM)
4238 (void) no_memory(hdl);
4239
4240 (void) close(fd);
4241 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4242 "label '%s': unable to read disk capacity"), path);
4243
4244 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4245 }
4246
4247 slice_size = vtoc->efi_last_u_lba + 1;
4248 slice_size -= EFI_MIN_RESV_SIZE;
4249 if (start_block == MAXOFFSET_T)
4250 start_block = NEW_START_BLOCK;
4251 slice_size -= start_block;
4252 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4253
4254 vtoc->efi_parts[0].p_start = start_block;
4255 vtoc->efi_parts[0].p_size = slice_size;
4256
4257 /*
4258 * Why we use V_USR: V_BACKUP confuses users, and is considered
4259 * disposable by some EFI utilities (since EFI doesn't have a backup
4260 * slice). V_UNASSIGNED is supposed to be used only for zero size
4261 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4262 * etc. were all pretty specific. V_USR is as close to reality as we
4263 * can get, in the absence of V_OTHER.
4264 */
4265 vtoc->efi_parts[0].p_tag = V_USR;
4266 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4267
4268 vtoc->efi_parts[8].p_start = slice_size + start_block;
4269 vtoc->efi_parts[8].p_size = resv;
4270 vtoc->efi_parts[8].p_tag = V_RESERVED;
4271
4272 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4273 /*
4274 * Some block drivers (like pcata) may not support EFI
4275 * GPT labels. Print out a helpful error message
4276 * directing the user to manually label the disk and
4277 * give a specific slice.
4278 */
4279 (void) close(fd);
4280 efi_free(vtoc);
4281
4282 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4283 "parted(8) and then provide a specific slice: %d"), rval);
4284 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4285 }
4286
4287 (void) close(fd);
4288 efi_free(vtoc);
4289
4290 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4291 (void) zfs_append_partition(path, MAXPATHLEN);
4292
4293 /* Wait for udev to signal the device has settled. */
4294 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4295 if (rval) {
4296 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4297 "detect device partitions on '%s': %d"), path, rval);
4298 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4299 }
4300
4301 /* We can't be too paranoid. Read the label back and verify it. */
4302 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4303 rval = zpool_label_disk_check(path);
4304 if (rval) {
4305 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4306 "EFI label on '%s' is damaged. Ensure\nthis device "
4307 "is not in in use, and is functioning properly: %d"),
4308 path, rval);
4309 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4310 }
4311
4312 return (0);
4313 }
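/*
 * Example (illustrative sketch, hypothetical device name): labeling a
 * bare disk before adding it to a pool. "sdb" stands in for whatever
 * short name the caller has already validated under /dev; passing a
 * NULL zhp labels the disk as for a brand-new pool, starting the data
 * partition at NEW_START_BLOCK.
 */
#if 0
static int
example_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	return (zpool_label_disk(hdl, zhp, "sdb"));
}
#endif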