/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

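	/*
	 * The initial destination buffer may be too small for the
	 * property nvlist; when the ioctl reports ENOMEM, grow the
	 * buffer and retry.
	 */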
	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * API compatibility wrapper around zpool_get_prop_literal
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	return (zpool_get_prop_literal(zhp, prop, buf, len, srctype, B_FALSE));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
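			/*
			 * The ratio is kept as a fixed-point value; e.g.
			 * a hypothetical intval of 156 prints as "1.56x".
			 */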
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check that the bootfs name is rooted in the pool it is being set on.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
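
/*
 * For example, with a hypothetical pool named "tank", both "tank" and
 * "tank/root" are acceptable bootfs values, while "tank2/root" is not.
 */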

#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset
			 * name and the dataset has to be in the same pool
			 * that the property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
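
/*
 * Example usage (hypothetical consumer code; "tank" is an illustrative
 * pool name, not part of this library):
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		(void) zpool_set_prop(zhp, "comment", "primary pool");
 *		zpool_close(zhp);
 *	}
 */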

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
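
/*
 * For example, querying a hypothetical pool for "feature@async_destroy"
 * yields "disabled" before the feature flag is on disk, "enabled" once
 * it is on disk with a zero refcount, and "active" once the refcount
 * becomes non-zero.
 */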

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k; other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
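
/*
 * With 512B blocks, 2048 sectors is exactly 1m (2048 * 512 =
 * 1,048,576 bytes), so both the starting block and the partition end
 * fall on 1m boundaries.
 */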

/*
 * Validate the given pool name, optionally reporting extended error
 * information via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

#if defined(__sun__) || defined(__sun)
	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names, where
		 *   zfs_strcmp_shortname() is used to check all possible
		 *   expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
2025 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2026 uint64_t wholedisk = 0;
2027
2028 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2029 &wholedisk);
2030 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2031 return (nv);
2032
2033 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2034 char *type, *idx, *end, *p;
2035 uint64_t id, vdev_id;
2036
2037 /*
2038 * Determine our vdev type, keeping in mind
2039 * that the srchval is composed of a type and
2040 * vdev id pair (i.e. mirror-4).
2041 */
2042 if ((type = strdup(srchval)) == NULL)
2043 return (NULL);
2044
2045 if ((p = strrchr(type, '-')) == NULL) {
2046 free(type);
2047 break;
2048 }
2049 idx = p + 1;
2050 *p = '\0';
2051
2052 /*
2053 * If the types don't match then keep looking.
2054 */
2055 if (strncmp(val, type, strlen(val)) != 0) {
2056 free(type);
2057 break;
2058 }
2059
2060 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2061 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2062 strncmp(type, VDEV_TYPE_MIRROR,
2063 strlen(VDEV_TYPE_MIRROR)) == 0);
2064 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2065 &id) == 0);
2066
2067 errno = 0;
2068 vdev_id = strtoull(idx, &end, 10);
2069
2070 free(type);
2071 if (errno != 0)
2072 return (NULL);
2073
2074 /*
2075 * Now verify that we have the correct vdev id.
2076 */
2077 if (vdev_id == id)
2078 return (nv);
2079 }
2080
2081 /*
2082 * Common case
2083 */
2084 if (strcmp(srchval, val) == 0)
2085 return (nv);
2086 break;
2087 }
2088
2089 default:
2090 break;
2091 }
2092
2093 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2094 &child, &children) != 0)
2095 return (NULL);
2096
2097 for (c = 0; c < children; c++) {
2098 if ((ret = vdev_to_nvlist_iter(child[c], search,
2099 avail_spare, l2cache, NULL)) != NULL) {
2100 /*
2101 * The 'is_log' value is only set for the top-level
2102 * vdev, not the leaf vdevs. So we always look up the
2103 * log device from the root of the vdev tree (where
2104 * 'log' is non-NULL).
2105 */
2106 if (log != NULL &&
2107 nvlist_lookup_uint64(child[c],
2108 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2109 is_log) {
2110 *log = B_TRUE;
2111 }
2112 return (ret);
2113 }
2114 }
2115
2116 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2117 &child, &children) == 0) {
2118 for (c = 0; c < children; c++) {
2119 if ((ret = vdev_to_nvlist_iter(child[c], search,
2120 avail_spare, l2cache, NULL)) != NULL) {
2121 *avail_spare = B_TRUE;
2122 return (ret);
2123 }
2124 }
2125 }
2126
2127 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2128 &child, &children) == 0) {
2129 for (c = 0; c < children; c++) {
2130 if ((ret = vdev_to_nvlist_iter(child[c], search,
2131 avail_spare, l2cache, NULL)) != NULL) {
2132 *l2cache = B_TRUE;
2133 return (ret);
2134 }
2135 }
2136 }
2137
2138 return (NULL);
2139 }
2140
2141 /*
2142 * Given a physical path (minus the "/devices" prefix), find the
2143 * associated vdev.
2144 */
2145 nvlist_t *
2146 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2147 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2148 {
2149 nvlist_t *search, *nvroot, *ret;
2150
2151 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2152 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2153
2154 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2155 &nvroot) == 0);
2156
2157 *avail_spare = B_FALSE;
2158 *l2cache = B_FALSE;
2159 if (log != NULL)
2160 *log = B_FALSE;
2161 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2162 nvlist_free(search);
2163
2164 return (ret);
2165 }
2166
2167 /*
2168 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2169 */
2170 boolean_t
2171 zpool_vdev_is_interior(const char *name)
2172 {
2173 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2174 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2175 return (B_TRUE);
2176 return (B_FALSE);
2177 }
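
/*
 * For example, "mirror-1" and "raidz2-0" are interior names, while a leaf
 * device name such as "sda" is not (hypothetical names; only the type
 * prefix is compared here).
 */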
2178
2179 nvlist_t *
2180 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2181 boolean_t *l2cache, boolean_t *log)
2182 {
2183 char *end;
2184 nvlist_t *nvroot, *search, *ret;
2185 uint64_t guid;
2186
2187 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2188
2189 guid = strtoull(path, &end, 0);
2190 if (guid != 0 && *end == '\0') {
2191 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2192 } else if (zpool_vdev_is_interior(path)) {
2193 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2194 } else {
2195 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2196 }
2197
2198 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2199 &nvroot) == 0);
2200
2201 *avail_spare = B_FALSE;
2202 *l2cache = B_FALSE;
2203 if (log != NULL)
2204 *log = B_FALSE;
2205 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2206 nvlist_free(search);
2207
2208 return (ret);
2209 }
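
/*
 * Example: given the search keys built above, callers may pass a vdev
 * guid, an interior name such as "mirror-1", or a device path. A minimal
 * sketch, assuming an open handle 'zhp' and a hypothetical device "sda":
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "sda", &spare, &l2cache, &log);
 *	if (tgt != NULL && !spare && !l2cache)
 *		... operate on the vdev's config nvlist ...
 */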
2210
2211 static int
2212 vdev_online(nvlist_t *nv)
2213 {
2214 uint64_t ival;
2215
2216 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2217 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2218 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2219 return (0);
2220
2221 return (1);
2222 }
2223
2224 /*
2225 * Helper function for vdev_get_physpaths().
2226 */
2227 static int
2228 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2229 size_t *bytes_written)
2230 {
2231 size_t bytes_left, pos, rsz;
2232 char *tmppath;
2233 const char *format;
2234
2235 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2236 &tmppath) != 0)
2237 return (EZFS_NODEVICE);
2238
2239 pos = *bytes_written;
2240 bytes_left = physpath_size - pos;
2241 format = (pos == 0) ? "%s" : " %s";
2242
2243 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2244 *bytes_written += rsz;
2245
2246 if (rsz >= bytes_left) {
2247 /* if physpath was not copied properly, clear it */
2248 if (bytes_left != 0) {
2249 physpath[pos] = 0;
2250 }
2251 return (EZFS_NOSPC);
2252 }
2253 return (0);
2254 }
2255
2256 static int
2257 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2258 size_t *rsz, boolean_t is_spare)
2259 {
2260 char *type;
2261 int ret;
2262
2263 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2264 return (EZFS_INVALCONFIG);
2265
2266 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2267 /*
2268 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2269 * For a spare vdev, we only want to boot from the active
2270 * spare device.
2271 */
2272 if (is_spare) {
2273 uint64_t spare = 0;
2274 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2275 &spare);
2276 if (!spare)
2277 return (EZFS_INVALCONFIG);
2278 }
2279
2280 if (vdev_online(nv)) {
2281 if ((ret = vdev_get_one_physpath(nv, physpath,
2282 phypath_size, rsz)) != 0)
2283 return (ret);
2284 }
2285 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2286 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2287 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2288 nvlist_t **child;
2289 uint_t count;
2290 int i, ret;
2291
2292 if (nvlist_lookup_nvlist_array(nv,
2293 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2294 return (EZFS_INVALCONFIG);
2295
2296 for (i = 0; i < count; i++) {
2297 ret = vdev_get_physpaths(child[i], physpath,
2298 phypath_size, rsz, is_spare);
2299 if (ret == EZFS_NOSPC)
2300 return (ret);
2301 }
2302 }
2303
2304 return (EZFS_POOL_INVALARG);
2305 }
2306
2307 /*
2308 * Get phys_path for a root pool config.
2309 * Return 0 on success; non-zero on failure.
2310 */
2311 static int
2312 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2313 {
2314 size_t rsz;
2315 nvlist_t *vdev_root;
2316 nvlist_t **child;
2317 uint_t count;
2318 char *type;
2319
2320 rsz = 0;
2321
2322 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2323 &vdev_root) != 0)
2324 return (EZFS_INVALCONFIG);
2325
2326 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2327 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2328 &child, &count) != 0)
2329 return (EZFS_INVALCONFIG);
2330
2331 #if defined(__sun__) || defined(__sun)
2332 /*
2333 * A root pool cannot have EFI labeled disks and can only have
2334 * a single top-level vdev.
2335 */
2336 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2337 pool_uses_efi(vdev_root))
2338 return (EZFS_POOL_INVALARG);
2339 #endif
2340
2341 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2342 B_FALSE);
2343
2344 /* No online devices */
2345 if (rsz == 0)
2346 return (EZFS_NODEVICE);
2347
2348 return (0);
2349 }
2350
2351 /*
2352 * Get phys_path for a root pool.
2353 * Return 0 on success; non-zero on failure.
2354 */
2355 int
2356 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2357 {
2358 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2359 phypath_size));
2360 }
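
/*
 * Example: fetching the space-separated phys_path list for a bootable
 * pool. A minimal sketch; 'zhp' is assumed to reference a root pool:
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot device(s): %s\n", physpath);
 */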
2361
2362 /*
2363 * If the device has been dynamically expanded, then we need to relabel
2364 * the disk to use the new unallocated space.
2365 */
2366 static int
2367 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2368 {
2369 int fd, error;
2370
2371 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2372 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2373 "relabel '%s': unable to open device: %d"), path, errno);
2374 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2375 }
2376
2377 /*
2378 * It's possible that we might encounter an error if the device
2379 * does not have any unallocated space left. If so, we simply
2380 * ignore that error and continue on.
2381 *
2382 * Also, we don't call efi_rescan() - that would just return EBUSY.
2383 * The module will do it for us in vdev_disk_open().
2384 */
2385 error = efi_use_whole_disk(fd);
2386 (void) close(fd);
2387 if (error && error != VT_ENOSPC) {
2388 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2389 "relabel '%s': unable to read disk capacity"), path);
2390 return (zfs_error(hdl, EZFS_NOCAP, msg));
2391 }
2392 return (0);
2393 }
2394
2395 /*
2396 * Bring the specified vdev online. The 'flags' parameter is a set of the
2397 * ZFS_ONLINE_* flags.
2398 */
2399 int
2400 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2401 vdev_state_t *newstate)
2402 {
2403 zfs_cmd_t zc = {"\0"};
2404 char msg[1024];
2405 nvlist_t *tgt;
2406 boolean_t avail_spare, l2cache, islog;
2407 libzfs_handle_t *hdl = zhp->zpool_hdl;
2408 int error;
2409
2410 if (flags & ZFS_ONLINE_EXPAND) {
2411 (void) snprintf(msg, sizeof (msg),
2412 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2413 } else {
2414 (void) snprintf(msg, sizeof (msg),
2415 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2416 }
2417
2418 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2419 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2420 &islog)) == NULL)
2421 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2422
2423 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2424
2425 if (avail_spare)
2426 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2427
2428 if (flags & ZFS_ONLINE_EXPAND ||
2429 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2430 uint64_t wholedisk = 0;
2431
2432 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2433 &wholedisk);
2434
2435 /*
2436 * XXX - L2ARC 1.0 devices can't support expansion.
2437 */
2438 if (l2cache) {
2439 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2440 "cannot expand cache devices"));
2441 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2442 }
2443
2444 if (wholedisk) {
2445 const char *fullpath = path;
2446 char buf[MAXPATHLEN];
2447
2448 if (path[0] != '/') {
2449 error = zfs_resolve_shortname(path, buf,
2450 sizeof (buf));
2451 if (error != 0)
2452 return (zfs_error(hdl, EZFS_NODEVICE,
2453 msg));
2454
2455 fullpath = buf;
2456 }
2457
2458 error = zpool_relabel_disk(hdl, fullpath, msg);
2459 if (error != 0)
2460 return (error);
2461 }
2462 }
2463
2464 zc.zc_cookie = VDEV_STATE_ONLINE;
2465 zc.zc_obj = flags;
2466
2467 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2468 if (errno == EINVAL) {
2469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2470 "from this pool into a new one. Use '%s' "
2471 "instead"), "zpool detach");
2472 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2473 }
2474 return (zpool_standard_error(hdl, errno, msg));
2475 }
2476
2477 *newstate = zc.zc_cookie;
2478 return (0);
2479 }
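
/*
 * Example: onlining a device and requesting expansion in one call. A
 * minimal sketch, assuming an open handle 'zhp' and a hypothetical
 * device name "sda"; 'newstate' reports the resulting vdev state:
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		... the device is online and may have been expanded ...
 */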
2480
2481 /*
2482 * Take the specified vdev offline
2483 */
2484 int
2485 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2486 {
2487 zfs_cmd_t zc = {"\0"};
2488 char msg[1024];
2489 nvlist_t *tgt;
2490 boolean_t avail_spare, l2cache;
2491 libzfs_handle_t *hdl = zhp->zpool_hdl;
2492
2493 (void) snprintf(msg, sizeof (msg),
2494 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2495
2496 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2497 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2498 NULL)) == NULL)
2499 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2500
2501 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2502
2503 if (avail_spare)
2504 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2505
2506 zc.zc_cookie = VDEV_STATE_OFFLINE;
2507 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2508
2509 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2510 return (0);
2511
2512 switch (errno) {
2513 case EBUSY:
2514
2515 /*
2516 * There are no other replicas of this device.
2517 */
2518 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2519
2520 case EEXIST:
2521 /*
2522 * The log device has unplayed logs.
2523 */
2524 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2525
2526 default:
2527 return (zpool_standard_error(hdl, errno, msg));
2528 }
2529 }
2530
2531 /*
2532 * Mark the given vdev faulted.
2533 */
2534 int
2535 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2536 {
2537 zfs_cmd_t zc = {"\0"};
2538 char msg[1024];
2539 libzfs_handle_t *hdl = zhp->zpool_hdl;
2540
2541 (void) snprintf(msg, sizeof (msg),
2542 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2543
2544 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2545 zc.zc_guid = guid;
2546 zc.zc_cookie = VDEV_STATE_FAULTED;
2547 zc.zc_obj = aux;
2548
2549 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2550 return (0);
2551
2552 switch (errno) {
2553 case EBUSY:
2554
2555 /*
2556 * There are no other replicas of this device.
2557 */
2558 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2559
2560 default:
2561 return (zpool_standard_error(hdl, errno, msg));
2562 }
2563
2564 }
2565
2566 /*
2567 * Mark the given vdev degraded.
2568 */
2569 int
2570 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2571 {
2572 zfs_cmd_t zc = {"\0"};
2573 char msg[1024];
2574 libzfs_handle_t *hdl = zhp->zpool_hdl;
2575
2576 (void) snprintf(msg, sizeof (msg),
2577 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2578
2579 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2580 zc.zc_guid = guid;
2581 zc.zc_cookie = VDEV_STATE_DEGRADED;
2582 zc.zc_obj = aux;
2583
2584 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2585 return (0);
2586
2587 return (zpool_standard_error(hdl, errno, msg));
2588 }
2589
2590 /*
2591 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2592 * a hot spare.
2593 */
2594 static boolean_t
2595 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2596 {
2597 nvlist_t **child;
2598 uint_t c, children;
2599 char *type;
2600
2601 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2602 &children) == 0) {
2603 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2604 &type) == 0);
2605
2606 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2607 children == 2 && child[which] == tgt)
2608 return (B_TRUE);
2609
2610 for (c = 0; c < children; c++)
2611 if (is_replacing_spare(child[c], tgt, which))
2612 return (B_TRUE);
2613 }
2614
2615 return (B_FALSE);
2616 }
2617
2618 /*
2619 * Attach new_disk (fully described by nvroot) to old_disk.
2620 * If 'replacing' is specified, the new disk will replace the old one.
2621 */
2622 int
2623 zpool_vdev_attach(zpool_handle_t *zhp,
2624 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2625 {
2626 zfs_cmd_t zc = {"\0"};
2627 char msg[1024];
2628 int ret;
2629 nvlist_t *tgt;
2630 boolean_t avail_spare, l2cache, islog;
2631 uint64_t val;
2632 char *newname;
2633 nvlist_t **child;
2634 uint_t children;
2635 nvlist_t *config_root;
2636 libzfs_handle_t *hdl = zhp->zpool_hdl;
2637 boolean_t rootpool = zpool_is_bootable(zhp);
2638
2639 if (replacing)
2640 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2641 "cannot replace %s with %s"), old_disk, new_disk);
2642 else
2643 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2644 "cannot attach %s to %s"), new_disk, old_disk);
2645
2646 #if defined(__sun__) || defined(__sun)
2647 /*
2648 * If this is a root pool, make sure that we're not attaching an
2649 * EFI labeled device.
2650 */
2651 if (rootpool && pool_uses_efi(nvroot)) {
2652 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2653 "EFI labeled devices are not supported on root pools."));
2654 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2655 }
2656 #endif
2657
2658 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2659 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2660 &islog)) == 0)
2661 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2662
2663 if (avail_spare)
2664 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2665
2666 if (l2cache)
2667 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2668
2669 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2670 zc.zc_cookie = replacing;
2671
2672 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2673 &child, &children) != 0 || children != 1) {
2674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2675 "new device must be a single disk"));
2676 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2677 }
2678
2679 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2680 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2681
2682 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2683 return (-1);
2684
2685 /*
2686 * If the target is a hot spare that has been swapped in, we can only
2687 * replace it with another hot spare.
2688 */
2689 if (replacing &&
2690 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2691 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2692 NULL) == NULL || !avail_spare) &&
2693 is_replacing_spare(config_root, tgt, 1)) {
2694 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2695 "can only be replaced by another hot spare"));
2696 free(newname);
2697 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2698 }
2699
2700 free(newname);
2701
2702 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2703 return (-1);
2704
2705 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2706
2707 zcmd_free_nvlists(&zc);
2708
2709 if (ret == 0) {
2710 if (rootpool) {
2711 /*
2712 * XXX need a better way to prevent user from
2713 * booting up a half-baked vdev.
2714 */
2715 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2716 "sure to wait until resilver is done "
2717 "before rebooting.\n"));
2718 }
2719 return (0);
2720 }
2721
2722 switch (errno) {
2723 case ENOTSUP:
2724 /*
2725 * Can't attach to or replace this type of vdev.
2726 */
2727 if (replacing) {
2728 uint64_t version = zpool_get_prop_int(zhp,
2729 ZPOOL_PROP_VERSION, NULL);
2730
2731 if (islog)
2732 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2733 "cannot replace a log with a spare"));
2734 else if (version >= SPA_VERSION_MULTI_REPLACE)
2735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2736 "already in replacing/spare config; wait "
2737 "for completion or use 'zpool detach'"));
2738 else
2739 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2740 "cannot replace a replacing device"));
2741 } else {
2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2743 "can only attach to mirrors and top-level "
2744 "disks"));
2745 }
2746 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2747 break;
2748
2749 case EINVAL:
2750 /*
2751 * The new device must be a single disk.
2752 */
2753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2754 "new device must be a single disk"));
2755 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2756 break;
2757
2758 case EBUSY:
2759 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2760 new_disk);
2761 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2762 break;
2763
2764 case EOVERFLOW:
2765 /*
2766 * The new device is too small.
2767 */
2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2769 "device is too small"));
2770 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2771 break;
2772
2773 case EDOM:
2774 /*
2775 * The new device has a different optimal sector size.
2776 */
2777 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2778 "new device has a different optimal sector size; use the "
2779 "option '-o ashift=N' to override the optimal size"));
2780 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2781 break;
2782
2783 case ENAMETOOLONG:
2784 /*
2785 * The resulting top-level vdev spec won't fit in the label.
2786 */
2787 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2788 break;
2789
2790 default:
2791 (void) zpool_standard_error(hdl, errno, msg);
2792 }
2793
2794 return (-1);
2795 }
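
/*
 * Example: the nvroot argument is a root vdev tree with exactly one
 * child, as enforced above. A minimal sketch of building it with the
 * nvlist API (error checks omitted; "/dev/sdb" is hypothetical, and a
 * real caller may need to set additional config keys):
 *
 *	nvlist_t *root, *disk;
 *
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *	(void) zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", root, 1);
 */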
2796
2797 /*
2798 * Detach the specified device.
2799 */
2800 int
2801 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2802 {
2803 zfs_cmd_t zc = {"\0"};
2804 char msg[1024];
2805 nvlist_t *tgt;
2806 boolean_t avail_spare, l2cache;
2807 libzfs_handle_t *hdl = zhp->zpool_hdl;
2808
2809 (void) snprintf(msg, sizeof (msg),
2810 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2811
2812 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2813 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2814 NULL)) == 0)
2815 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2816
2817 if (avail_spare)
2818 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2819
2820 if (l2cache)
2821 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2822
2823 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2824
2825 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2826 return (0);
2827
2828 switch (errno) {
2829
2830 case ENOTSUP:
2831 /*
2832 * Can't detach from this type of vdev.
2833 */
2834 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2835 "applicable to mirror and replacing vdevs"));
2836 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2837 break;
2838
2839 case EBUSY:
2840 /*
2841 * There are no other replicas of this device.
2842 */
2843 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2844 break;
2845
2846 default:
2847 (void) zpool_standard_error(hdl, errno, msg);
2848 }
2849
2850 return (-1);
2851 }
2852
2853 /*
2854 * Find a mirror vdev in the source nvlist.
2855 *
2856 * The mchild array contains a list of disks in one of the top-level mirrors
2857 * of the source pool. The schild array contains a list of disks that the
2858 * user specified on the command line. We loop over the mchild array to
2859 * see if any entry in the schild array matches.
2860 *
2861 * If a disk in the mchild array is found in the schild array, we return
2862 * the index of that entry. Otherwise we return -1.
2863 */
2864 static int
2865 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2866 nvlist_t **schild, uint_t schildren)
2867 {
2868 uint_t mc;
2869
2870 for (mc = 0; mc < mchildren; mc++) {
2871 uint_t sc;
2872 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2873 mchild[mc], B_FALSE);
2874
2875 for (sc = 0; sc < schildren; sc++) {
2876 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2877 schild[sc], B_FALSE);
2878 boolean_t result = (strcmp(mpath, spath) == 0);
2879
2880 free(spath);
2881 if (result) {
2882 free(mpath);
2883 return (mc);
2884 }
2885 }
2886
2887 free(mpath);
2888 }
2889
2890 return (-1);
2891 }
2892
2893 /*
2894 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2895 * is generated, and it is the caller's responsibility to free it.
2896 */
2897 int
2898 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2899 nvlist_t *props, splitflags_t flags)
2900 {
2901 zfs_cmd_t zc = {"\0"};
2902 char msg[1024];
2903 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2904 nvlist_t **varray = NULL, *zc_props = NULL;
2905 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2906 libzfs_handle_t *hdl = zhp->zpool_hdl;
2907 uint64_t vers;
2908 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2909 int retval = 0;
2910
2911 (void) snprintf(msg, sizeof (msg),
2912 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2913
2914 if (!zpool_name_valid(hdl, B_FALSE, newname))
2915 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2916
2917 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2918 (void) fprintf(stderr, gettext("Internal error: unable to "
2919 "retrieve pool configuration\n"));
2920 return (-1);
2921 }
2922
2923 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2924 == 0);
2925 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2926
2927 if (props) {
2928 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2929 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2930 props, vers, flags, msg)) == NULL)
2931 return (-1);
2932 }
2933
2934 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2935 &children) != 0) {
2936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2937 "Source pool is missing vdev tree"));
2938 if (zc_props)
2939 nvlist_free(zc_props);
2940 return (-1);
2941 }
2942
2943 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2944 vcount = 0;
2945
2946 if (*newroot == NULL ||
2947 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2948 &newchild, &newchildren) != 0)
2949 newchildren = 0;
2950
2951 for (c = 0; c < children; c++) {
2952 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2953 char *type;
2954 nvlist_t **mchild, *vdev;
2955 uint_t mchildren;
2956 int entry;
2957
2958 /*
2959 * Unlike cache & spares, slogs are stored in the
2960 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2961 */
2962 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2963 &is_log);
2964 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2965 &is_hole);
2966 if (is_log || is_hole) {
2967 /*
2968 * Create a hole vdev and put it in the config.
2969 */
2970 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2971 goto out;
2972 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2973 VDEV_TYPE_HOLE) != 0)
2974 goto out;
2975 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2976 1) != 0)
2977 goto out;
2978 if (lastlog == 0)
2979 lastlog = vcount;
2980 varray[vcount++] = vdev;
2981 continue;
2982 }
2983 lastlog = 0;
2984 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2985 == 0);
2986 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2987 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2988 "Source pool must be composed only of mirrors\n"));
2989 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2990 goto out;
2991 }
2992
2993 verify(nvlist_lookup_nvlist_array(child[c],
2994 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2995
2996 /* find or add an entry for this top-level vdev */
2997 if (newchildren > 0 &&
2998 (entry = find_vdev_entry(zhp, mchild, mchildren,
2999 newchild, newchildren)) >= 0) {
3000 /* We found a disk that the user specified. */
3001 vdev = mchild[entry];
3002 ++found;
3003 } else {
3004 /* User didn't specify a disk for this vdev. */
3005 vdev = mchild[mchildren - 1];
3006 }
3007
3008 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3009 goto out;
3010 }
3011
3012 /* did we find every disk the user specified? */
3013 if (found != newchildren) {
3014 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3015 "include at most one disk from each mirror"));
3016 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3017 goto out;
3018 }
3019
3020 /* Prepare the nvlist for populating. */
3021 if (*newroot == NULL) {
3022 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3023 goto out;
3024 freelist = B_TRUE;
3025 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3026 VDEV_TYPE_ROOT) != 0)
3027 goto out;
3028 } else {
3029 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3030 }
3031
3032 /* Add all the children we found */
3033 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3034 lastlog == 0 ? vcount : lastlog) != 0)
3035 goto out;
3036
3037 /*
3038 * If we're just doing a dry run, exit now with success.
3039 */
3040 if (flags.dryrun) {
3041 memory_err = B_FALSE;
3042 freelist = B_FALSE;
3043 goto out;
3044 }
3045
3046 /* now build up the config list & call the ioctl */
3047 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3048 goto out;
3049
3050 if (nvlist_add_nvlist(newconfig,
3051 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3052 nvlist_add_string(newconfig,
3053 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3054 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3055 goto out;
3056
3057 /*
3058 * The new pool is automatically part of the namespace unless we
3059 * explicitly export it.
3060 */
3061 if (!flags.import)
3062 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3063 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3064 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3065 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3066 goto out;
3067 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3068 goto out;
3069
3070 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3071 retval = zpool_standard_error(hdl, errno, msg);
3072 goto out;
3073 }
3074
3075 freelist = B_FALSE;
3076 memory_err = B_FALSE;
3077
3078 out:
3079 if (varray != NULL) {
3080 int v;
3081
3082 for (v = 0; v < vcount; v++)
3083 nvlist_free(varray[v]);
3084 free(varray);
3085 }
3086 zcmd_free_nvlists(&zc);
3087 if (zc_props)
3088 nvlist_free(zc_props);
3089 if (newconfig)
3090 nvlist_free(newconfig);
3091 if (freelist) {
3092 nvlist_free(*newroot);
3093 *newroot = NULL;
3094 }
3095
3096 if (retval != 0)
3097 return (retval);
3098
3099 if (memory_err)
3100 return (no_memory(hdl));
3101
3102 return (0);
3103 }
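
/*
 * Example: a dry-run split that only computes the new vdev tree. A
 * minimal sketch; "newpool" is a hypothetical name, and the returned
 * '*newroot' must be freed by the caller as described above:
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */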
3104
3105 /*
3106 * Remove the given device. Currently, this is supported only for hot
3107 * spares, level 2 cache devices, and log devices.
3108 */
3109 int
3110 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3111 {
3112 zfs_cmd_t zc = {"\0"};
3113 char msg[1024];
3114 nvlist_t *tgt;
3115 boolean_t avail_spare, l2cache, islog;
3116 libzfs_handle_t *hdl = zhp->zpool_hdl;
3117 uint64_t version;
3118
3119 (void) snprintf(msg, sizeof (msg),
3120 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3121
3122 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3123 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3124 &islog)) == 0)
3125 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3126 /*
3127 * XXX - this should just go away.
3128 */
3129 if (!avail_spare && !l2cache && !islog) {
3130 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3131 "only inactive hot spares, cache, top-level, "
3132 "or log devices can be removed"));
3133 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3134 }
3135
3136 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3137 if (islog && version < SPA_VERSION_HOLES) {
3138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3139 "pool must be upgrade to support log removal"));
3140 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3141 }
3142
3143 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3144
3145 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3146 return (0);
3147
3148 return (zpool_standard_error(hdl, errno, msg));
3149 }
3150
3151 /*
3152 * Clear the errors for the pool, or the particular device if specified.
3153 */
3154 int
3155 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3156 {
3157 zfs_cmd_t zc = {"\0"};
3158 char msg[1024];
3159 nvlist_t *tgt;
3160 zpool_rewind_policy_t policy;
3161 boolean_t avail_spare, l2cache;
3162 libzfs_handle_t *hdl = zhp->zpool_hdl;
3163 nvlist_t *nvi = NULL;
3164 int error;
3165
3166 if (path)
3167 (void) snprintf(msg, sizeof (msg),
3168 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3169 path);
3170 else
3171 (void) snprintf(msg, sizeof (msg),
3172 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3173 zhp->zpool_name);
3174
3175 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3176 if (path) {
3177 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3178 &l2cache, NULL)) == 0)
3179 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3180
3181 /*
3182 * Don't allow error clearing for hot spares. Do allow
3183 * error clearing for l2cache devices.
3184 */
3185 if (avail_spare)
3186 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3187
3188 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3189 &zc.zc_guid) == 0);
3190 }
3191
3192 zpool_get_rewind_policy(rewindnvl, &policy);
3193 zc.zc_cookie = policy.zrp_request;
3194
3195 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3196 return (-1);
3197
3198 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3199 return (-1);
3200
3201 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3202 errno == ENOMEM) {
3203 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3204 zcmd_free_nvlists(&zc);
3205 return (-1);
3206 }
3207 }
3208
3209 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3210 errno != EPERM && errno != EACCES)) {
3211 if (policy.zrp_request &
3212 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3213 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3214 zpool_rewind_exclaim(hdl, zc.zc_name,
3215 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3216 nvi);
3217 nvlist_free(nvi);
3218 }
3219 zcmd_free_nvlists(&zc);
3220 return (0);
3221 }
3222
3223 zcmd_free_nvlists(&zc);
3224 return (zpool_standard_error(hdl, errno, msg));
3225 }
3226
3227 /*
3228 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3229 */
3230 int
3231 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3232 {
3233 zfs_cmd_t zc = {"\0"};
3234 char msg[1024];
3235 libzfs_handle_t *hdl = zhp->zpool_hdl;
3236
3237 (void) snprintf(msg, sizeof (msg),
3238 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3239 (u_longlong_t)guid);
3240
3241 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3242 zc.zc_guid = guid;
3243 zc.zc_cookie = ZPOOL_NO_REWIND;
3244
3245 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3246 return (0);
3247
3248 return (zpool_standard_error(hdl, errno, msg));
3249 }
3250
3251 /*
3252 * Change the GUID for a pool.
3253 */
3254 int
3255 zpool_reguid(zpool_handle_t *zhp)
3256 {
3257 char msg[1024];
3258 libzfs_handle_t *hdl = zhp->zpool_hdl;
3259 zfs_cmd_t zc = {"\0"};
3260
3261 (void) snprintf(msg, sizeof (msg),
3262 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3263
3264 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3265 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3266 return (0);
3267
3268 return (zpool_standard_error(hdl, errno, msg));
3269 }
3270
3271 /*
3272 * Reopen the pool.
3273 */
3274 int
3275 zpool_reopen(zpool_handle_t *zhp)
3276 {
3277 zfs_cmd_t zc = {"\0"};
3278 char msg[1024];
3279 libzfs_handle_t *hdl = zhp->zpool_hdl;
3280
3281 (void) snprintf(msg, sizeof (msg),
3282 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3283 zhp->zpool_name);
3284
3285 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3286 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3287 return (0);
3288 return (zpool_standard_error(hdl, errno, msg));
3289 }
3290
3291 /*
3292 * Convert from a devid string to a path.
3293 */
3294 static char *
3295 devid_to_path(char *devid_str)
3296 {
3297 ddi_devid_t devid;
3298 char *minor;
3299 char *path;
3300 devid_nmlist_t *list = NULL;
3301 int ret;
3302
3303 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3304 return (NULL);
3305
3306 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3307
3308 devid_str_free(minor);
3309 devid_free(devid);
3310
3311 if (ret != 0)
3312 return (NULL);
3313
3314 if ((path = strdup(list[0].devname)) == NULL)
3315 return (NULL);
3316
3317 devid_free_nmlist(list);
3318
3319 return (path);
3320 }
3321
3322 /*
3323 * Convert from a path to a devid string.
3324 */
3325 static char *
3326 path_to_devid(const char *path)
3327 {
3328 int fd;
3329 ddi_devid_t devid;
3330 char *minor, *ret;
3331
3332 if ((fd = open(path, O_RDONLY)) < 0)
3333 return (NULL);
3334
3335 minor = NULL;
3336 ret = NULL;
3337 if (devid_get(fd, &devid) == 0) {
3338 if (devid_get_minor_name(fd, &minor) == 0)
3339 ret = devid_str_encode(devid, minor);
3340 if (minor != NULL)
3341 devid_str_free(minor);
3342 devid_free(devid);
3343 }
3344 (void) close(fd);
3345
3346 return (ret);
3347 }
3348
3349 /*
3350 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3351 * ignore any failure here, since a common case is for an unprivileged user to
3352 * type 'zpool status', and we'll display the correct information anyway.
3353 */
3354 static void
3355 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3356 {
3357 zfs_cmd_t zc = {"\0"};
3358
3359 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3360 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3361 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3362 &zc.zc_guid) == 0);
3363
3364 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3365 }
3366
3367 /*
3368 * Remove partition suffix from a vdev path. Partition suffixes may take three
3369 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3370 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3371 * third case only occurs when preceded by a string matching the regular
3372 * expression "^[hs]d[a-z]+", i.e. a SCSI or IDE disk.
3373 */
3374 static char *
3375 strip_partition(libzfs_handle_t *hdl, char *path)
3376 {
3377 char *tmp = zfs_strdup(hdl, path);
3378 char *part = NULL, *d = NULL;
3379
3380 if ((part = strstr(tmp, "-part")) && part != tmp) {
3381 d = part + 5;
3382 } else if ((part = strrchr(tmp, 'p')) &&
3383 part > tmp + 1 && isdigit(*(part-1))) {
3384 d = part + 1;
3385 } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3386 for (d = &tmp[2]; isalpha(*d); part = ++d);
3387 }
3388 if (part && d && *d != '\0') {
3389 for (; isdigit(*d); d++);
3390 if (*d == '\0')
3391 *part = '\0';
3392 }
3393 return (tmp);
3394 }
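
/*
 * For example (hypothetical inputs): "sda3" becomes "sda", "md0p2"
 * becomes "md0", and "disk-part1" becomes "disk"; paths without a
 * recognized partition suffix are returned unchanged.
 */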
3395
3396 #define PATH_BUF_LEN 64
3397
3398 /*
3399 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3400 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3401 * We also check if this is a whole disk, in which case we strip off the
3402 * trailing 's0' slice name.
3403 *
3404 * This routine is also responsible for identifying when disks have been
3405 * reconfigured in a new location. The kernel will have opened the device by
3406 * devid, but the path will still refer to the old location. To catch this, we
3407 * first do a path -> devid translation (which is fast for the common case). If
3408 * the devid matches, we're done. If not, we do a reverse devid -> path
3409 * translation and issue the appropriate ioctl() to update the path of the vdev.
3410 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3411 * of these checks.
3412 */
3413 char *
3414 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3415 boolean_t verbose)
3416 {
3417 char *path, *devid, *type;
3418 uint64_t value;
3419 char buf[PATH_BUF_LEN];
3420 char tmpbuf[PATH_BUF_LEN];
3421 vdev_stat_t *vs;
3422 uint_t vsc;
3423
3424 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3425 &value) == 0) {
3426 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3427 &value) == 0);
3428 (void) snprintf(buf, sizeof (buf), "%llu",
3429 (u_longlong_t)value);
3430 path = buf;
3431 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3432 /*
3433 * If the device is dead (faulted, offline, etc) then don't
3434 * bother opening it. Otherwise we may be forcing the user to
3435 * open a misbehaving device, which can have undesirable
3436 * effects.
3437 */
3438 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3439 (uint64_t **)&vs, &vsc) != 0 ||
3440 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3441 zhp != NULL &&
3442 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3443 /*
3444 * Determine if the current path is correct.
3445 */
3446 char *newdevid = path_to_devid(path);
3447
3448 if (newdevid == NULL ||
3449 strcmp(devid, newdevid) != 0) {
3450 char *newpath;
3451
3452 if ((newpath = devid_to_path(devid)) != NULL) {
3453 /*
3454 * Update the path appropriately.
3455 */
3456 set_path(zhp, nv, newpath);
3457 if (nvlist_add_string(nv,
3458 ZPOOL_CONFIG_PATH, newpath) == 0)
3459 verify(nvlist_lookup_string(nv,
3460 ZPOOL_CONFIG_PATH,
3461 &path) == 0);
3462 free(newpath);
3463 }
3464 }
3465
3466 if (newdevid)
3467 devid_str_free(newdevid);
3468 }
3469
3470 /*
3471 * For a block device only use the name.
3472 */
3473 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3474 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3475 path = strrchr(path, '/');
3476 path++;
3477 }
3478
3479 /*
3480 * Remove the partition from the path if this is a whole disk.
3481 */
3482 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3483 &value) == 0 && value) {
3484 return (strip_partition(hdl, path));
3485 }
3486 } else {
3487 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3488
3489 /*
3490 * If it's a raidz device, we need to stick in the parity level.
3491 */
3492 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3493
3494 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3495 &value) == 0);
3496 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3497 (u_longlong_t)value);
3498 path = buf;
3499 }
3500
3501 /*
3502 * We identify each top-level vdev by using a <type-id>
3503 * naming convention.
3504 */
3505 if (verbose) {
3506 uint64_t id;
3507
3508 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3509 &id) == 0);
3510 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3511 path, (u_longlong_t)id);
3512 path = tmpbuf;
3513 }
3514 }
3515
3516 return (zfs_strdup(hdl, path));
3517 }
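
/*
 * Example: printing the display name of each top-level vdev. A minimal
 * sketch, assuming 'child'/'children' were obtained from the config's
 * ZPOOL_CONFIG_CHILDREN array as elsewhere in this file:
 *
 *	for (c = 0; c < children; c++) {
 *		char *name = zpool_vdev_name(hdl, zhp, child[c], B_TRUE);
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */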
3518
3519 static int
3520 zbookmark_compare(const void *a, const void *b)
3521 {
3522 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3523 }
3524
3525 /*
3526 * Retrieve the persistent error log, uniquify the members, and return to the
3527 * caller.
3528 */
3529 int
3530 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3531 {
3532 zfs_cmd_t zc = {"\0"};
3533 uint64_t count;
3534 zbookmark_phys_t *zb = NULL;
3535 int i;
3536
3537 /*
3538 * Retrieve the raw error list from the kernel. If the number of errors
3539 * has increased, allocate more space and continue until we get the
3540 * entire list.
3541 */
3542 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3543 &count) == 0);
3544 if (count == 0)
3545 return (0);
3546 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3547 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3548 return (-1);
3549 zc.zc_nvlist_dst_size = count;
3550 (void) strcpy(zc.zc_name, zhp->zpool_name);
3551 for (;;) {
3552 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3553 &zc) != 0) {
3554 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3555 if (errno == ENOMEM) {
3556 void *dst;
3557
3558 count = zc.zc_nvlist_dst_size;
3559 dst = zfs_alloc(zhp->zpool_hdl, count *
3560 sizeof (zbookmark_phys_t));
3561 if (dst == NULL)
3562 return (-1);
3563 zc.zc_nvlist_dst = (uintptr_t)dst;
3564 } else {
3565 return (-1);
3566 }
3567 } else {
3568 break;
3569 }
3570 }
3571
3572 /*
3573 * Sort the resulting bookmarks. This is a little confusing due to the
3574 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3575 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3576 * _not_ copied as part of the process. So we point the start of our
3577 * array appropriately and decrement the total number of elements.
3578 */
3579 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3580 zc.zc_nvlist_dst_size;
3581 count -= zc.zc_nvlist_dst_size;
3582
3583 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare);
3584
3585 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3586
3587 /*
3588 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3589 */
3590 for (i = 0; i < count; i++) {
3591 nvlist_t *nv;
3592
3593 /* ignoring zb_blkid and zb_level for now */
3594 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3595 zb[i-1].zb_object == zb[i].zb_object)
3596 continue;
3597
3598 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3599 goto nomem;
3600 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3601 zb[i].zb_objset) != 0) {
3602 nvlist_free(nv);
3603 goto nomem;
3604 }
3605 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3606 zb[i].zb_object) != 0) {
3607 nvlist_free(nv);
3608 goto nomem;
3609 }
3610 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3611 nvlist_free(nv);
3612 goto nomem;
3613 }
3614 nvlist_free(nv);
3615 }
3616
3617 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3618 return (0);
3619
3620 nomem:
3621 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3622 return (no_memory(zhp->zpool_hdl));
3623 }
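
/*
 * Example: walking the uniquified error list. A minimal sketch; each
 * element is an nvlist holding ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT:
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL)
 *			... decode the per-error nvlist ...
 *		nvlist_free(nverrlist);
 *	}
 */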
3624
3625 /*
3626 * Upgrade a ZFS pool to the latest on-disk version.
3627 */
3628 int
3629 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3630 {
3631 zfs_cmd_t zc = {"\0"};
3632 libzfs_handle_t *hdl = zhp->zpool_hdl;
3633
3634 (void) strcpy(zc.zc_name, zhp->zpool_name);
3635 zc.zc_cookie = new_version;
3636
3637 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3638 return (zpool_standard_error_fmt(hdl, errno,
3639 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3640 zhp->zpool_name));
3641 return (0);
3642 }
3643
3644 void
3645 zfs_save_arguments(int argc, char **argv, char *string, int len)
3646 {
3647 int i;
3648
3649 (void) strlcpy(string, basename(argv[0]), len);
3650 for (i = 1; i < argc; i++) {
3651 (void) strlcat(string, " ", len);
3652 (void) strlcat(string, argv[i], len);
3653 }
3654 }
3655
3656 int
3657 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3658 {
3659 zfs_cmd_t zc = {"\0"};
3660 nvlist_t *args;
3661 int err;
3662
3663 args = fnvlist_alloc();
3664 fnvlist_add_string(args, "message", message);
3665 err = zcmd_write_src_nvlist(hdl, &zc, args);
3666 if (err == 0)
3667 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3668 nvlist_free(args);
3669 zcmd_free_nvlists(&zc);
3670 return (err);
3671 }
3672
3673 /*
3674 * Perform ioctl to get some command history of a pool.
3675 *
3676 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3677 * logical offset of the history buffer to start reading from.
3678 *
3679 * Upon return, 'off' is the next logical offset to read from and
3680 * 'len' is the actual number of bytes read into 'buf'.
3681 */
3682 static int
3683 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3684 {
3685 zfs_cmd_t zc = {"\0"};
3686 libzfs_handle_t *hdl = zhp->zpool_hdl;
3687
3688 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3689
3690 zc.zc_history = (uint64_t)(uintptr_t)buf;
3691 zc.zc_history_len = *len;
3692 zc.zc_history_offset = *off;
3693
3694 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3695 switch (errno) {
3696 case EPERM:
3697 return (zfs_error_fmt(hdl, EZFS_PERM,
3698 dgettext(TEXT_DOMAIN,
3699 "cannot show history for pool '%s'"),
3700 zhp->zpool_name));
3701 case ENOENT:
3702 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3703 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3704 "'%s'"), zhp->zpool_name));
3705 case ENOTSUP:
3706 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3707 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3708 "'%s', pool must be upgraded"), zhp->zpool_name));
3709 default:
3710 return (zpool_standard_error_fmt(hdl, errno,
3711 dgettext(TEXT_DOMAIN,
3712 "cannot get history for '%s'"), zhp->zpool_name));
3713 }
3714 }
3715
3716 *len = zc.zc_history_len;
3717 *off = zc.zc_history_offset;
3718
3719 return (0);
3720 }
3721
3722 /*
3723 * Process the buffer of nvlists, unpacking and storing each nvlist record
3724 * into 'records'. 'leftover' is set to the number of bytes that weren't
3725 * processed because they did not form a complete record.
3726 */
3727 int
3728 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3729 nvlist_t ***records, uint_t *numrecords)
3730 {
3731 uint64_t reclen;
3732 nvlist_t *nv;
3733 int i;
3734
3735 while (bytes_read > sizeof (reclen)) {
3736
3737 /* get length of packed record (stored as little endian) */
3738 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3739 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3740
3741 if (bytes_read < sizeof (reclen) + reclen)
3742 break;
3743
3744 /* unpack record */
3745 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3746 return (ENOMEM);
3747 bytes_read -= sizeof (reclen) + reclen;
3748 buf += sizeof (reclen) + reclen;
3749
3750 /* add record to nvlist array */
3751 (*numrecords)++;
3752 if (ISP2(*numrecords + 1)) {
3753 *records = realloc(*records,
3754 *numrecords * 2 * sizeof (nvlist_t *));
3755 }
3756 (*records)[*numrecords - 1] = nv;
3757 }
3758
3759 *leftover = bytes_read;
3760 return (0);
3761 }
3762
3763 #define HIS_BUF_LEN (128*1024)
3764
3765 /*
3766 * Retrieve the command history of a pool.
3767 */
3768 int
3769 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3770 {
3771 char buf[HIS_BUF_LEN];
3772 uint64_t off = 0;
3773 nvlist_t **records = NULL;
3774 uint_t numrecords = 0;
3775 int err, i;
3776
3777 do {
3778 uint64_t bytes_read = sizeof (buf);
3779 uint64_t leftover;
3780
3781 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3782 break;
3783
3784 /* if nothing else was read in, we're at EOF, just return */
3785 if (!bytes_read)
3786 break;
3787
3788 if ((err = zpool_history_unpack(buf, bytes_read,
3789 &leftover, &records, &numrecords)) != 0)
3790 break;
3791 off -= leftover;
3792
3793 /* CONSTCOND */
3794 } while (1);
3795
3796 if (!err) {
3797 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3798 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3799 records, numrecords) == 0);
3800 }
3801 for (i = 0; i < numrecords; i++)
3802 nvlist_free(records[i]);
3803 free(records);
3804
3805 return (err);
3806 }
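
/*
 * Example: dumping the command lines recorded in the pool history. A
 * minimal sketch; ZPOOL_HIST_CMD is only present in command records:
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		(void) nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords);
 *		for (i = 0; i < numrecords; i++)
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		nvlist_free(nvhis);
 *	}
 */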
3807
3808 /*
3809 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3810 * If there is a new event available 'nvp' will contain a newly allocated
3811 * nvlist and 'dropped' will be set to the number of missed events since
3812 * the last call to this function. When 'nvp' is set to NULL it indicates
3813 * no new events are available. In either case the function returns 0 and
3814 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3815 * function will return a non-zero value. When the function is called in
3816 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3817 * it will not return until a new event is available.
3818 */
3819 int
3820 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3821 int *dropped, unsigned flags, int zevent_fd)
3822 {
3823 zfs_cmd_t zc = {"\0"};
3824 int error = 0;
3825
3826 *nvp = NULL;
3827 *dropped = 0;
3828 zc.zc_cleanup_fd = zevent_fd;
3829
3830 if (flags & ZEVENT_NONBLOCK)
3831 zc.zc_guid = ZEVENT_NONBLOCK;
3832
3833 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3834 return (-1);
3835
3836 retry:
3837 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3838 switch (errno) {
3839 case ESHUTDOWN:
3840 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3841 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3842 goto out;
3843 case ENOENT:
3844 /* Blocking error case should not occur */
3845 if (!(flags & ZEVENT_NONBLOCK))
3846 error = zpool_standard_error_fmt(hdl, errno,
3847 dgettext(TEXT_DOMAIN, "cannot get event"));
3848
3849 goto out;
3850 case ENOMEM:
3851 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3852 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3853 dgettext(TEXT_DOMAIN, "cannot get event"));
3854 goto out;
3855 } else {
3856 goto retry;
3857 }
3858 default:
3859 error = zpool_standard_error_fmt(hdl, errno,
3860 dgettext(TEXT_DOMAIN, "cannot get event"));
3861 goto out;
3862 }
3863 }
3864
3865 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3866 if (error != 0)
3867 goto out;
3868
3869 *dropped = (int)zc.zc_cookie;
3870 out:
3871 zcmd_free_nvlists(&zc);
3872
3873 return (error);
3874 }
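
/*
 * Example: draining all pending events without blocking. A minimal
 * sketch, assuming 'zevent_fd' was opened on ZFS_DEV for this consumer:
 *
 *	nvlist_t *nvl;
 *	int dropped;
 *
 *	while (zpool_events_next(hdl, &nvl, &dropped, ZEVENT_NONBLOCK,
 *	    zevent_fd) == 0 && nvl != NULL) {
 *		... inspect the event nvlist ...
 *		nvlist_free(nvl);
 *	}
 */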
3875
3876 /*
3877 * Clear all events.
3878 */
3879 int
3880 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3881 {
3882 zfs_cmd_t zc = {"\0"};
3883 char msg[1024];
3884
3885 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3886 "cannot clear events"));
3887
3888 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3889 return (zpool_standard_error_fmt(hdl, errno, msg));
3890
3891 if (count != NULL)
3892 *count = (int)zc.zc_cookie; /* # of events cleared */
3893
3894 return (0);
3895 }
3896
3897 /*
3898 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3899 * the passed zevent_fd file handle. On success zero is returned,
3900 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3901 */
3902 int
3903 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3904 {
3905 zfs_cmd_t zc = {"\0"};
3906 int error = 0;
3907
3908 zc.zc_guid = eid;
3909 zc.zc_cleanup_fd = zevent_fd;
3910
3911 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3912 switch (errno) {
3913 case ENOENT:
3914 error = zfs_error_fmt(hdl, EZFS_NOENT,
3915 dgettext(TEXT_DOMAIN, "cannot get event"));
3916 break;
3917
3918 case ENOMEM:
3919 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3920 dgettext(TEXT_DOMAIN, "cannot get event"));
3921 break;
3922
3923 default:
3924 error = zpool_standard_error_fmt(hdl, errno,
3925 dgettext(TEXT_DOMAIN, "cannot get event"));
3926 break;
3927 }
3928 }
3929
3930 return (error);
3931 }
3932
3933 void
3934 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3935 char *pathname, size_t len)
3936 {
3937 zfs_cmd_t zc = {"\0"};
3938 boolean_t mounted = B_FALSE;
3939 char *mntpnt = NULL;
3940 char dsname[MAXNAMELEN];
3941
3942 if (dsobj == 0) {
3943 /* special case for the MOS */
3944 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
3945 (longlong_t)obj);
3946 return;
3947 }
3948
3949 /* get the dataset's name */
3950 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3951 zc.zc_obj = dsobj;
3952 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3953 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3954 /* just write out a path of two object numbers */
3955 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3956 (longlong_t)dsobj, (longlong_t)obj);
3957 return;
3958 }
3959 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3960
3961 /* find out if the dataset is mounted */
3962 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3963
3964 /* get the corrupted object's path */
3965 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3966 zc.zc_obj = obj;
3967 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3968 &zc) == 0) {
3969 if (mounted) {
3970 (void) snprintf(pathname, len, "%s%s", mntpnt,
3971 zc.zc_value);
3972 } else {
3973 (void) snprintf(pathname, len, "%s:%s",
3974 dsname, zc.zc_value);
3975 }
3976 } else {
3977 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
3978 (longlong_t)obj);
3979 }
3980 free(mntpnt);
3981 }
3982
3983 /*
3984 * Read the EFI label from the config, if a label does not exist then
3985 * pass back the error to the caller. If the caller has passed a non-NULL
3986 * diskaddr argument then we set it to the starting address of the EFI
3987 * partition.
3988 */
3989 static int
3990 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3991 {
3992 char *path;
3993 int fd;
3994 char diskname[MAXPATHLEN];
3995 int err = -1;
3996
3997 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3998 return (err);
3999
4000 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4001 strrchr(path, '/'));
4002 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
4003 struct dk_gpt *vtoc;
4004
4005 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4006 if (sb != NULL)
4007 *sb = vtoc->efi_parts[0].p_start;
4008 efi_free(vtoc);
4009 }
4010 (void) close(fd);
4011 }
4012 return (err);
4013 }
4014
4015 /*
4016 * Determine where a partition starts on a disk in the current
4017 * configuration.
4018 */
4019 static diskaddr_t
4020 find_start_block(nvlist_t *config)
4021 {
4022 nvlist_t **child;
4023 uint_t c, children;
4024 diskaddr_t sb = MAXOFFSET_T;
4025 uint64_t wholedisk;
4026
4027 if (nvlist_lookup_nvlist_array(config,
4028 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4029 if (nvlist_lookup_uint64(config,
4030 ZPOOL_CONFIG_WHOLE_DISK,
4031 &wholedisk) != 0 || !wholedisk) {
4032 return (MAXOFFSET_T);
4033 }
4034 if (read_efi_label(config, &sb) < 0)
4035 sb = MAXOFFSET_T;
4036 return (sb);
4037 }
4038
4039 for (c = 0; c < children; c++) {
4040 sb = find_start_block(child[c]);
4041 if (sb != MAXOFFSET_T) {
4042 return (sb);
4043 }
4044 }
4045 return (MAXOFFSET_T);
4046 }
4047
4048 int
4049 zpool_label_disk_wait(char *path, int timeout)
4050 {
4051 struct stat64 statbuf;
4052 int i;
4053
4054 /*
4055 * Wait up to 'timeout' milliseconds for a newly created device to be available
4056 * from the given path. There is a small window when a /dev/ device
4057 * will exist and the udev link will not, so we must wait for the
4058 * symlink. Depending on the udev rules this may take a few seconds.
4059 */
4060 for (i = 0; i < timeout; i++) {
4061 usleep(1000);
4062
4063 errno = 0;
4064 if ((stat64(path, &statbuf) == 0) && (errno == 0))
4065 return (0);
4066 }
4067
4068 return (ENOENT);
4069 }
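
/*
 * Usage illustration (assumed caller, not from the source): a caller that
 * has just written a label would wait for the partition's device node, e.g.
 *
 *	if (zpool_label_disk_wait("/dev/sdb1", 3000) != 0)
 *		(void) fprintf(stderr, "partition node did not appear\n");
 *
 * "/dev/sdb1" is a hypothetical path; zpool_label_disk() below uses the
 * same 3000 ms timeout against the partition path it constructs.
 */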
4070
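/*
 * Read the EFI label back from the device at 'path' and confirm that the
 * primary GPT is intact. Returns 0 on success, the errno from open(2) or
 * the efi_alloc_and_read() error on failure, and EIDRM when the primary
 * GPT is flagged as corrupt.
 */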
4071 int
4072 zpool_label_disk_check(char *path)
4073 {
4074 struct dk_gpt *vtoc;
4075 int fd, err;
4076
4077 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4078 return (errno);
4079
4080 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4081 (void) close(fd);
4082 return (err);
4083 }
4084
4085 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4086 efi_free(vtoc);
4087 (void) close(fd);
4088 return (EIDRM);
4089 }
4090
4091 efi_free(vtoc);
4092 (void) close(fd);
4093 return (0);
4094 }
4095
4096 /*
4097 * Label an individual disk. The name provided is the short name,
4098 * stripped of any leading /dev path.
4099 */
4100 int
4101 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4102 {
4103 char path[MAXPATHLEN];
4104 struct dk_gpt *vtoc;
4105 int rval, fd;
4106 size_t resv = EFI_MIN_RESV_SIZE;
4107 uint64_t slice_size;
4108 diskaddr_t start_block;
4109 char errbuf[1024];
4110
4111 /* prepare an error message just in case */
4112 (void) snprintf(errbuf, sizeof (errbuf),
4113 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4114
4115 if (zhp) {
4116 nvlist_t *nvroot;
4117
4118 #if defined(__sun__) || defined(__sun)
4119 if (zpool_is_bootable(zhp)) {
4120 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4121 "EFI labeled devices are not supported on root "
4122 "pools."));
4123 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
4124 }
4125 #endif
4126
4127 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4128 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4129
4130 if (zhp->zpool_start_block == 0)
4131 start_block = find_start_block(nvroot);
4132 else
4133 start_block = zhp->zpool_start_block;
4134 zhp->zpool_start_block = start_block;
4135 } else {
4136 /* new pool */
4137 start_block = NEW_START_BLOCK;
4138 }
4139
4140 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4141
4142 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
4143 /*
4144 * This shouldn't happen. We've long since verified that this
4145 * is a valid device.
4146 */
4147 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4148 "label '%s': unable to open device: %d"), path, errno);
4149 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4150 }
4151
4152 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4153 /*
4154 * The only way this can fail is if we run out of memory, or we
4155 * were unable to read the disk's capacity.
4156 */
4157 if (errno == ENOMEM)
4158 (void) no_memory(hdl);
4159
4160 (void) close(fd);
4161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4162 "label '%s': unable to read disk capacity"), path);
4163
4164 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4165 }
4166
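/*
 * Size the data slice: everything from start_block through the last usable
 * LBA, minus the EFI_MIN_RESV_SIZE sectors kept for the reserved slice 8
 * below, rounded down to PARTITION_END_ALIGNMENT. Illustrative numbers
 * only: with efi_last_u_lba = 1953525134 and start_block = 2048, the data
 * slice is P2ALIGN(1953525135 - EFI_MIN_RESV_SIZE - 2048,
 * PARTITION_END_ALIGNMENT) sectors long.
 */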
4167 slice_size = vtoc->efi_last_u_lba + 1;
4168 slice_size -= EFI_MIN_RESV_SIZE;
4169 if (start_block == MAXOFFSET_T)
4170 start_block = NEW_START_BLOCK;
4171 slice_size -= start_block;
4172 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4173
4174 vtoc->efi_parts[0].p_start = start_block;
4175 vtoc->efi_parts[0].p_size = slice_size;
4176
4177 /*
4178 * Why we use V_USR: V_BACKUP confuses users, and is considered
4179 * disposable by some EFI utilities (since EFI doesn't have a backup
4180 * slice). V_UNASSIGNED is supposed to be used only for zero size
4181 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4182 * etc. were all pretty specific. V_USR is as close to reality as we
4183 * can get, in the absence of V_OTHER.
4184 */
4185 vtoc->efi_parts[0].p_tag = V_USR;
4186 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4187
4188 vtoc->efi_parts[8].p_start = slice_size + start_block;
4189 vtoc->efi_parts[8].p_size = resv;
4190 vtoc->efi_parts[8].p_tag = V_RESERVED;
4191
4192 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4193 /*
4194 * Some block drivers (like pcata) may not support EFI
4195 * GPT labels. Print out a helpful error message directing
4196 * the user to manually label the disk and give a specific
4197 * slice.
4198 */
4199 (void) close(fd);
4200 efi_free(vtoc);
4201
4202 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4203 "parted(8) and then provide a specific slice: %d"), rval);
4204 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4205 }
4206
4207 (void) close(fd);
4208 efi_free(vtoc);
4209
4210 /* Wait for the first expected partition to appear. */
4211
4212 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4213 (void) zfs_append_partition(path, MAXPATHLEN);
4214
4215 rval = zpool_label_disk_wait(path, 3000);
4216 if (rval) {
4217 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4218 "detect device partitions on '%s': %d"), path, rval);
4219 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4220 }
4221
4222 /* We can't be too paranoid. Read the label back and verify it. */
4223 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4224 rval = zpool_label_disk_check(path);
4225 if (rval) {
4226 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4227 "EFI label on '%s' is damaged. Ensure\nthis device "
4228 "is not in use, and is functioning properly: %d"),
4229 path, rval);
4230 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4231 }
4232
4233 return (0);
4234 }
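
/*
 * Caller-side sketch (illustrative only; the short name "sdb" and the
 * error handling are assumptions): zpool_label_disk() takes the device
 * name stripped of any leading /dev path, and callers typically re-append
 * the partition suffix before using the device:
 *
 *	char path[MAXPATHLEN];
 *
 *	if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *		return (-1);
 *
 *	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, "sdb");
 *	(void) zfs_append_partition(path, MAXPATHLEN);
 */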