/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
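
/*
 * Editorial usage sketch (illustrative only, not part of the library):
 * reading a numeric pool property such as the on-disk version.
 *
 *	uint64_t vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
 *
 * Passing NULL for 'src' simply skips reporting where the value came from.
 */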

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
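
/*
 * Hypothetical usage sketch (editorial illustration only): fetching a
 * formatted property value into a caller-supplied buffer.
 *
 *	char cap[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, cap,
 *	    sizeof (cap), NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", cap);
 *
 * With 'literal' set to B_FALSE this prints e.g. "42%"; with B_TRUE it
 * would print the raw number "42".
 */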

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on. Assumes 'bootfs' is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
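
/*
 * Worked example for the check above (editorial illustration): for pool
 * "tank", both "tank" and "tank/ROOT/default" are accepted, while
 * "tanker/fs" is rejected because the character following the matched
 * prefix is neither '/' nor '\0'.
 */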

#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
548 "property '%s' number %d is invalid."),
549 propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
566 "property '%s' number %d is invalid."),
567 propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool it is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
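
/*
 * Illustrative caller-side sketch (editorial, not part of this file):
 * callers such as zpool_create() and zpool_set_prop() build a string
 * nvlist and let zpool_valid_proplist() parse and validate it, e.g.:
 *
 *	nvlist_t *props = NULL;
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props, "ashift", "12") == 0);
 *
 * The returned 'retprops' then carries the parsed (numeric) values.
 */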

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
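
/*
 * Hypothetical usage sketch (illustrative only):
 *
 *	if (zpool_set_prop(zhp, "comment", "rack 12, shelf 3") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 *
 * libzfs_error_description() is the usual way for callers to retrieve
 * the extended error text recorded via zfs_error_aux().
 */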

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices use a stripe width of 128K, while other vendors prefer a 1M
 * alignment. It is best to play it safe and ensure a 1M alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1M aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
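
/*
 * Worked arithmetic for the constants above (editorial note): with 512B
 * sectors, 2048 blocks * 512B = 1,048,576B = 1MiB, so a partition that
 * starts and ends on a multiple of 2048 blocks is 1MiB aligned.
 */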

/*
 * Validate the given pool name, optionally reporting an extended error
 * message through 'hdl' when one is provided.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
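
/*
 * Hypothetical open/use/close sketch (illustrative only):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		... use zhp ...
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */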

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

#if defined(__sun__) || defined(__sun)
	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s], 0));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
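
/*
 * Illustrative note (editorial): the 'config' nvlist normally comes from
 * the pool-discovery path (see zpool_find_import(), referenced below),
 * and each discovered pool's config is passed here with an optional new
 * name and altroot, e.g. zpool_import(hdl, config, NULL, NULL) to import
 * a pool in place under its original name.
 */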

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
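
/*
 * Hypothetical usage sketch (illustrative only): starting and then
 * cancelling a scrub.
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);
 */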

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2041 * is used to check all possible expanded paths.
2042 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2043 *
2044 * Otherwise, all other searches are simple string compares.
2045 */
2046 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2047 uint64_t wholedisk = 0;
2048
2049 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2050 &wholedisk);
2051 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2052 return (nv);
2053
2054 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2055 char *type, *idx, *end, *p;
2056 uint64_t id, vdev_id;
2057
2058 /*
2059 * Determine our vdev type, keeping in mind
2060 * that the srchval is composed of a type and
2061 * vdev id pair (i.e. mirror-4).
2062 */
2063 if ((type = strdup(srchval)) == NULL)
2064 return (NULL);
2065
2066 if ((p = strrchr(type, '-')) == NULL) {
2067 free(type);
2068 break;
2069 }
2070 idx = p + 1;
2071 *p = '\0';
2072
2073 /*
2074 * If the types don't match then keep looking.
2075 */
2076 if (strncmp(val, type, strlen(val)) != 0) {
2077 free(type);
2078 break;
2079 }
2080
2081 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2082 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2083 strncmp(type, VDEV_TYPE_MIRROR,
2084 strlen(VDEV_TYPE_MIRROR)) == 0);
2085 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2086 &id) == 0);
2087
2088 errno = 0;
2089 vdev_id = strtoull(idx, &end, 10);
2090
2091 free(type);
2092 if (errno != 0)
2093 return (NULL);
2094
2095 /*
2096 * Now verify that we have the correct vdev id.
2097 */
2098 if (vdev_id == id)
2099 return (nv);
2100 }
2101
2102 /*
2103 * Common case
2104 */
2105 if (strcmp(srchval, val) == 0)
2106 return (nv);
2107 break;
2108 }
2109
2110 default:
2111 break;
2112 }
2113
2114 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2115 &child, &children) != 0)
2116 return (NULL);
2117
2118 for (c = 0; c < children; c++) {
2119 if ((ret = vdev_to_nvlist_iter(child[c], search,
2120 avail_spare, l2cache, NULL)) != NULL) {
2121 /*
2122 * The 'is_log' value is only set for the toplevel
2123 * vdev, not the leaf vdevs. So we always lookup the
2124 * log device from the root of the vdev tree (where
2125 * 'log' is non-NULL).
2126 */
2127 if (log != NULL &&
2128 nvlist_lookup_uint64(child[c],
2129 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2130 is_log) {
2131 *log = B_TRUE;
2132 }
2133 return (ret);
2134 }
2135 }
2136
2137 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2138 &child, &children) == 0) {
2139 for (c = 0; c < children; c++) {
2140 if ((ret = vdev_to_nvlist_iter(child[c], search,
2141 avail_spare, l2cache, NULL)) != NULL) {
2142 *avail_spare = B_TRUE;
2143 return (ret);
2144 }
2145 }
2146 }
2147
2148 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2149 &child, &children) == 0) {
2150 for (c = 0; c < children; c++) {
2151 if ((ret = vdev_to_nvlist_iter(child[c], search,
2152 avail_spare, l2cache, NULL)) != NULL) {
2153 *l2cache = B_TRUE;
2154 return (ret);
2155 }
2156 }
2157 }
2158
2159 return (NULL);
2160 }
2161
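/*
 * Illustrative sketch (not part of the library): the 'search' nvlist
 * shapes understood by vdev_to_nvlist_iter() above; error checking is
 * omitted for brevity.
 *
 *	nvlist_t *search;
 *	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *
 *	by guid:  nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
 *	by path:  nvlist_add_string(search, ZPOOL_CONFIG_PATH, "/dev/sda");
 *	by name:  nvlist_add_string(search, ZPOOL_CONFIG_TYPE, "mirror-4");
 */
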
2162 /*
2163 * Given a physical path (minus the "/devices" prefix), find the
2164 * associated vdev.
2165 */
2166 nvlist_t *
2167 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2168 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2169 {
2170 nvlist_t *search, *nvroot, *ret;
2171
2172 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2173 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2174
2175 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2176 &nvroot) == 0);
2177
2178 *avail_spare = B_FALSE;
2179 *l2cache = B_FALSE;
2180 if (log != NULL)
2181 *log = B_FALSE;
2182 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2183 nvlist_free(search);
2184
2185 return (ret);
2186 }
2187
2188 /*
2189  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2190 */
2191 boolean_t
2192 zpool_vdev_is_interior(const char *name)
2193 {
2194 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2195 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2196 return (B_TRUE);
2197 return (B_FALSE);
2198 }
2199
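/*
 * For example, zpool_vdev_is_interior() returns B_TRUE for names such as
 * "mirror-0" or "raidz2-1", and B_FALSE for leaf names such as "sda".
 */
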
2200 nvlist_t *
2201 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2202 boolean_t *l2cache, boolean_t *log)
2203 {
2204 char *end;
2205 nvlist_t *nvroot, *search, *ret;
2206 uint64_t guid;
2207
2208 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2209
2210 guid = strtoull(path, &end, 0);
2211 if (guid != 0 && *end == '\0') {
2212 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2213 } else if (zpool_vdev_is_interior(path)) {
2214 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2215 } else {
2216 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2217 }
2218
2219 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2220 &nvroot) == 0);
2221
2222 *avail_spare = B_FALSE;
2223 *l2cache = B_FALSE;
2224 if (log != NULL)
2225 *log = B_FALSE;
2226 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2227 nvlist_free(search);
2228
2229 return (ret);
2230 }
2231
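/*
 * Illustrative usage sketch (not part of the library): looking up a leaf
 * vdev by its short device name before operating on it.  'zhp' is assumed
 * to be an open pool handle.
 *
 *	boolean_t spare, l2cache, islog;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "sda", &spare, &l2cache,
 *	    &islog)) == NULL || spare || l2cache)
 *		return (-1);
 */
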
2232 static int
2233 vdev_online(nvlist_t *nv)
2234 {
2235 uint64_t ival;
2236
2237 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2238 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2239 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2240 return (0);
2241
2242 return (1);
2243 }
2244
2245 /*
2246 * Helper function for zpool_get_physpaths().
2247 */
2248 static int
2249 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2250 size_t *bytes_written)
2251 {
2252 size_t bytes_left, pos, rsz;
2253 char *tmppath;
2254 const char *format;
2255
2256 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2257 &tmppath) != 0)
2258 return (EZFS_NODEVICE);
2259
2260 pos = *bytes_written;
2261 bytes_left = physpath_size - pos;
2262 format = (pos == 0) ? "%s" : " %s";
2263
2264 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2265 *bytes_written += rsz;
2266
2267 if (rsz >= bytes_left) {
2268 /* if physpath was not copied properly, clear it */
2269 if (bytes_left != 0) {
2270 physpath[pos] = 0;
2271 }
2272 return (EZFS_NOSPC);
2273 }
2274 return (0);
2275 }
2276
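/*
 * For example (paths hypothetical), after two successful calls for the
 * sides of a mirror the caller's buffer holds a space-separated list:
 *
 *	"/pci@0,0/ide@1f,1/disk@0,0:a /pci@0,0/ide@1f,2/disk@0,0:a"
 *
 * On overflow the partial copy is cleared and EZFS_NOSPC is returned.
 */
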
2277 static int
2278 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2279 size_t *rsz, boolean_t is_spare)
2280 {
2281 char *type;
2282 int ret;
2283
2284 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2285 return (EZFS_INVALCONFIG);
2286
2287 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2288 /*
2289 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2290 * For a spare vdev, we only want to boot from the active
2291 * spare device.
2292 */
2293 if (is_spare) {
2294 uint64_t spare = 0;
2295 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2296 &spare);
2297 if (!spare)
2298 return (EZFS_INVALCONFIG);
2299 }
2300
2301 if (vdev_online(nv)) {
2302 if ((ret = vdev_get_one_physpath(nv, physpath,
2303 phypath_size, rsz)) != 0)
2304 return (ret);
2305 }
2306 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2307 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2308 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2309 nvlist_t **child;
2310 uint_t count;
2311 int i, ret;
2312
2313 if (nvlist_lookup_nvlist_array(nv,
2314 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2315 return (EZFS_INVALCONFIG);
2316
2317 for (i = 0; i < count; i++) {
2318 ret = vdev_get_physpaths(child[i], physpath,
2319 phypath_size, rsz, is_spare);
2320 if (ret == EZFS_NOSPC)
2321 return (ret);
2322 }
2323 }
2324
2325 return (EZFS_POOL_INVALARG);
2326 }
2327
2328 /*
2329 * Get phys_path for a root pool config.
2330 * Return 0 on success; non-zero on failure.
2331 */
2332 static int
2333 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2334 {
2335 size_t rsz;
2336 nvlist_t *vdev_root;
2337 nvlist_t **child;
2338 uint_t count;
2339 char *type;
2340
2341 rsz = 0;
2342
2343 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2344 &vdev_root) != 0)
2345 return (EZFS_INVALCONFIG);
2346
2347 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2348 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2349 &child, &count) != 0)
2350 return (EZFS_INVALCONFIG);
2351
2352 #if defined(__sun__) || defined(__sun)
2353 /*
2354 * root pool can not have EFI labeled disks and can only have
2355 * a single top-level vdev.
2356 */
2357 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2358 pool_uses_efi(vdev_root))
2359 return (EZFS_POOL_INVALARG);
2360 #endif
2361
2362 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2363 B_FALSE);
2364
2365 /* No online devices */
2366 if (rsz == 0)
2367 return (EZFS_NODEVICE);
2368
2369 return (0);
2370 }
2371
2372 /*
2373 * Get phys_path for a root pool
2374 * Return 0 on success; non-zero on failure.
2375 */
2376 int
2377 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2378 {
2379 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2380 phypath_size));
2381 }
2382
2383 /*
2384  * If the device has been dynamically expanded then we need to relabel
2385 * the disk to use the new unallocated space.
2386 */
2387 static int
2388 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2389 {
2390 int fd, error;
2391
2392 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2393 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2394 "relabel '%s': unable to open device: %d"), path, errno);
2395 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2396 }
2397
2398 /*
2399 * It's possible that we might encounter an error if the device
2400 * does not have any unallocated space left. If so, we simply
2401 * ignore that error and continue on.
2402 *
2403 * Also, we don't call efi_rescan() - that would just return EBUSY.
2404 * The module will do it for us in vdev_disk_open().
2405 */
2406 error = efi_use_whole_disk(fd);
2407 (void) close(fd);
2408 if (error && error != VT_ENOSPC) {
2409 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2410 "relabel '%s': unable to read disk capacity"), path);
2411 return (zfs_error(hdl, EZFS_NOCAP, msg));
2412 }
2413 return (0);
2414 }
2415
2416 /*
2417 * Bring the specified vdev online. The 'flags' parameter is a set of the
2418 * ZFS_ONLINE_* flags.
2419 */
2420 int
2421 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2422 vdev_state_t *newstate)
2423 {
2424 zfs_cmd_t zc = {"\0"};
2425 char msg[1024];
2426 nvlist_t *tgt;
2427 boolean_t avail_spare, l2cache, islog;
2428 libzfs_handle_t *hdl = zhp->zpool_hdl;
2429 int error;
2430
2431 if (flags & ZFS_ONLINE_EXPAND) {
2432 (void) snprintf(msg, sizeof (msg),
2433 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2434 } else {
2435 (void) snprintf(msg, sizeof (msg),
2436 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2437 }
2438
2439 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2440 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2441 &islog)) == NULL)
2442 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2443
2444 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2445
2446 if (avail_spare)
2447 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2448
2449 if (flags & ZFS_ONLINE_EXPAND ||
2450 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2451 uint64_t wholedisk = 0;
2452
2453 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2454 &wholedisk);
2455
2456 /*
2457 * XXX - L2ARC 1.0 devices can't support expansion.
2458 */
2459 if (l2cache) {
2460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2461 "cannot expand cache devices"));
2462 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2463 }
2464
2465 if (wholedisk) {
2466 const char *fullpath = path;
2467 char buf[MAXPATHLEN];
2468
2469 if (path[0] != '/') {
2470 error = zfs_resolve_shortname(path, buf,
2471 sizeof (buf));
2472 if (error != 0)
2473 return (zfs_error(hdl, EZFS_NODEVICE,
2474 msg));
2475
2476 fullpath = buf;
2477 }
2478
2479 error = zpool_relabel_disk(hdl, fullpath, msg);
2480 if (error != 0)
2481 return (error);
2482 }
2483 }
2484
2485 zc.zc_cookie = VDEV_STATE_ONLINE;
2486 zc.zc_obj = flags;
2487
2488 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2489 if (errno == EINVAL) {
2490 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2491 "from this pool into a new one. Use '%s' "
2492 "instead"), "zpool detach");
2493 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2494 }
2495 return (zpool_standard_error(hdl, errno, msg));
2496 }
2497
2498 *newstate = zc.zc_cookie;
2499 return (0);
2500 }
2501
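/*
 * Illustrative usage sketch (not part of the library): onlining a device
 * and requesting expansion, roughly what 'zpool online -e' does.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		return (0);
 */
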
2502 /*
2503  * Take the specified vdev offline.
2504 */
2505 int
2506 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2507 {
2508 zfs_cmd_t zc = {"\0"};
2509 char msg[1024];
2510 nvlist_t *tgt;
2511 boolean_t avail_spare, l2cache;
2512 libzfs_handle_t *hdl = zhp->zpool_hdl;
2513
2514 (void) snprintf(msg, sizeof (msg),
2515 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2516
2517 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2518 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2519 NULL)) == NULL)
2520 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2521
2522 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2523
2524 if (avail_spare)
2525 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2526
2527 zc.zc_cookie = VDEV_STATE_OFFLINE;
2528 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2529
2530 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2531 return (0);
2532
2533 switch (errno) {
2534 case EBUSY:
2535
2536 /*
2537 * There are no other replicas of this device.
2538 */
2539 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2540
2541 case EEXIST:
2542 /*
2543 		 * The log device has unplayed logs.
2544 */
2545 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2546
2547 default:
2548 return (zpool_standard_error(hdl, errno, msg));
2549 }
2550 }
2551
2552 /*
2553 * Mark the given vdev faulted.
2554 */
2555 int
2556 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2557 {
2558 zfs_cmd_t zc = {"\0"};
2559 char msg[1024];
2560 libzfs_handle_t *hdl = zhp->zpool_hdl;
2561
2562 (void) snprintf(msg, sizeof (msg),
2563 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2564
2565 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2566 zc.zc_guid = guid;
2567 zc.zc_cookie = VDEV_STATE_FAULTED;
2568 zc.zc_obj = aux;
2569
2570 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2571 return (0);
2572
2573 switch (errno) {
2574 case EBUSY:
2575
2576 /*
2577 * There are no other replicas of this device.
2578 */
2579 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2580
2581 default:
2582 return (zpool_standard_error(hdl, errno, msg));
2583 }
2584
2585 }
2586
2587 /*
2588 * Mark the given vdev degraded.
2589 */
2590 int
2591 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2592 {
2593 zfs_cmd_t zc = {"\0"};
2594 char msg[1024];
2595 libzfs_handle_t *hdl = zhp->zpool_hdl;
2596
2597 (void) snprintf(msg, sizeof (msg),
2598 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2599
2600 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2601 zc.zc_guid = guid;
2602 zc.zc_cookie = VDEV_STATE_DEGRADED;
2603 zc.zc_obj = aux;
2604
2605 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2606 return (0);
2607
2608 return (zpool_standard_error(hdl, errno, msg));
2609 }
2610
2611 /*
2612 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2613 * a hot spare.
2614 */
2615 static boolean_t
2616 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2617 {
2618 nvlist_t **child;
2619 uint_t c, children;
2620 char *type;
2621
2622 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2623 &children) == 0) {
2624 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2625 &type) == 0);
2626
2627 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2628 children == 2 && child[which] == tgt)
2629 return (B_TRUE);
2630
2631 for (c = 0; c < children; c++)
2632 if (is_replacing_spare(child[c], tgt, which))
2633 return (B_TRUE);
2634 }
2635
2636 return (B_FALSE);
2637 }
2638
2639 /*
2640 * Attach new_disk (fully described by nvroot) to old_disk.
2641 * If 'replacing' is specified, the new disk will replace the old one.
2642 */
2643 int
2644 zpool_vdev_attach(zpool_handle_t *zhp,
2645 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2646 {
2647 zfs_cmd_t zc = {"\0"};
2648 char msg[1024];
2649 int ret;
2650 nvlist_t *tgt;
2651 boolean_t avail_spare, l2cache, islog;
2652 uint64_t val;
2653 char *newname;
2654 nvlist_t **child;
2655 uint_t children;
2656 nvlist_t *config_root;
2657 libzfs_handle_t *hdl = zhp->zpool_hdl;
2658 boolean_t rootpool = zpool_is_bootable(zhp);
2659
2660 if (replacing)
2661 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2662 "cannot replace %s with %s"), old_disk, new_disk);
2663 else
2664 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2665 "cannot attach %s to %s"), new_disk, old_disk);
2666
2667 #if defined(__sun__) || defined(__sun)
2668 /*
2669 * If this is a root pool, make sure that we're not attaching an
2670 * EFI labeled device.
2671 */
2672 if (rootpool && pool_uses_efi(nvroot)) {
2673 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2674 "EFI labeled devices are not supported on root pools."));
2675 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2676 }
2677 #endif
2678
2679 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2680 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2681 &islog)) == 0)
2682 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2683
2684 if (avail_spare)
2685 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2686
2687 if (l2cache)
2688 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2689
2690 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2691 zc.zc_cookie = replacing;
2692
2693 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2694 &child, &children) != 0 || children != 1) {
2695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2696 "new device must be a single disk"));
2697 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2698 }
2699
2700 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2701 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2702
2703 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2704 return (-1);
2705
2706 /*
2707 * If the target is a hot spare that has been swapped in, we can only
2708 * replace it with another hot spare.
2709 */
2710 if (replacing &&
2711 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2712 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2713 NULL) == NULL || !avail_spare) &&
2714 is_replacing_spare(config_root, tgt, 1)) {
2715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2716 "can only be replaced by another hot spare"));
2717 free(newname);
2718 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2719 }
2720
2721 free(newname);
2722
2723 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2724 return (-1);
2725
2726 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2727
2728 zcmd_free_nvlists(&zc);
2729
2730 if (ret == 0) {
2731 if (rootpool) {
2732 /*
2733 * XXX need a better way to prevent user from
2734 * booting up a half-baked vdev.
2735 */
2736 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2737 "sure to wait until resilver is done "
2738 "before rebooting.\n"));
2739 }
2740 return (0);
2741 }
2742
2743 switch (errno) {
2744 case ENOTSUP:
2745 /*
2746 * Can't attach to or replace this type of vdev.
2747 */
2748 if (replacing) {
2749 uint64_t version = zpool_get_prop_int(zhp,
2750 ZPOOL_PROP_VERSION, NULL);
2751
2752 if (islog)
2753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2754 "cannot replace a log with a spare"));
2755 else if (version >= SPA_VERSION_MULTI_REPLACE)
2756 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2757 "already in replacing/spare config; wait "
2758 "for completion or use 'zpool detach'"));
2759 else
2760 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2761 "cannot replace a replacing device"));
2762 } else {
2763 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2764 "can only attach to mirrors and top-level "
2765 "disks"));
2766 }
2767 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2768 break;
2769
2770 case EINVAL:
2771 /*
2772 * The new device must be a single disk.
2773 */
2774 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2775 "new device must be a single disk"));
2776 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2777 break;
2778
2779 case EBUSY:
2780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2781 new_disk);
2782 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2783 break;
2784
2785 case EOVERFLOW:
2786 /*
2787 * The new device is too small.
2788 */
2789 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2790 "device is too small"));
2791 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2792 break;
2793
2794 case EDOM:
2795 /*
2796 * The new device has a different optimal sector size.
2797 */
2798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2799 "new device has a different optimal sector size; use the "
2800 "option '-o ashift=N' to override the optimal size"));
2801 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2802 break;
2803
2804 case ENAMETOOLONG:
2805 /*
2806 * The resulting top-level vdev spec won't fit in the label.
2807 */
2808 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2809 break;
2810
2811 default:
2812 (void) zpool_standard_error(hdl, errno, msg);
2813 }
2814
2815 return (-1);
2816 }
2817
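/*
 * Illustrative sketch (not part of the library): the 'nvroot' argument to
 * zpool_vdev_attach() is a root vdev with exactly one disk child.  The
 * zpool command builds it from the command line; by hand, and with error
 * checking omitted, it resembles:
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *	nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *
 *	(void) zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot, 1);
 *
 * A real caller also labels the new disk first; see zpool_label_disk()
 * near the end of this file.
 */
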
2818 /*
2819 * Detach the specified device.
2820 */
2821 int
2822 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2823 {
2824 zfs_cmd_t zc = {"\0"};
2825 char msg[1024];
2826 nvlist_t *tgt;
2827 boolean_t avail_spare, l2cache;
2828 libzfs_handle_t *hdl = zhp->zpool_hdl;
2829
2830 (void) snprintf(msg, sizeof (msg),
2831 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2832
2833 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2834 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2835 NULL)) == 0)
2836 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2837
2838 if (avail_spare)
2839 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2840
2841 if (l2cache)
2842 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2843
2844 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2845
2846 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2847 return (0);
2848
2849 switch (errno) {
2850
2851 case ENOTSUP:
2852 /*
2853 * Can't detach from this type of vdev.
2854 */
2855 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2856 "applicable to mirror and replacing vdevs"));
2857 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2858 break;
2859
2860 case EBUSY:
2861 /*
2862 * There are no other replicas of this device.
2863 */
2864 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2865 break;
2866
2867 default:
2868 (void) zpool_standard_error(hdl, errno, msg);
2869 }
2870
2871 return (-1);
2872 }
2873
2874 /*
2875 * Find a mirror vdev in the source nvlist.
2876 *
2877 * The mchild array contains a list of disks in one of the top-level mirrors
2878 * of the source pool. The schild array contains a list of disks that the
2879 * user specified on the command line. We loop over the mchild array to
2880 * see if any entry in the schild array matches.
2881 *
2882 * If a disk in the mchild array is found in the schild array, we return
2883 * the index of that entry. Otherwise we return -1.
2884 */
2885 static int
2886 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2887 nvlist_t **schild, uint_t schildren)
2888 {
2889 uint_t mc;
2890
2891 for (mc = 0; mc < mchildren; mc++) {
2892 uint_t sc;
2893 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2894 mchild[mc], 0);
2895
2896 for (sc = 0; sc < schildren; sc++) {
2897 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2898 schild[sc], 0);
2899 boolean_t result = (strcmp(mpath, spath) == 0);
2900
2901 free(spath);
2902 if (result) {
2903 free(mpath);
2904 return (mc);
2905 }
2906 }
2907
2908 free(mpath);
2909 }
2910
2911 return (-1);
2912 }
2913
2914 /*
2915  * Split a mirror pool.  If '*newroot' is NULL, then a new nvlist
2916  * is generated and it is the responsibility of the caller to free it.
2917 */
2918 int
2919 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2920 nvlist_t *props, splitflags_t flags)
2921 {
2922 zfs_cmd_t zc = {"\0"};
2923 char msg[1024];
2924 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2925 nvlist_t **varray = NULL, *zc_props = NULL;
2926 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2927 libzfs_handle_t *hdl = zhp->zpool_hdl;
2928 uint64_t vers;
2929 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2930 int retval = 0;
2931
2932 (void) snprintf(msg, sizeof (msg),
2933 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2934
2935 if (!zpool_name_valid(hdl, B_FALSE, newname))
2936 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2937
2938 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2939 (void) fprintf(stderr, gettext("Internal error: unable to "
2940 "retrieve pool configuration\n"));
2941 return (-1);
2942 }
2943
2944 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2945 == 0);
2946 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2947
2948 if (props) {
2949 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2950 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2951 props, vers, flags, msg)) == NULL)
2952 return (-1);
2953 }
2954
2955 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2956 &children) != 0) {
2957 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2958 "Source pool is missing vdev tree"));
2959 nvlist_free(zc_props);
2960 return (-1);
2961 }
2962
2963 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2964 vcount = 0;
2965
2966 if (*newroot == NULL ||
2967 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2968 &newchild, &newchildren) != 0)
2969 newchildren = 0;
2970
2971 for (c = 0; c < children; c++) {
2972 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2973 char *type;
2974 nvlist_t **mchild, *vdev;
2975 uint_t mchildren;
2976 int entry;
2977
2978 /*
2979 * Unlike cache & spares, slogs are stored in the
2980 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2981 */
2982 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2983 &is_log);
2984 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2985 &is_hole);
2986 if (is_log || is_hole) {
2987 /*
2988 * Create a hole vdev and put it in the config.
2989 */
2990 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2991 goto out;
2992 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2993 VDEV_TYPE_HOLE) != 0)
2994 goto out;
2995 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2996 1) != 0)
2997 goto out;
2998 if (lastlog == 0)
2999 lastlog = vcount;
3000 varray[vcount++] = vdev;
3001 continue;
3002 }
3003 lastlog = 0;
3004 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3005 == 0);
3006 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3007 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3008 "Source pool must be composed only of mirrors\n"));
3009 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3010 goto out;
3011 }
3012
3013 verify(nvlist_lookup_nvlist_array(child[c],
3014 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3015
3016 /* find or add an entry for this top-level vdev */
3017 if (newchildren > 0 &&
3018 (entry = find_vdev_entry(zhp, mchild, mchildren,
3019 newchild, newchildren)) >= 0) {
3020 /* We found a disk that the user specified. */
3021 vdev = mchild[entry];
3022 ++found;
3023 } else {
3024 /* User didn't specify a disk for this vdev. */
3025 vdev = mchild[mchildren - 1];
3026 }
3027
3028 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3029 goto out;
3030 }
3031
3032 /* did we find every disk the user specified? */
3033 if (found != newchildren) {
3034 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3035 "include at most one disk from each mirror"));
3036 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3037 goto out;
3038 }
3039
3040 /* Prepare the nvlist for populating. */
3041 if (*newroot == NULL) {
3042 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3043 goto out;
3044 freelist = B_TRUE;
3045 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3046 VDEV_TYPE_ROOT) != 0)
3047 goto out;
3048 } else {
3049 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3050 }
3051
3052 /* Add all the children we found */
3053 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3054 lastlog == 0 ? vcount : lastlog) != 0)
3055 goto out;
3056
3057 /*
3058 * If we're just doing a dry run, exit now with success.
3059 */
3060 if (flags.dryrun) {
3061 memory_err = B_FALSE;
3062 freelist = B_FALSE;
3063 goto out;
3064 }
3065
3066 /* now build up the config list & call the ioctl */
3067 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3068 goto out;
3069
3070 if (nvlist_add_nvlist(newconfig,
3071 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3072 nvlist_add_string(newconfig,
3073 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3074 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3075 goto out;
3076
3077 /*
3078 * The new pool is automatically part of the namespace unless we
3079 * explicitly export it.
3080 */
3081 if (!flags.import)
3082 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3083 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3084 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3085 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3086 goto out;
3087 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3088 goto out;
3089
3090 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3091 retval = zpool_standard_error(hdl, errno, msg);
3092 goto out;
3093 }
3094
3095 freelist = B_FALSE;
3096 memory_err = B_FALSE;
3097
3098 out:
3099 if (varray != NULL) {
3100 int v;
3101
3102 for (v = 0; v < vcount; v++)
3103 nvlist_free(varray[v]);
3104 free(varray);
3105 }
3106 zcmd_free_nvlists(&zc);
3107 nvlist_free(zc_props);
3108 nvlist_free(newconfig);
3109 if (freelist) {
3110 nvlist_free(*newroot);
3111 *newroot = NULL;
3112 }
3113
3114 if (retval != 0)
3115 return (retval);
3116
3117 if (memory_err)
3118 return (no_memory(hdl));
3119
3120 return (0);
3121 }
3122
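/*
 * Illustrative usage sketch (not part of the library): a dry-run split,
 * as performed by 'zpool split -n'.  On success '*newroot' contains the
 * generated config and must be freed by the caller.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */
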
3123 /*
3124 * Remove the given device. Currently, this is supported only for hot spares
3125 * and level 2 cache devices.
3126 */
3127 int
3128 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3129 {
3130 zfs_cmd_t zc = {"\0"};
3131 char msg[1024];
3132 nvlist_t *tgt;
3133 boolean_t avail_spare, l2cache, islog;
3134 libzfs_handle_t *hdl = zhp->zpool_hdl;
3135 uint64_t version;
3136
3137 (void) snprintf(msg, sizeof (msg),
3138 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3139
3140 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3141 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3142 &islog)) == 0)
3143 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3144 /*
3145 * XXX - this should just go away.
3146 */
3147 if (!avail_spare && !l2cache && !islog) {
3148 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3149 "only inactive hot spares, cache, top-level, "
3150 "or log devices can be removed"));
3151 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3152 }
3153
3154 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3155 if (islog && version < SPA_VERSION_HOLES) {
3156 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3157 "pool must be upgrade to support log removal"));
3158 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3159 }
3160
3161 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3162
3163 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3164 return (0);
3165
3166 return (zpool_standard_error(hdl, errno, msg));
3167 }
3168
3169 /*
3170 * Clear the errors for the pool, or the particular device if specified.
3171 */
3172 int
3173 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3174 {
3175 zfs_cmd_t zc = {"\0"};
3176 char msg[1024];
3177 nvlist_t *tgt;
3178 zpool_rewind_policy_t policy;
3179 boolean_t avail_spare, l2cache;
3180 libzfs_handle_t *hdl = zhp->zpool_hdl;
3181 nvlist_t *nvi = NULL;
3182 int error;
3183
3184 if (path)
3185 (void) snprintf(msg, sizeof (msg),
3186 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3187 path);
3188 else
3189 (void) snprintf(msg, sizeof (msg),
3190 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3191 zhp->zpool_name);
3192
3193 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3194 if (path) {
3195 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3196 &l2cache, NULL)) == 0)
3197 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3198
3199 /*
3200 * Don't allow error clearing for hot spares. Do allow
3201 * error clearing for l2cache devices.
3202 */
3203 if (avail_spare)
3204 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3205
3206 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3207 &zc.zc_guid) == 0);
3208 }
3209
3210 zpool_get_rewind_policy(rewindnvl, &policy);
3211 zc.zc_cookie = policy.zrp_request;
3212
3213 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3214 return (-1);
3215
3216 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3217 return (-1);
3218
3219 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3220 errno == ENOMEM) {
3221 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3222 zcmd_free_nvlists(&zc);
3223 return (-1);
3224 }
3225 }
3226
3227 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3228 errno != EPERM && errno != EACCES)) {
3229 if (policy.zrp_request &
3230 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3231 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3232 zpool_rewind_exclaim(hdl, zc.zc_name,
3233 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3234 nvi);
3235 nvlist_free(nvi);
3236 }
3237 zcmd_free_nvlists(&zc);
3238 return (0);
3239 }
3240
3241 zcmd_free_nvlists(&zc);
3242 return (zpool_standard_error(hdl, errno, msg));
3243 }
3244
3245 /*
3246 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3247 */
3248 int
3249 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3250 {
3251 zfs_cmd_t zc = {"\0"};
3252 char msg[1024];
3253 libzfs_handle_t *hdl = zhp->zpool_hdl;
3254
3255 (void) snprintf(msg, sizeof (msg),
3256 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3257 (u_longlong_t)guid);
3258
3259 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3260 zc.zc_guid = guid;
3261 zc.zc_cookie = ZPOOL_NO_REWIND;
3262
3263 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3264 return (0);
3265
3266 return (zpool_standard_error(hdl, errno, msg));
3267 }
3268
3269 /*
3270 * Change the GUID for a pool.
3271 */
3272 int
3273 zpool_reguid(zpool_handle_t *zhp)
3274 {
3275 char msg[1024];
3276 libzfs_handle_t *hdl = zhp->zpool_hdl;
3277 zfs_cmd_t zc = {"\0"};
3278
3279 (void) snprintf(msg, sizeof (msg),
3280 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3281
3282 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3283 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3284 return (0);
3285
3286 return (zpool_standard_error(hdl, errno, msg));
3287 }
3288
3289 /*
3290 * Reopen the pool.
3291 */
3292 int
3293 zpool_reopen(zpool_handle_t *zhp)
3294 {
3295 zfs_cmd_t zc = {"\0"};
3296 char msg[1024];
3297 libzfs_handle_t *hdl = zhp->zpool_hdl;
3298
3299 (void) snprintf(msg, sizeof (msg),
3300 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3301 zhp->zpool_name);
3302
3303 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3304 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3305 return (0);
3306 return (zpool_standard_error(hdl, errno, msg));
3307 }
3308
3309 #if defined(__sun__) || defined(__sun)
3310 /*
3311 * Convert from a devid string to a path.
3312 */
3313 static char *
3314 devid_to_path(char *devid_str)
3315 {
3316 ddi_devid_t devid;
3317 char *minor;
3318 char *path;
3319 devid_nmlist_t *list = NULL;
3320 int ret;
3321
3322 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3323 return (NULL);
3324
3325 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3326
3327 devid_str_free(minor);
3328 devid_free(devid);
3329
3330 if (ret != 0)
3331 return (NULL);
3332
3333 /*
3334 	 * In case the strdup() fails, we will just return NULL below.
3335 */
3336 path = strdup(list[0].devname);
3337
3338 devid_free_nmlist(list);
3339
3340 return (path);
3341 }
3342
3343 /*
3344 * Convert from a path to a devid string.
3345 */
3346 static char *
3347 path_to_devid(const char *path)
3348 {
3349 int fd;
3350 ddi_devid_t devid;
3351 char *minor, *ret;
3352
3353 if ((fd = open(path, O_RDONLY)) < 0)
3354 return (NULL);
3355
3356 minor = NULL;
3357 ret = NULL;
3358 if (devid_get(fd, &devid) == 0) {
3359 if (devid_get_minor_name(fd, &minor) == 0)
3360 ret = devid_str_encode(devid, minor);
3361 if (minor != NULL)
3362 devid_str_free(minor);
3363 devid_free(devid);
3364 }
3365 (void) close(fd);
3366
3367 return (ret);
3368 }
3369
3370 /*
3371 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3372 * ignore any failure here, since a common case is for an unprivileged user to
3373 * type 'zpool status', and we'll display the correct information anyway.
3374 */
3375 static void
3376 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3377 {
3378 zfs_cmd_t zc = {"\0"};
3379
3380 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3381 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3382 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3383 &zc.zc_guid) == 0);
3384
3385 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3386 }
3387 #endif /* sun */
3388
3389 /*
3390 * Remove partition suffix from a vdev path. Partition suffixes may take three
3391 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3392  * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
3393 * third case only occurs when preceded by a string matching the regular
3394 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3395 */
3396 static char *
3397 strip_partition(libzfs_handle_t *hdl, char *path)
3398 {
3399 char *tmp = zfs_strdup(hdl, path);
3400 char *part = NULL, *d = NULL;
3401
3402 if ((part = strstr(tmp, "-part")) && part != tmp) {
3403 d = part + 5;
3404 } else if ((part = strrchr(tmp, 'p')) &&
3405 part > tmp + 1 && isdigit(*(part-1))) {
3406 d = part + 1;
3407 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3408 tmp[1] == 'd') {
3409 for (d = &tmp[2]; isalpha(*d); part = ++d);
3410 } else if (strncmp("xvd", tmp, 3) == 0) {
3411 for (d = &tmp[3]; isalpha(*d); part = ++d);
3412 }
3413 if (part && d && *d != '\0') {
3414 for (; isdigit(*d); d++);
3415 if (*d == '\0')
3416 *part = '\0';
3417 }
3418 return (tmp);
3419 }
3420
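/*
 * For example (device names hypothetical), strip_partition() maps:
 *
 *	"sda1"                  -> "sda"
 *	"md0p1"                 -> "md0"
 *	"ata-SAMSUNG_XYZ-part1" -> "ata-SAMSUNG_XYZ"
 */
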
3421 #define PATH_BUF_LEN 64
3422
3423 /*
3424 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3425 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3426 * We also check if this is a whole disk, in which case we strip off the
3427 * trailing 's0' slice name.
3428 *
3429 * This routine is also responsible for identifying when disks have been
3430 * reconfigured in a new location. The kernel will have opened the device by
3431 * devid, but the path will still refer to the old location. To catch this, we
3432 * first do a path -> devid translation (which is fast for the common case). If
3433 * the devid matches, we're done. If not, we do a reverse devid -> path
3434 * translation and issue the appropriate ioctl() to update the path of the vdev.
3435 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3436 * of these checks.
3437 */
3438 char *
3439 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3440 int name_flags)
3441 {
3442 char *path, *type, *env;
3443 uint64_t value;
3444 char buf[PATH_BUF_LEN];
3445 char tmpbuf[PATH_BUF_LEN];
3446
3447 env = getenv("ZPOOL_VDEV_NAME_PATH");
3448 if (env && (strtoul(env, NULL, 0) > 0 ||
3449 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3450 name_flags |= VDEV_NAME_PATH;
3451
3452 env = getenv("ZPOOL_VDEV_NAME_GUID");
3453 if (env && (strtoul(env, NULL, 0) > 0 ||
3454 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3455 name_flags |= VDEV_NAME_GUID;
3456
3457 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3458 if (env && (strtoul(env, NULL, 0) > 0 ||
3459 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3460 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3461
3462 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3463 name_flags & VDEV_NAME_GUID) {
3464 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3465 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3466 path = buf;
3467 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3468 #if defined(__sun__) || defined(__sun)
3469 /*
3470 * Live VDEV path updates to a kernel VDEV during a
3471 * zpool_vdev_name lookup are not supported on Linux.
3472 */
3473 char *devid;
3474 vdev_stat_t *vs;
3475 uint_t vsc;
3476
3477 /*
3478 * If the device is dead (faulted, offline, etc) then don't
3479 * bother opening it. Otherwise we may be forcing the user to
3480 * open a misbehaving device, which can have undesirable
3481 * effects.
3482 */
3483 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3484 (uint64_t **)&vs, &vsc) != 0 ||
3485 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3486 zhp != NULL &&
3487 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3488 /*
3489 * Determine if the current path is correct.
3490 */
3491 char *newdevid = path_to_devid(path);
3492
3493 if (newdevid == NULL ||
3494 strcmp(devid, newdevid) != 0) {
3495 char *newpath;
3496
3497 if ((newpath = devid_to_path(devid)) != NULL) {
3498 /*
3499 * Update the path appropriately.
3500 */
3501 set_path(zhp, nv, newpath);
3502 if (nvlist_add_string(nv,
3503 ZPOOL_CONFIG_PATH, newpath) == 0)
3504 verify(nvlist_lookup_string(nv,
3505 ZPOOL_CONFIG_PATH,
3506 &path) == 0);
3507 free(newpath);
3508 }
3509 }
3510
3511 if (newdevid)
3512 devid_str_free(newdevid);
3513 }
3514 #endif /* sun */
3515
3516 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3517 char *rp = realpath(path, NULL);
3518 if (rp) {
3519 strlcpy(buf, rp, sizeof (buf));
3520 path = buf;
3521 free(rp);
3522 }
3523 }
3524
3525 /*
3526 * For a block device only use the name.
3527 */
3528 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3529 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3530 !(name_flags & VDEV_NAME_PATH)) {
3531 path = strrchr(path, '/');
3532 path++;
3533 }
3534
3535 /*
3536 * Remove the partition from the path it this is a whole disk.
3537 */
3538 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3539 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3540 return (strip_partition(hdl, path));
3541 }
3542 } else {
3543 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3544
3545 /*
3546 * If it's a raidz device, we need to stick in the parity level.
3547 */
3548 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3549 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3550 &value) == 0);
3551 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3552 (u_longlong_t)value);
3553 path = buf;
3554 }
3555
3556 /*
3557 * We identify each top-level vdev by using a <type-id>
3558 * naming convention.
3559 */
3560 if (name_flags & VDEV_NAME_TYPE_ID) {
3561 uint64_t id;
3562 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3563 &id) == 0);
3564 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3565 path, (u_longlong_t)id);
3566 path = tmpbuf;
3567 }
3568 }
3569
3570 return (zfs_strdup(hdl, path));
3571 }
3572
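/*
 * Illustrative examples (not normative) of the names produced above: a
 * leaf disk typically yields "sda" (or "/dev/sda" with VDEV_NAME_PATH,
 * or its guid with VDEV_NAME_GUID), while a raidz2 top-level vdev yields
 * "raidz2", or "raidz2-0" when VDEV_NAME_TYPE_ID is set.
 */
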
3573 static int
3574 zbookmark_mem_compare(const void *a, const void *b)
3575 {
3576 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3577 }
3578
3579 /*
3580 * Retrieve the persistent error log, uniquify the members, and return to the
3581 * caller.
3582 */
3583 int
3584 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3585 {
3586 zfs_cmd_t zc = {"\0"};
3587 uint64_t count;
3588 zbookmark_phys_t *zb = NULL;
3589 int i;
3590
3591 /*
3592 * Retrieve the raw error list from the kernel. If the number of errors
3593 * has increased, allocate more space and continue until we get the
3594 * entire list.
3595 */
3596 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3597 &count) == 0);
3598 if (count == 0)
3599 return (0);
3600 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3601 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3602 return (-1);
3603 zc.zc_nvlist_dst_size = count;
3604 (void) strcpy(zc.zc_name, zhp->zpool_name);
3605 for (;;) {
3606 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3607 &zc) != 0) {
3608 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3609 if (errno == ENOMEM) {
3610 void *dst;
3611
3612 count = zc.zc_nvlist_dst_size;
3613 dst = zfs_alloc(zhp->zpool_hdl, count *
3614 sizeof (zbookmark_phys_t));
3615 if (dst == NULL)
3616 return (-1);
3617 zc.zc_nvlist_dst = (uintptr_t)dst;
3618 } else {
3619 return (-1);
3620 }
3621 } else {
3622 break;
3623 }
3624 }
3625
3626 /*
3627 * Sort the resulting bookmarks. This is a little confusing due to the
3628 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3629 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3630 	 * _not_ copied as part of the process.  So we point the start of our
3631 	 * array appropriately and decrement the total number of elements.
3632 */
3633 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3634 zc.zc_nvlist_dst_size;
3635 count -= zc.zc_nvlist_dst_size;
3636
3637 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3638
3639 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3640
3641 /*
3642 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3643 */
3644 for (i = 0; i < count; i++) {
3645 nvlist_t *nv;
3646
3647 /* ignoring zb_blkid and zb_level for now */
3648 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3649 zb[i-1].zb_object == zb[i].zb_object)
3650 continue;
3651
3652 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3653 goto nomem;
3654 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3655 zb[i].zb_objset) != 0) {
3656 nvlist_free(nv);
3657 goto nomem;
3658 }
3659 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3660 zb[i].zb_object) != 0) {
3661 nvlist_free(nv);
3662 goto nomem;
3663 }
3664 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3665 nvlist_free(nv);
3666 goto nomem;
3667 }
3668 nvlist_free(nv);
3669 }
3670
3671 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3672 return (0);
3673
3674 nomem:
3675 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3676 return (no_memory(zhp->zpool_hdl));
3677 }
3678
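/*
 * Illustrative usage sketch (not part of the library): walking the error
 * list and resolving each entry to a path, roughly what 'zpool status -v'
 * does together with zpool_obj_to_path() below.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	char pathname[MAXPATHLEN * 2];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *			    sizeof (pathname));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */
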
3679 /*
3680 * Upgrade a ZFS pool to the latest on-disk version.
3681 */
3682 int
3683 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3684 {
3685 zfs_cmd_t zc = {"\0"};
3686 libzfs_handle_t *hdl = zhp->zpool_hdl;
3687
3688 (void) strcpy(zc.zc_name, zhp->zpool_name);
3689 zc.zc_cookie = new_version;
3690
3691 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3692 return (zpool_standard_error_fmt(hdl, errno,
3693 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3694 zhp->zpool_name));
3695 return (0);
3696 }
3697
3698 void
3699 zfs_save_arguments(int argc, char **argv, char *string, int len)
3700 {
3701 int i;
3702
3703 (void) strlcpy(string, basename(argv[0]), len);
3704 for (i = 1; i < argc; i++) {
3705 (void) strlcat(string, " ", len);
3706 (void) strlcat(string, argv[i], len);
3707 }
3708 }
3709
3710 int
3711 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3712 {
3713 zfs_cmd_t zc = {"\0"};
3714 nvlist_t *args;
3715 int err;
3716
3717 args = fnvlist_alloc();
3718 fnvlist_add_string(args, "message", message);
3719 err = zcmd_write_src_nvlist(hdl, &zc, args);
3720 if (err == 0)
3721 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3722 nvlist_free(args);
3723 zcmd_free_nvlists(&zc);
3724 return (err);
3725 }
3726
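/*
 * Illustrative usage sketch (not part of the library): recording the
 * invoking command line in the pool history, as the zpool and zfs
 * commands do at startup.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(hdl, history_str);
 */
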
3727 /*
3728 * Perform ioctl to get some command history of a pool.
3729 *
3730 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3731 * logical offset of the history buffer to start reading from.
3732 *
3733 * Upon return, 'off' is the next logical offset to read from and
3734 * 'len' is the actual amount of bytes read into 'buf'.
3735 */
3736 static int
3737 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3738 {
3739 zfs_cmd_t zc = {"\0"};
3740 libzfs_handle_t *hdl = zhp->zpool_hdl;
3741
3742 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3743
3744 zc.zc_history = (uint64_t)(uintptr_t)buf;
3745 zc.zc_history_len = *len;
3746 zc.zc_history_offset = *off;
3747
3748 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3749 switch (errno) {
3750 case EPERM:
3751 return (zfs_error_fmt(hdl, EZFS_PERM,
3752 dgettext(TEXT_DOMAIN,
3753 "cannot show history for pool '%s'"),
3754 zhp->zpool_name));
3755 case ENOENT:
3756 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3757 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3758 "'%s'"), zhp->zpool_name));
3759 case ENOTSUP:
3760 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3761 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3762 "'%s', pool must be upgraded"), zhp->zpool_name));
3763 default:
3764 return (zpool_standard_error_fmt(hdl, errno,
3765 dgettext(TEXT_DOMAIN,
3766 "cannot get history for '%s'"), zhp->zpool_name));
3767 }
3768 }
3769
3770 *len = zc.zc_history_len;
3771 *off = zc.zc_history_offset;
3772
3773 return (0);
3774 }
3775
3776 /*
3777 * Process the buffer of nvlists, unpacking and storing each nvlist record
3778 * into 'records'. 'leftover' is set to the number of bytes that weren't
3779 * processed as there wasn't a complete record.
3780 */
3781 int
3782 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3783 nvlist_t ***records, uint_t *numrecords)
3784 {
3785 uint64_t reclen;
3786 nvlist_t *nv;
3787 int i;
3788
3789 while (bytes_read > sizeof (reclen)) {
3790
3791 /* get length of packed record (stored as little endian) */
3792 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3793 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3794
3795 if (bytes_read < sizeof (reclen) + reclen)
3796 break;
3797
3798 /* unpack record */
3799 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3800 return (ENOMEM);
3801 bytes_read -= sizeof (reclen) + reclen;
3802 buf += sizeof (reclen) + reclen;
3803
3804 /* add record to nvlist array */
3805 (*numrecords)++;
3806 if (ISP2(*numrecords + 1)) {
3807 *records = realloc(*records,
3808 *numrecords * 2 * sizeof (nvlist_t *));
3809 }
3810 (*records)[*numrecords - 1] = nv;
3811 }
3812
3813 *leftover = bytes_read;
3814 return (0);
3815 }
3816
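/*
 * Illustrative layout sketch of the buffer consumed above: each record
 * is an 8-byte little-endian length followed by a packed nvlist of
 * exactly that many bytes.
 *
 *	+----------------+---------------+----------------+---------------+--
 *	| reclen (8, LE) | packed nvlist | reclen (8, LE) | packed nvlist |
 *	+----------------+---------------+----------------+---------------+--
 *
 * An incomplete record at the tail is reported via 'leftover' so the
 * caller can re-read it from the correct offset.
 */
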
3817 /*
3818 * Retrieve the command history of a pool.
3819 */
3820 int
3821 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3822 {
3823 char *buf;
3824 int buflen = 128 * 1024;
3825 uint64_t off = 0;
3826 nvlist_t **records = NULL;
3827 uint_t numrecords = 0;
3828 int err, i;
3829
3830 buf = malloc(buflen);
3831 if (buf == NULL)
3832 return (ENOMEM);
3833 do {
3834 uint64_t bytes_read = buflen;
3835 uint64_t leftover;
3836
3837 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3838 break;
3839
3840 /* if nothing else was read in, we're at EOF, just return */
3841 if (!bytes_read)
3842 break;
3843
3844 if ((err = zpool_history_unpack(buf, bytes_read,
3845 &leftover, &records, &numrecords)) != 0)
3846 break;
3847 off -= leftover;
3848 if (leftover == bytes_read) {
3849 /*
3850 * no progress made, because buffer is not big enough
3851 * to hold this record; resize and retry.
3852 */
3853 buflen *= 2;
3854 free(buf);
3855 buf = malloc(buflen);
3856 if (buf == NULL)
3857 return (ENOMEM);
3858 }
3859
3860 /* CONSTCOND */
3861 } while (1);
3862
3863 free(buf);
3864
3865 if (!err) {
3866 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3867 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3868 records, numrecords) == 0);
3869 }
3870 for (i = 0; i < numrecords; i++)
3871 nvlist_free(records[i]);
3872 free(records);
3873
3874 return (err);
3875 }
3876
3877 /*
3878 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3879 * If there is a new event available 'nvp' will contain a newly allocated
3880 * nvlist and 'dropped' will be set to the number of missed events since
3881 * the last call to this function. When 'nvp' is set to NULL it indicates
3882 * no new events are available. In either case the function returns 0 and
3883 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3884 * function will return a non-zero value. When the function is called in
3885 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3886 * it will not return until a new event is available.
3887 */
3888 int
3889 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3890 int *dropped, unsigned flags, int zevent_fd)
3891 {
3892 zfs_cmd_t zc = {"\0"};
3893 int error = 0;
3894
3895 *nvp = NULL;
3896 *dropped = 0;
3897 zc.zc_cleanup_fd = zevent_fd;
3898
3899 if (flags & ZEVENT_NONBLOCK)
3900 zc.zc_guid = ZEVENT_NONBLOCK;
3901
3902 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3903 return (-1);
3904
3905 retry:
3906 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3907 switch (errno) {
3908 case ESHUTDOWN:
3909 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3910 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3911 goto out;
3912 case ENOENT:
3913 /* Blocking error case should not occur */
3914 if (!(flags & ZEVENT_NONBLOCK))
3915 error = zpool_standard_error_fmt(hdl, errno,
3916 dgettext(TEXT_DOMAIN, "cannot get event"));
3917
3918 goto out;
3919 case ENOMEM:
3920 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3921 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3922 dgettext(TEXT_DOMAIN, "cannot get event"));
3923 goto out;
3924 } else {
3925 goto retry;
3926 }
3927 default:
3928 error = zpool_standard_error_fmt(hdl, errno,
3929 dgettext(TEXT_DOMAIN, "cannot get event"));
3930 goto out;
3931 }
3932 }
3933
3934 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3935 if (error != 0)
3936 goto out;
3937
3938 *dropped = (int)zc.zc_cookie;
3939 out:
3940 zcmd_free_nvlists(&zc);
3941
3942 return (error);
3943 }
3944
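/*
 * Illustrative usage sketch (not part of the library): draining pending
 * events without blocking.  ZFS_DEV is assumed to be the "/dev/zfs"
 * control node; each open of it provides an independent event cursor.
 *
 *	nvlist_t *event;
 *	int dropped, fd;
 *
 *	if ((fd = open(ZFS_DEV, O_RDWR)) < 0)
 *		return (-1);
 *	while (zpool_events_next(hdl, &event, &dropped,
 *	    ZEVENT_NONBLOCK, fd) == 0 && event != NULL)
 *		nvlist_free(event);
 *	(void) close(fd);
 */
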
3945 /*
3946 * Clear all events.
3947 */
3948 int
3949 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3950 {
3951 zfs_cmd_t zc = {"\0"};
3952 char msg[1024];
3953
3954 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3955 "cannot clear events"));
3956
3957 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3958 return (zpool_standard_error_fmt(hdl, errno, msg));
3959
3960 if (count != NULL)
3961 *count = (int)zc.zc_cookie; /* # of events cleared */
3962
3963 return (0);
3964 }
3965
3966 /*
3967 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3968 * the passed zevent_fd file handle. On success zero is returned,
3969 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3970 */
3971 int
3972 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3973 {
3974 zfs_cmd_t zc = {"\0"};
3975 int error = 0;
3976
3977 zc.zc_guid = eid;
3978 zc.zc_cleanup_fd = zevent_fd;
3979
3980 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3981 switch (errno) {
3982 case ENOENT:
3983 error = zfs_error_fmt(hdl, EZFS_NOENT,
3984 dgettext(TEXT_DOMAIN, "cannot get event"));
3985 break;
3986
3987 case ENOMEM:
3988 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3989 dgettext(TEXT_DOMAIN, "cannot get event"));
3990 break;
3991
3992 default:
3993 error = zpool_standard_error_fmt(hdl, errno,
3994 dgettext(TEXT_DOMAIN, "cannot get event"));
3995 break;
3996 }
3997 }
3998
3999 return (error);
4000 }
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
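
/*
 * Illustrative call (a sketch): resolve one persistent error log entry,
 * as obtained from zpool_get_errlog(), to a printable path. The
 * ZPOOL_ERR_DATASET/ZPOOL_ERR_OBJECT keys are the ones consumed by
 * "zpool status -v".
 *
 *	char pathname[MAXPATHLEN];
 *	uint64_t dsobj, obj;
 *
 *	verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, &dsobj) == 0);
 *	verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, &obj) == 0);
 *	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
 *	(void) printf("%s\n", pathname);
 */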

/*
 * Read the EFI label from the config; if a label does not exist, pass
 * the error back to the caller. If the caller has passed a non-NULL
 * diskaddr argument, set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}
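
/*
 * A minimal caller sketch (mirroring find_start_block() below): treat any
 * negative return as "no usable EFI label".
 *
 *	diskaddr_t start;
 *
 *	if (read_efi_label(config, &start) < 0)
 *		start = MAXOFFSET_T;
 */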

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
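
/*
 * The walk above is depth-first over the vdev tree; the first whole-disk
 * leaf with a readable EFI label wins. zpool_label_disk() below consumes
 * the result like this, falling back to NEW_START_BLOCK when nothing is
 * found:
 *
 *	start_block = find_start_block(nvroot);
 *	if (start_block == MAXOFFSET_T)
 *		start_block = NEW_START_BLOCK;
 */

/*
 * Sanity check that an EFI label can be read back from the given device
 * and that its primary GPT has not been flagged corrupt. Returns 0 on
 * success, EIDRM for a corrupt primary label, and otherwise the errno
 * from open(2) or the efi_alloc_and_read() error.
 */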
static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Generate a unique partition name for the ZFS member. Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members. The partition names are
 * of the form zfs-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		(void) close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
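
/*
 * For example (illustrative value only), a generated name might be
 * "zfs-89abcdef01234567", which udev would expose as
 * /dev/disk/by-partlabel/zfs-89abcdef01234567.
 */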

/*
 * Label an individual disk. The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}
#endif

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen. We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice). V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
	 * etc. were all pretty specific. V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI GPT
		 * labels. Print out a helpful error message directing the
		 * user to manually label the disk and give a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid. Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged. Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}
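
/*
 * Illustrative usage (a sketch; "tank" and "sdb" are hypothetical): label
 * a whole disk before handing it to the pool. On success the disk carries
 * a GPT with the large data partition first and the small reserved
 * partition at the end.
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *	if (zhp != NULL) {
 *		if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(zhp);
 *	}
 */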