1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
26 */
27
28 #include <ctype.h>
29 #include <errno.h>
30 #include <devid.h>
31 #include <fcntl.h>
32 #include <libintl.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <strings.h>
36 #include <unistd.h>
37 #include <libgen.h>
38 #include <zone.h>
39 #include <sys/stat.h>
40 #include <sys/efi_partition.h>
41 #include <sys/vtoc.h>
42 #include <sys/zfs_ioctl.h>
43 #include <dlfcn.h>
44
45 #include "zfs_namecheck.h"
46 #include "zfs_prop.h"
47 #include "libzfs_impl.h"
48 #include "zfs_comutil.h"
49 #include "zfeature_common.h"
50
51 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
52
53 typedef struct prop_flags {
54 int create:1; /* Validate property on creation */
55 int import:1; /* Validate property on import */
56 } prop_flags_t;
57
58 /*
59 * ====================================================================
60 * zpool property functions
61 * ====================================================================
62 */
63
64 static int
65 zpool_get_all_props(zpool_handle_t *zhp)
66 {
67 zfs_cmd_t zc = {"\0"};
68 libzfs_handle_t *hdl = zhp->zpool_hdl;
69
70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
71
72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
73 return (-1);
74
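/*
 * The kernel sets ENOMEM when the destination buffer is too small for
 * the property nvlist; grow the buffer and retry until it fits.
 */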
75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
76 if (errno == ENOMEM) {
77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
78 zcmd_free_nvlists(&zc);
79 return (-1);
80 }
81 } else {
82 zcmd_free_nvlists(&zc);
83 return (-1);
84 }
85 }
86
87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
88 zcmd_free_nvlists(&zc);
89 return (-1);
90 }
91
92 zcmd_free_nvlists(&zc);
93
94 return (0);
95 }
96
97 static int
98 zpool_props_refresh(zpool_handle_t *zhp)
99 {
100 nvlist_t *old_props;
101
102 old_props = zhp->zpool_props;
103
104 if (zpool_get_all_props(zhp) != 0)
105 return (-1);
106
107 nvlist_free(old_props);
108 return (0);
109 }
110
111 static char *
112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
113 zprop_source_t *src)
114 {
115 nvlist_t *nv, *nvl;
116 uint64_t ival;
117 char *value;
118 zprop_source_t source;
119
120 nvl = zhp->zpool_props;
121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
123 source = ival;
124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
125 } else {
126 source = ZPROP_SRC_DEFAULT;
127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
128 value = "-";
129 }
130
131 if (src)
132 *src = source;
133
134 return (value);
135 }
136
137 uint64_t
138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
139 {
140 nvlist_t *nv, *nvl;
141 uint64_t value;
142 zprop_source_t source;
143
144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
145 /*
146 * zpool_get_all_props() has most likely failed because
147 * the pool is faulted, but if all we need is the top level
148 * vdev's guid then get it from the zhp config nvlist.
149 */
150 if ((prop == ZPOOL_PROP_GUID) &&
151 (nvlist_lookup_nvlist(zhp->zpool_config,
152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
154 == 0)) {
155 return (value);
156 }
157 return (zpool_prop_default_numeric(prop));
158 }
159
160 nvl = zhp->zpool_props;
161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
163 source = value;
164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
165 } else {
166 source = ZPROP_SRC_DEFAULT;
167 value = zpool_prop_default_numeric(prop);
168 }
169
170 if (src)
171 *src = source;
172
173 return (value);
174 }
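/*
 * Illustrative sketch (not part of libzfs): reading a numeric pool
 * property and its source. The handle 'zhp' is assumed to come from
 * zpool_open().
 *
 *	zprop_source_t src;
 *	uint64_t version;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	if (src == ZPROP_SRC_DEFAULT)
 *		(void) printf("version %llu is the default\n",
 *		    (u_longlong_t)version);
 */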
175
176 /*
177 * Map VDEV STATE to printed strings.
178 */
179 char *
180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
181 {
182 switch (state) {
183 default:
184 break;
185 case VDEV_STATE_CLOSED:
186 case VDEV_STATE_OFFLINE:
187 return (gettext("OFFLINE"));
188 case VDEV_STATE_REMOVED:
189 return (gettext("REMOVED"));
190 case VDEV_STATE_CANT_OPEN:
191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
192 return (gettext("FAULTED"));
193 else if (aux == VDEV_AUX_SPLIT_POOL)
194 return (gettext("SPLIT"));
195 else
196 return (gettext("UNAVAIL"));
197 case VDEV_STATE_FAULTED:
198 return (gettext("FAULTED"));
199 case VDEV_STATE_DEGRADED:
200 return (gettext("DEGRADED"));
201 case VDEV_STATE_HEALTHY:
202 return (gettext("ONLINE"));
203 }
204
205 return (gettext("UNKNOWN"));
206 }
207
208 /*
209 * Map POOL STATE to printed strings.
210 */
211 const char *
212 zpool_pool_state_to_name(pool_state_t state)
213 {
214 switch (state) {
215 default:
216 break;
217 case POOL_STATE_ACTIVE:
218 return (gettext("ACTIVE"));
219 case POOL_STATE_EXPORTED:
220 return (gettext("EXPORTED"));
221 case POOL_STATE_DESTROYED:
222 return (gettext("DESTROYED"));
223 case POOL_STATE_SPARE:
224 return (gettext("SPARE"));
225 case POOL_STATE_L2CACHE:
226 return (gettext("L2CACHE"));
227 case POOL_STATE_UNINITIALIZED:
228 return (gettext("UNINITIALIZED"));
229 case POOL_STATE_UNAVAIL:
230 return (gettext("UNAVAIL"));
231 case POOL_STATE_POTENTIALLY_ACTIVE:
232 return (gettext("POTENTIALLY_ACTIVE"));
233 }
234
235 return (gettext("UNKNOWN"));
236 }
237
238 /*
239 * Get a zpool property value for 'prop' and return the value in
240 * a pre-allocated buffer.
241 */
242 int
243 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
244 size_t len, zprop_source_t *srctype, boolean_t literal)
245 {
246 uint64_t intval;
247 const char *strval;
248 zprop_source_t src = ZPROP_SRC_NONE;
249 nvlist_t *nvroot;
250 vdev_stat_t *vs;
251 uint_t vsc;
252
253 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
254 switch (prop) {
255 case ZPOOL_PROP_NAME:
256 (void) strlcpy(buf, zpool_get_name(zhp), len);
257 break;
258
259 case ZPOOL_PROP_HEALTH:
260 (void) strlcpy(buf, "FAULTED", len);
261 break;
262
263 case ZPOOL_PROP_GUID:
264 intval = zpool_get_prop_int(zhp, prop, &src);
265 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
266 break;
267
268 case ZPOOL_PROP_ALTROOT:
269 case ZPOOL_PROP_CACHEFILE:
270 case ZPOOL_PROP_COMMENT:
271 if (zhp->zpool_props != NULL ||
272 zpool_get_all_props(zhp) == 0) {
273 (void) strlcpy(buf,
274 zpool_get_prop_string(zhp, prop, &src),
275 len);
276 break;
277 }
278 /* FALLTHROUGH */
279 default:
280 (void) strlcpy(buf, "-", len);
281 break;
282 }
283
284 if (srctype != NULL)
285 *srctype = src;
286 return (0);
287 }
288
289 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
290 prop != ZPOOL_PROP_NAME)
291 return (-1);
292
293 switch (zpool_prop_get_type(prop)) {
294 case PROP_TYPE_STRING:
295 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
296 len);
297 break;
298
299 case PROP_TYPE_NUMBER:
300 intval = zpool_get_prop_int(zhp, prop, &src);
301
302 switch (prop) {
303 case ZPOOL_PROP_SIZE:
304 case ZPOOL_PROP_ALLOCATED:
305 case ZPOOL_PROP_FREE:
306 case ZPOOL_PROP_FREEING:
307 case ZPOOL_PROP_LEAKED:
308 case ZPOOL_PROP_ASHIFT:
309 if (literal)
310 (void) snprintf(buf, len, "%llu",
311 (u_longlong_t)intval);
312 else
313 (void) zfs_nicenum(intval, buf, len);
314 break;
315
316 case ZPOOL_PROP_EXPANDSZ:
317 if (intval == 0) {
318 (void) strlcpy(buf, "-", len);
319 } else if (literal) {
320 (void) snprintf(buf, len, "%llu",
321 (u_longlong_t)intval);
322 } else {
323 (void) zfs_nicenum(intval, buf, len);
324 }
325 break;
326
327 case ZPOOL_PROP_CAPACITY:
328 if (literal) {
329 (void) snprintf(buf, len, "%llu",
330 (u_longlong_t)intval);
331 } else {
332 (void) snprintf(buf, len, "%llu%%",
333 (u_longlong_t)intval);
334 }
335 break;
336
337 case ZPOOL_PROP_FRAGMENTATION:
338 if (intval == UINT64_MAX) {
339 (void) strlcpy(buf, "-", len);
340 } else if (literal) {
341 (void) snprintf(buf, len, "%llu",
342 (u_longlong_t)intval);
343 } else {
344 (void) snprintf(buf, len, "%llu%%",
345 (u_longlong_t)intval);
346 }
347 break;
348
349 case ZPOOL_PROP_DEDUPRATIO:
350 if (literal)
351 (void) snprintf(buf, len, "%llu.%02llu",
352 (u_longlong_t)(intval / 100),
353 (u_longlong_t)(intval % 100));
354 else
355 (void) snprintf(buf, len, "%llu.%02llux",
356 (u_longlong_t)(intval / 100),
357 (u_longlong_t)(intval % 100));
358 break;
359
360 case ZPOOL_PROP_HEALTH:
361 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
362 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
363 verify(nvlist_lookup_uint64_array(nvroot,
364 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
365 == 0);
366
367 (void) strlcpy(buf, zpool_state_to_name(intval,
368 vs->vs_aux), len);
369 break;
370 case ZPOOL_PROP_VERSION:
371 if (intval >= SPA_VERSION_FEATURES) {
372 (void) snprintf(buf, len, "-");
373 break;
374 }
375 /* FALLTHROUGH */
376 default:
377 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
378 }
379 break;
380
381 case PROP_TYPE_INDEX:
382 intval = zpool_get_prop_int(zhp, prop, &src);
383 if (zpool_prop_index_to_string(prop, intval, &strval)
384 != 0)
385 return (-1);
386 (void) strlcpy(buf, strval, len);
387 break;
388
389 default:
390 abort();
391 }
392
393 if (srctype)
394 *srctype = src;
395
396 return (0);
397 }
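/*
 * Illustrative sketch (not part of libzfs): fetching the formatted
 * value of a pool property. Passing B_TRUE for 'literal' would return
 * raw numbers instead of nicenum-formatted strings.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */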
398
399 /*
400 * Check that the bootfs name begins with the name of the pool it is
401 * being set on. Assumes bootfs is an otherwise valid dataset name.
402 */
403 static boolean_t
404 bootfs_name_valid(const char *pool, char *bootfs)
405 {
406 int len = strlen(pool);
407
408 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
409 return (B_FALSE);
410
411 if (strncmp(pool, bootfs, len) == 0 &&
412 (bootfs[len] == '/' || bootfs[len] == '\0'))
413 return (B_TRUE);
414
415 return (B_FALSE);
416 }
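/*
 * For example, bootfs_name_valid("tank", "tank/ROOT/default") and
 * bootfs_name_valid("tank", "tank") both return B_TRUE, while
 * bootfs_name_valid("tank", "tankX/ROOT") returns B_FALSE because the
 * prefix match must end at a '/' or at the end of the string.
 */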
417
418 #if defined(__sun__) || defined(__sun)
419 /*
420 * Inspect the configuration to determine if any of the devices contain
421 * an EFI label.
422 */
423 static boolean_t
424 pool_uses_efi(nvlist_t *config)
425 {
426 nvlist_t **child;
427 uint_t c, children;
428
429 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
430 &child, &children) != 0)
431 return (read_efi_label(config, NULL) >= 0);
432
433 for (c = 0; c < children; c++) {
434 if (pool_uses_efi(child[c]))
435 return (B_TRUE);
436 }
437 return (B_FALSE);
438 }
439 #endif
440
441 boolean_t
442 zpool_is_bootable(zpool_handle_t *zhp)
443 {
444 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
445
446 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
447 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
448 sizeof (bootfs)) != 0);
449 }
450
451
452 /*
453 * Given an nvlist of zpool properties to be set, validate that they are
454 * correct, and parse any numeric properties (index, boolean, etc) if they are
455 * specified as strings.
456 */
457 static nvlist_t *
458 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
459 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
460 {
461 nvpair_t *elem;
462 nvlist_t *retprops;
463 zpool_prop_t prop;
464 char *strval;
465 uint64_t intval;
466 char *slash, *check;
467 struct stat64 statbuf;
468 zpool_handle_t *zhp;
469 nvlist_t *nvroot;
470
471 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
472 (void) no_memory(hdl);
473 return (NULL);
474 }
475
476 elem = NULL;
477 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
478 const char *propname = nvpair_name(elem);
479
480 prop = zpool_name_to_prop(propname);
481 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
482 int err;
483 char *fname = strchr(propname, '@') + 1;
484
485 err = zfeature_lookup_name(fname, NULL);
486 if (err != 0) {
487 ASSERT3U(err, ==, ENOENT);
488 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
489 "invalid feature '%s'"), fname);
490 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
491 goto error;
492 }
493
494 if (nvpair_type(elem) != DATA_TYPE_STRING) {
495 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
496 "'%s' must be a string"), propname);
497 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
498 goto error;
499 }
500
501 (void) nvpair_value_string(elem, &strval);
502 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
503 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
504 "property '%s' can only be set to "
505 "'enabled'"), propname);
506 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
507 goto error;
508 }
509
510 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
511 (void) no_memory(hdl);
512 goto error;
513 }
514 continue;
515 }
516
517 /*
518 * Make sure this property is valid and applies to this type.
519 */
520 if (prop == ZPROP_INVAL) {
521 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
522 "invalid property '%s'"), propname);
523 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
524 goto error;
525 }
526
527 if (zpool_prop_readonly(prop)) {
528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
529 "is readonly"), propname);
530 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
531 goto error;
532 }
533
534 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
535 &strval, &intval, errbuf) != 0)
536 goto error;
537
538 /*
539 * Perform additional checking for specific properties.
540 */
541 switch (prop) {
542 default:
543 break;
544 case ZPOOL_PROP_VERSION:
545 if (intval < version ||
546 !SPA_VERSION_IS_SUPPORTED(intval)) {
547 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
548 "property '%s' number %d is invalid."),
549 propname, intval);
550 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
551 goto error;
552 }
553 break;
554
555 case ZPOOL_PROP_ASHIFT:
556 if (!flags.create) {
557 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
558 "property '%s' can only be set at "
559 "creation time"), propname);
560 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
561 goto error;
562 }
563
564 if (intval != 0 && (intval < 9 || intval > 13)) {
565 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
566 "property '%s' number %d is invalid."),
567 propname, intval);
568 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
569 goto error;
570 }
571 break;
572
573 case ZPOOL_PROP_BOOTFS:
574 if (flags.create || flags.import) {
575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 "property '%s' cannot be set at creation "
577 "or import time"), propname);
578 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
579 goto error;
580 }
581
582 if (version < SPA_VERSION_BOOTFS) {
583 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 "pool must be upgraded to support "
585 "'%s' property"), propname);
586 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
587 goto error;
588 }
589
590 /*
591 * bootfs property value has to be a dataset name and
592 * the dataset has to be in the same pool it is set on.
593 */
594 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
595 strval)) {
596 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
597 "is an invalid name"), strval);
598 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
599 goto error;
600 }
601
602 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
603 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
604 "could not open pool '%s'"), poolname);
605 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
606 goto error;
607 }
608 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
609 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
610
611 #if defined(__sun__) || defined(__sun)
612 /*
613 * bootfs property cannot be set on a disk which has
614 * been EFI labeled.
615 */
616 if (pool_uses_efi(nvroot)) {
617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
618 "property '%s' not supported on "
619 "EFI labeled devices"), propname);
620 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
621 zpool_close(zhp);
622 goto error;
623 }
624 #endif
625 zpool_close(zhp);
626 break;
627
628 case ZPOOL_PROP_ALTROOT:
629 if (!flags.create && !flags.import) {
630 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
631 "property '%s' can only be set during pool "
632 "creation or import"), propname);
633 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
634 goto error;
635 }
636
637 if (strval[0] != '/') {
638 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
639 "bad alternate root '%s'"), strval);
640 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
641 goto error;
642 }
643 break;
644
645 case ZPOOL_PROP_CACHEFILE:
646 if (strval[0] == '\0')
647 break;
648
649 if (strcmp(strval, "none") == 0)
650 break;
651
652 if (strval[0] != '/') {
653 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
654 "property '%s' must be empty, an "
655 "absolute path, or 'none'"), propname);
656 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
657 goto error;
658 }
659
660 slash = strrchr(strval, '/');
661
662 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
663 strcmp(slash, "/..") == 0) {
664 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
665 "'%s' is not a valid file"), strval);
666 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
667 goto error;
668 }
669
670 *slash = '\0';
671
672 if (strval[0] != '\0' &&
673 (stat64(strval, &statbuf) != 0 ||
674 !S_ISDIR(statbuf.st_mode))) {
675 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
676 "'%s' is not a valid directory"),
677 strval);
678 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
679 goto error;
680 }
681
682 *slash = '/';
683 break;
684
685 case ZPOOL_PROP_COMMENT:
686 for (check = strval; *check != '\0'; check++) {
687 if (!isprint((unsigned char)*check)) {
688 zfs_error_aux(hdl,
689 dgettext(TEXT_DOMAIN,
690 "comment may only have printable "
691 "characters"));
692 (void) zfs_error(hdl, EZFS_BADPROP,
693 errbuf);
694 goto error;
695 }
696 }
697 if (strlen(strval) > ZPROP_MAX_COMMENT) {
698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
699 "comment must not exceed %d characters"),
700 ZPROP_MAX_COMMENT);
701 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
702 goto error;
703 }
704 break;
705 case ZPOOL_PROP_READONLY:
706 if (!flags.import) {
707 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
708 "property '%s' can only be set at "
709 "import time"), propname);
710 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
711 goto error;
712 }
713 break;
714 case ZPOOL_PROP_TNAME:
715 if (!flags.create) {
716 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
717 "property '%s' can only be set at "
718 "creation time"), propname);
719 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
720 goto error;
721 }
722 break;
723 }
724 }
725
726 return (retprops);
727 error:
728 nvlist_free(retprops);
729 return (NULL);
730 }
731
732 /*
733 * Set zpool property : propname=propval.
734 */
735 int
736 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
737 {
738 zfs_cmd_t zc = {"\0"};
739 int ret = -1;
740 char errbuf[1024];
741 nvlist_t *nvl = NULL;
742 nvlist_t *realprops;
743 uint64_t version;
744 prop_flags_t flags = { 0 };
745
746 (void) snprintf(errbuf, sizeof (errbuf),
747 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
748 zhp->zpool_name);
749
750 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
751 return (no_memory(zhp->zpool_hdl));
752
753 if (nvlist_add_string(nvl, propname, propval) != 0) {
754 nvlist_free(nvl);
755 return (no_memory(zhp->zpool_hdl));
756 }
757
758 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
759 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
760 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
761 nvlist_free(nvl);
762 return (-1);
763 }
764
765 nvlist_free(nvl);
766 nvl = realprops;
767
768 /*
769 * Execute the corresponding ioctl() to set this property.
770 */
771 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
772
773 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
774 nvlist_free(nvl);
775 return (-1);
776 }
777
778 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
779
780 zcmd_free_nvlists(&zc);
781 nvlist_free(nvl);
782
783 if (ret)
784 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
785 else
786 (void) zpool_props_refresh(zhp);
787
788 return (ret);
789 }
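/*
 * Illustrative sketch (not part of libzfs): setting a pool property
 * through zpool_set_prop(). The pool name "tank" and comment text are
 * hypothetical; errors are reported through the libzfs handle as usual.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(zhp);
 *	}
 */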
790
791 int
792 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
793 {
794 libzfs_handle_t *hdl = zhp->zpool_hdl;
795 zprop_list_t *entry;
796 char buf[ZFS_MAXPROPLEN];
797 nvlist_t *features = NULL;
798 nvpair_t *nvp;
799 zprop_list_t **last;
800 boolean_t firstexpand = (NULL == *plp);
801 int i;
802
803 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
804 return (-1);
805
806 last = plp;
807 while (*last != NULL)
808 last = &(*last)->pl_next;
809
810 if ((*plp)->pl_all)
811 features = zpool_get_features(zhp);
812
813 if ((*plp)->pl_all && firstexpand) {
814 for (i = 0; i < SPA_FEATURES; i++) {
815 zprop_list_t *entry = zfs_alloc(hdl,
816 sizeof (zprop_list_t));
817 entry->pl_prop = ZPROP_INVAL;
818 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
819 spa_feature_table[i].fi_uname);
820 entry->pl_width = strlen(entry->pl_user_prop);
821 entry->pl_all = B_TRUE;
822
823 *last = entry;
824 last = &entry->pl_next;
825 }
826 }
827
828 /* add any unsupported features */
829 for (nvp = nvlist_next_nvpair(features, NULL);
830 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
831 char *propname;
832 boolean_t found;
833 zprop_list_t *entry;
834
835 if (zfeature_is_supported(nvpair_name(nvp)))
836 continue;
837
838 propname = zfs_asprintf(hdl, "unsupported@%s",
839 nvpair_name(nvp));
840
841 /*
842 * Before adding the property to the list make sure that no
843 * other pool already added the same property.
844 */
845 found = B_FALSE;
846 entry = *plp;
847 while (entry != NULL) {
848 if (entry->pl_user_prop != NULL &&
849 strcmp(propname, entry->pl_user_prop) == 0) {
850 found = B_TRUE;
851 break;
852 }
853 entry = entry->pl_next;
854 }
855 if (found) {
856 free(propname);
857 continue;
858 }
859
860 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
861 entry->pl_prop = ZPROP_INVAL;
862 entry->pl_user_prop = propname;
863 entry->pl_width = strlen(entry->pl_user_prop);
864 entry->pl_all = B_TRUE;
865
866 *last = entry;
867 last = &entry->pl_next;
868 }
869
870 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
871
872 if (entry->pl_fixed)
873 continue;
874
875 if (entry->pl_prop != ZPROP_INVAL &&
876 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
877 NULL, B_FALSE) == 0) {
878 if (strlen(buf) > entry->pl_width)
879 entry->pl_width = strlen(buf);
880 }
881 }
882
883 return (0);
884 }
885
886 /*
887 * Get the state for the given feature on the given ZFS pool.
888 */
889 int
890 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
891 size_t len)
892 {
893 uint64_t refcount;
894 boolean_t found = B_FALSE;
895 nvlist_t *features = zpool_get_features(zhp);
896 boolean_t supported;
897 const char *feature = strchr(propname, '@') + 1;
898
899 supported = zpool_prop_feature(propname);
900 ASSERT(supported || zpool_prop_unsupported(propname));
901
902 /*
903 * Convert from feature name to feature guid. This conversion is
904 * unnecessary for unsupported@... properties because they already
905 * use guids.
906 */
907 if (supported) {
908 int ret;
909 spa_feature_t fid;
910
911 ret = zfeature_lookup_name(feature, &fid);
912 if (ret != 0) {
913 (void) strlcpy(buf, "-", len);
914 return (ENOTSUP);
915 }
916 feature = spa_feature_table[fid].fi_guid;
917 }
918
919 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
920 found = B_TRUE;
921
922 if (supported) {
923 if (!found) {
924 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
925 } else {
926 if (refcount == 0)
927 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
928 else
929 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
930 }
931 } else {
932 if (found) {
933 if (refcount == 0) {
934 (void) strlcpy(buf, ZFS_UNSUPPORTED_INACTIVE, len);
935 } else {
936 (void) strlcpy(buf, ZFS_UNSUPPORTED_READONLY, len);
937 }
938 } else {
939 (void) strlcpy(buf, "-", len);
940 return (ENOTSUP);
941 }
942 }
943
944 return (0);
945 }
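/*
 * Illustrative sketch (not part of libzfs): querying a feature state.
 * The feature name below is an example; for supported features the
 * buffer receives "disabled", "enabled", or "active".
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy is %s\n", state);
 */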
946
947 /*
948 * Don't start the slice at the default block of 34; many storage
949 * devices use a stripe width of 128k, and other vendors prefer a 1m
950 * alignment. It is best to play it safe and ensure a 1m alignment
951 * given 512B blocks (2048 blocks * 512B = 1m). When the block size
952 * is larger by a power of 2 we will still be 1m aligned. Some
953 * devices are sensitive to the partition ending alignment as well.
954 */
955 #define NEW_START_BLOCK 2048
956 #define PARTITION_END_ALIGNMENT 2048
957
958 /*
959 * Validate the given pool name, optionally reporting an extended error
960 * message through the libzfs handle.
961 */
962 boolean_t
963 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
964 {
965 namecheck_err_t why;
966 char what;
967 int ret;
968
969 ret = pool_namecheck(pool, &why, &what);
970
971 /*
972 * The rules for reserved pool names were extended at a later point.
973 * But we need to support users with existing pools that may now be
974 * invalid. So we only check for this expanded set of names during a
975 * create (or import), and only in userland.
976 */
977 if (ret == 0 && !isopen &&
978 (strncmp(pool, "mirror", 6) == 0 ||
979 strncmp(pool, "raidz", 5) == 0 ||
980 strncmp(pool, "spare", 5) == 0 ||
981 strcmp(pool, "log") == 0)) {
982 if (hdl != NULL)
983 zfs_error_aux(hdl,
984 dgettext(TEXT_DOMAIN, "name is reserved"));
985 return (B_FALSE);
986 }
987
988
989 if (ret != 0) {
990 if (hdl != NULL) {
991 switch (why) {
992 case NAME_ERR_TOOLONG:
993 zfs_error_aux(hdl,
994 dgettext(TEXT_DOMAIN, "name is too long"));
995 break;
996
997 case NAME_ERR_INVALCHAR:
998 zfs_error_aux(hdl,
999 dgettext(TEXT_DOMAIN, "invalid character "
1000 "'%c' in pool name"), what);
1001 break;
1002
1003 case NAME_ERR_NOLETTER:
1004 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1005 "name must begin with a letter"));
1006 break;
1007
1008 case NAME_ERR_RESERVED:
1009 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1010 "name is reserved"));
1011 break;
1012
1013 case NAME_ERR_DISKLIKE:
1014 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1015 "pool name is reserved"));
1016 break;
1017
1018 case NAME_ERR_LEADING_SLASH:
1019 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1020 "leading slash in name"));
1021 break;
1022
1023 case NAME_ERR_EMPTY_COMPONENT:
1024 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1025 "empty component in name"));
1026 break;
1027
1028 case NAME_ERR_TRAILING_SLASH:
1029 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1030 "trailing slash in name"));
1031 break;
1032
1033 case NAME_ERR_MULTIPLE_AT:
1034 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1035 "multiple '@' delimiters in name"));
1036 break;
1037 case NAME_ERR_NO_AT:
1038 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1039 "permission set is missing '@'"));
1040 break;
1041 }
1042 }
1043 return (B_FALSE);
1044 }
1045
1046 return (B_TRUE);
1047 }
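/*
 * For example, zpool_name_valid(hdl, B_FALSE, "tank") returns B_TRUE,
 * while names beginning with "mirror", "raidz", or "spare", and the
 * exact name "log", are rejected at create/import time because they
 * collide with vdev specifications.
 */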
1048
1049 /*
1050 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1051 * state.
1052 */
1053 zpool_handle_t *
1054 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1055 {
1056 zpool_handle_t *zhp;
1057 boolean_t missing;
1058
1059 /*
1060 * Make sure the pool name is valid.
1061 */
1062 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1063 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1064 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1065 pool);
1066 return (NULL);
1067 }
1068
1069 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1070 return (NULL);
1071
1072 zhp->zpool_hdl = hdl;
1073 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1074
1075 if (zpool_refresh_stats(zhp, &missing) != 0) {
1076 zpool_close(zhp);
1077 return (NULL);
1078 }
1079
1080 if (missing) {
1081 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1082 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1083 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1084 zpool_close(zhp);
1085 return (NULL);
1086 }
1087
1088 return (zhp);
1089 }
1090
1091 /*
1092 * Like the above, but silent on error. Used when iterating over pools (because
1093 * the configuration cache may be out of date).
1094 */
1095 int
1096 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1097 {
1098 zpool_handle_t *zhp;
1099 boolean_t missing;
1100
1101 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1102 return (-1);
1103
1104 zhp->zpool_hdl = hdl;
1105 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1106
1107 if (zpool_refresh_stats(zhp, &missing) != 0) {
1108 zpool_close(zhp);
1109 return (-1);
1110 }
1111
1112 if (missing) {
1113 zpool_close(zhp);
1114 *ret = NULL;
1115 return (0);
1116 }
1117
1118 *ret = zhp;
1119 return (0);
1120 }
1121
1122 /*
1123 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1124 * state.
1125 */
1126 zpool_handle_t *
1127 zpool_open(libzfs_handle_t *hdl, const char *pool)
1128 {
1129 zpool_handle_t *zhp;
1130
1131 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1132 return (NULL);
1133
1134 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1135 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1136 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1137 zpool_close(zhp);
1138 return (NULL);
1139 }
1140
1141 return (zhp);
1142 }
1143
1144 /*
1145 * Close the handle. Simply frees the memory associated with the handle.
1146 */
1147 void
1148 zpool_close(zpool_handle_t *zhp)
1149 {
1150 nvlist_free(zhp->zpool_config);
1151 nvlist_free(zhp->zpool_old_config);
1152 nvlist_free(zhp->zpool_props);
1153 free(zhp);
1154 }
1155
1156 /*
1157 * Return the name of the pool.
1158 */
1159 const char *
1160 zpool_get_name(zpool_handle_t *zhp)
1161 {
1162 return (zhp->zpool_name);
1163 }
1164
1165
1166 /*
1167 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1168 */
1169 int
1170 zpool_get_state(zpool_handle_t *zhp)
1171 {
1172 return (zhp->zpool_state);
1173 }
1174
1175 /*
1176 * Create the named pool, using the provided vdev list. It is assumed
1177 * that the consumer has already validated the contents of the nvlist, so we
1178 * don't have to worry about error semantics.
1179 */
1180 int
1181 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1182 nvlist_t *props, nvlist_t *fsprops)
1183 {
1184 zfs_cmd_t zc = {"\0"};
1185 nvlist_t *zc_fsprops = NULL;
1186 nvlist_t *zc_props = NULL;
1187 char msg[1024];
1188 int ret = -1;
1189
1190 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1191 "cannot create '%s'"), pool);
1192
1193 if (!zpool_name_valid(hdl, B_FALSE, pool))
1194 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1195
1196 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1197 return (-1);
1198
1199 if (props) {
1200 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1201
1202 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1203 SPA_VERSION_1, flags, msg)) == NULL) {
1204 goto create_failed;
1205 }
1206 }
1207
1208 if (fsprops) {
1209 uint64_t zoned;
1210 char *zonestr;
1211
1212 zoned = ((nvlist_lookup_string(fsprops,
1213 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1214 strcmp(zonestr, "on") == 0);
1215
1216 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1217 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1218 goto create_failed;
1219 }
1220 if (!zc_props &&
1221 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1222 goto create_failed;
1223 }
1224 if (nvlist_add_nvlist(zc_props,
1225 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1226 goto create_failed;
1227 }
1228 }
1229
1230 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1231 goto create_failed;
1232
1233 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1234
1235 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1236
1237 zcmd_free_nvlists(&zc);
1238 nvlist_free(zc_props);
1239 nvlist_free(zc_fsprops);
1240
1241 switch (errno) {
1242 case EBUSY:
1243 /*
1244 * This can happen if the user has specified the same
1245 * device multiple times. We can't reliably detect this
1246 * until we try to add it and see we already have a
1247 * label. This can also happen if the device is
1248 * part of an active md or lvm device.
1249 */
1250 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1251 "one or more vdevs refer to the same device, or "
1252 "one of\nthe devices is part of an active md or "
1253 "lvm device"));
1254 return (zfs_error(hdl, EZFS_BADDEV, msg));
1255
1256 case ERANGE:
1257 /*
1258 * This happens if the record size is smaller or larger
1259 * than the allowed size range, or not a power of 2.
1260 *
1261 * NOTE: although zfs_valid_proplist is called earlier,
1262 * this case may have slipped through since the
1263 * pool does not exist yet and it is therefore
1264 * impossible to read properties e.g. max blocksize
1265 * from the pool.
1266 */
1267 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1268 "record size invalid"));
1269 return (zfs_error(hdl, EZFS_BADPROP, msg));
1270
1271 case EOVERFLOW:
1272 /*
1273 * This occurs when one of the devices is below
1274 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1275 * device was the problem device since there's no
1276 * reliable way to determine device size from userland.
1277 */
1278 {
1279 char buf[64];
1280
1281 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1282
1283 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1284 "one or more devices is less than the "
1285 "minimum size (%s)"), buf);
1286 }
1287 return (zfs_error(hdl, EZFS_BADDEV, msg));
1288
1289 case ENOSPC:
1290 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1291 "one or more devices is out of space"));
1292 return (zfs_error(hdl, EZFS_BADDEV, msg));
1293
1294 case ENOTBLK:
1295 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1296 "cache device must be a disk or disk slice"));
1297 return (zfs_error(hdl, EZFS_BADDEV, msg));
1298
1299 default:
1300 return (zpool_standard_error(hdl, errno, msg));
1301 }
1302 }
1303
1304 create_failed:
1305 zcmd_free_nvlists(&zc);
1306 nvlist_free(zc_props);
1307 nvlist_free(zc_fsprops);
1308 return (ret);
1309 }
1310
1311 /*
1312 * Destroy the given pool. It is up to the caller to ensure that there are no
1313 * datasets left in the pool.
1314 */
1315 int
1316 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1317 {
1318 zfs_cmd_t zc = {"\0"};
1319 zfs_handle_t *zfp = NULL;
1320 libzfs_handle_t *hdl = zhp->zpool_hdl;
1321 char msg[1024];
1322
1323 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1324 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1325 return (-1);
1326
1327 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1328 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1329
1330 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1331 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1332 "cannot destroy '%s'"), zhp->zpool_name);
1333
1334 if (errno == EROFS) {
1335 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1336 "one or more devices is read only"));
1337 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1338 } else {
1339 (void) zpool_standard_error(hdl, errno, msg);
1340 }
1341
1342 if (zfp)
1343 zfs_close(zfp);
1344 return (-1);
1345 }
1346
1347 if (zfp) {
1348 remove_mountpoint(zfp);
1349 zfs_close(zfp);
1350 }
1351
1352 return (0);
1353 }
1354
1355 /*
1356 * Add the given vdevs to the pool. The caller must have already performed the
1357 * necessary verification to ensure that the vdev specification is well-formed.
1358 */
1359 int
1360 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1361 {
1362 zfs_cmd_t zc = {"\0"};
1363 int ret;
1364 libzfs_handle_t *hdl = zhp->zpool_hdl;
1365 char msg[1024];
1366 nvlist_t **spares, **l2cache;
1367 uint_t nspares, nl2cache;
1368
1369 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1370 "cannot add to '%s'"), zhp->zpool_name);
1371
1372 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1373 SPA_VERSION_SPARES &&
1374 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1375 &spares, &nspares) == 0) {
1376 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1377 "upgraded to add hot spares"));
1378 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1379 }
1380
1381 #if defined(__sun__) || defined(__sun)
1382 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1383 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1384 uint64_t s;
1385
1386 for (s = 0; s < nspares; s++) {
1387 char *path;
1388
1389 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1390 &path) == 0 && pool_uses_efi(spares[s])) {
1391 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1392 "device '%s' contains an EFI label and "
1393 "cannot be used on root pools."),
1394 zpool_vdev_name(hdl, NULL, spares[s], 0));
1395 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1396 }
1397 }
1398 }
1399 #endif
1400
1401 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1402 SPA_VERSION_L2CACHE &&
1403 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1404 &l2cache, &nl2cache) == 0) {
1405 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1406 "upgraded to add cache devices"));
1407 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1408 }
1409
1410 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1411 return (-1);
1412 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1413
1414 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1415 switch (errno) {
1416 case EBUSY:
1417 /*
1418 * This can happen if the user has specified the same
1419 * device multiple times. We can't reliably detect this
1420 * until we try to add it and see we already have a
1421 * label.
1422 */
1423 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1424 "one or more vdevs refer to the same device"));
1425 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1426 break;
1427
1428 case EOVERFLOW:
1429 /*
1430 * This occurs when one of the devices is below
1431 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1432 * device was the problem device since there's no
1433 * reliable way to determine device size from userland.
1434 */
1435 {
1436 char buf[64];
1437
1438 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1439
1440 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1441 "device is less than the minimum "
1442 "size (%s)"), buf);
1443 }
1444 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1445 break;
1446
1447 case ENOTSUP:
1448 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1449 "pool must be upgraded to add these vdevs"));
1450 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1451 break;
1452
1453 case ENOTBLK:
1454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1455 "cache device must be a disk or disk slice"));
1456 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1457 break;
1458
1459 default:
1460 (void) zpool_standard_error(hdl, errno, msg);
1461 }
1462
1463 ret = -1;
1464 } else {
1465 ret = 0;
1466 }
1467
1468 zcmd_free_nvlists(&zc);
1469
1470 return (ret);
1471 }
1472
1473 /*
1474 * Exports the pool from the system. The caller must ensure that there are no
1475 * mounted datasets in the pool.
1476 */
1477 static int
1478 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1479 const char *log_str)
1480 {
1481 zfs_cmd_t zc = {"\0"};
1482 char msg[1024];
1483
1484 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1485 "cannot export '%s'"), zhp->zpool_name);
1486
1487 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1488 zc.zc_cookie = force;
1489 zc.zc_guid = hardforce;
1490 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1491
1492 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1493 switch (errno) {
1494 case EXDEV:
1495 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1496 "use '-f' to override the following errors:\n"
1497 "'%s' has an active shared spare which could be"
1498 " used by other pools once '%s' is exported."),
1499 zhp->zpool_name, zhp->zpool_name);
1500 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1501 msg));
1502 default:
1503 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1504 msg));
1505 }
1506 }
1507
1508 return (0);
1509 }
1510
1511 int
1512 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1513 {
1514 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1515 }
1516
1517 int
1518 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1519 {
1520 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1521 }
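/*
 * Illustrative sketch (not part of libzfs): exporting a pool while
 * recording a hypothetical command string in the pool history.
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */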
1522
1523 static void
1524 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1525 nvlist_t *config)
1526 {
1527 nvlist_t *nv = NULL;
1528 uint64_t rewindto;
1529 int64_t loss = -1;
1530 struct tm t;
1531 char timestr[128];
1532
1533 if (!hdl->libzfs_printerr || config == NULL)
1534 return;
1535
1536 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1537 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1538 return;
1539 }
1540
1541 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1542 return;
1543 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1544
1545 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1546 strftime(timestr, 128, "%c", &t) != 0) {
1547 if (dryrun) {
1548 (void) printf(dgettext(TEXT_DOMAIN,
1549 "Would be able to return %s "
1550 "to its state as of %s.\n"),
1551 name, timestr);
1552 } else {
1553 (void) printf(dgettext(TEXT_DOMAIN,
1554 "Pool %s returned to its state as of %s.\n"),
1555 name, timestr);
1556 }
1557 if (loss > 120) {
1558 (void) printf(dgettext(TEXT_DOMAIN,
1559 "%s approximately %lld "),
1560 dryrun ? "Would discard" : "Discarded",
1561 ((longlong_t)loss + 30) / 60);
1562 (void) printf(dgettext(TEXT_DOMAIN,
1563 "minutes of transactions.\n"));
1564 } else if (loss > 0) {
1565 (void) printf(dgettext(TEXT_DOMAIN,
1566 "%s approximately %lld "),
1567 dryrun ? "Would discard" : "Discarded",
1568 (longlong_t)loss);
1569 (void) printf(dgettext(TEXT_DOMAIN,
1570 "seconds of transactions.\n"));
1571 }
1572 }
1573 }
1574
1575 void
1576 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1577 nvlist_t *config)
1578 {
1579 nvlist_t *nv = NULL;
1580 int64_t loss = -1;
1581 uint64_t edata = UINT64_MAX;
1582 uint64_t rewindto;
1583 struct tm t;
1584 char timestr[128];
1585
1586 if (!hdl->libzfs_printerr)
1587 return;
1588
1589 if (reason >= 0)
1590 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1591 else
1592 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1593
1594 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1595 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1596 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1597 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1598 goto no_info;
1599
1600 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1601 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1602 &edata);
1603
1604 (void) printf(dgettext(TEXT_DOMAIN,
1605 "Recovery is possible, but will result in some data loss.\n"));
1606
1607 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1608 strftime(timestr, 128, "%c", &t) != 0) {
1609 (void) printf(dgettext(TEXT_DOMAIN,
1610 "\tReturning the pool to its state as of %s\n"
1611 "\tshould correct the problem. "),
1612 timestr);
1613 } else {
1614 (void) printf(dgettext(TEXT_DOMAIN,
1615 "\tReverting the pool to an earlier state "
1616 "should correct the problem.\n\t"));
1617 }
1618
1619 if (loss > 120) {
1620 (void) printf(dgettext(TEXT_DOMAIN,
1621 "Approximately %lld minutes of data\n"
1622 "\tmust be discarded, irreversibly. "),
1623 ((longlong_t)loss + 30) / 60);
1624 } else if (loss > 0) {
1625 (void) printf(dgettext(TEXT_DOMAIN,
1626 "Approximately %lld seconds of data\n"
1627 "\tmust be discarded, irreversibly. "),
1628 (longlong_t)loss);
1629 }
1630 if (edata != 0 && edata != UINT64_MAX) {
1631 if (edata == 1) {
1632 (void) printf(dgettext(TEXT_DOMAIN,
1633 "After rewind, at least\n"
1634 "\tone persistent user-data error will remain. "));
1635 } else {
1636 (void) printf(dgettext(TEXT_DOMAIN,
1637 "After rewind, several\n"
1638 "\tpersistent user-data errors will remain. "));
1639 }
1640 }
1641 (void) printf(dgettext(TEXT_DOMAIN,
1642 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1643 reason >= 0 ? "clear" : "import", name);
1644
1645 (void) printf(dgettext(TEXT_DOMAIN,
1646 "A scrub of the pool\n"
1647 "\tis strongly recommended after recovery.\n"));
1648 return;
1649
1650 no_info:
1651 (void) printf(dgettext(TEXT_DOMAIN,
1652 "Destroy and re-create the pool from\n\ta backup source.\n"));
1653 }
1654
1655 /*
1656 * zpool_import() is a contracted interface. Should be kept the same
1657 * if possible.
1658 *
1659 * Applications should use zpool_import_props() to import a pool with
1660 * new property values to be set.
1661 */
1662 int
1663 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1664 char *altroot)
1665 {
1666 nvlist_t *props = NULL;
1667 int ret;
1668
1669 if (altroot != NULL) {
1670 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1671 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1672 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1673 newname));
1674 }
1675
1676 if (nvlist_add_string(props,
1677 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1678 nvlist_add_string(props,
1679 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1680 nvlist_free(props);
1681 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1682 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1683 newname));
1684 }
1685 }
1686
1687 ret = zpool_import_props(hdl, config, newname, props,
1688 ZFS_IMPORT_NORMAL);
1689 nvlist_free(props);
1690 return (ret);
1691 }
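/*
 * Illustrative sketch (not part of libzfs): importing a discovered
 * configuration under an alternate root. The 'config' nvlist is
 * assumed to come from zpool_find_import(), and "/mnt" is an example
 * altroot.
 *
 *	char altroot[] = "/mnt";
 *
 *	if (zpool_import(hdl, config, NULL, altroot) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */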
1692
1693 static void
1694 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1695 int indent)
1696 {
1697 nvlist_t **child;
1698 uint_t c, children;
1699 char *vname;
1700 uint64_t is_log = 0;
1701
1702 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1703 &is_log);
1704
1705 if (name != NULL)
1706 (void) printf("\t%*s%s%s\n", indent, "", name,
1707 is_log ? " [log]" : "");
1708
1709 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1710 &child, &children) != 0)
1711 return;
1712
1713 for (c = 0; c < children; c++) {
1714 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1715 print_vdev_tree(hdl, vname, child[c], indent + 2);
1716 free(vname);
1717 }
1718 }
1719
1720 void
1721 zpool_print_unsup_feat(nvlist_t *config)
1722 {
1723 nvlist_t *nvinfo, *unsup_feat;
1724 nvpair_t *nvp;
1725
1726 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1727 0);
1728 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1729 &unsup_feat) == 0);
1730
1731 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1732 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1733 char *desc;
1734
1735 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1736 verify(nvpair_value_string(nvp, &desc) == 0);
1737
1738 if (strlen(desc) > 0)
1739 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1740 else
1741 (void) printf("\t%s\n", nvpair_name(nvp));
1742 }
1743 }
1744
1745 /*
1746 * Import the given pool using the known configuration and a list of
1747 * properties to be set. The configuration should have come from
1748 * zpool_find_import(). The 'newname' parameter controls whether the pool
1749 * is imported with a different name.
1750 */
1751 int
1752 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1753 nvlist_t *props, int flags)
1754 {
1755 zfs_cmd_t zc = {"\0"};
1756 zpool_rewind_policy_t policy;
1757 nvlist_t *nv = NULL;
1758 nvlist_t *nvinfo = NULL;
1759 nvlist_t *missing = NULL;
1760 char *thename;
1761 char *origname;
1762 int ret;
1763 int error = 0;
1764 char errbuf[1024];
1765
1766 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1767 &origname) == 0);
1768
1769 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1770 "cannot import pool '%s'"), origname);
1771
1772 if (newname != NULL) {
1773 if (!zpool_name_valid(hdl, B_FALSE, newname))
1774 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1775 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1776 newname));
1777 thename = (char *)newname;
1778 } else {
1779 thename = origname;
1780 }
1781
1782 if (props != NULL) {
1783 uint64_t version;
1784 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1785
1786 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1787 &version) == 0);
1788
1789 if ((props = zpool_valid_proplist(hdl, origname,
1790 props, version, flags, errbuf)) == NULL)
1791 return (-1);
1792 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1793 nvlist_free(props);
1794 return (-1);
1795 }
1796 nvlist_free(props);
1797 }
1798
1799 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1800
1801 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1802 &zc.zc_guid) == 0);
1803
1804 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1805 zcmd_free_nvlists(&zc);
1806 return (-1);
1807 }
1808 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1809 zcmd_free_nvlists(&zc);
1810 return (-1);
1811 }
1812
1813 zc.zc_cookie = flags;
1814 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1815 errno == ENOMEM) {
1816 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1817 zcmd_free_nvlists(&zc);
1818 return (-1);
1819 }
1820 }
1821 if (ret != 0)
1822 error = errno;
1823
1824 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1825
1826 zcmd_free_nvlists(&zc);
1827
1828 zpool_get_rewind_policy(config, &policy);
1829
1830 if (error) {
1831 char desc[1024];
1832
1833 /*
1834 * Dry-run failed, but we print out what success
1835 * looks like if we found a best txg.
1836 */
1837 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1838 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1839 B_TRUE, nv);
1840 nvlist_free(nv);
1841 return (-1);
1842 }
1843
1844 if (newname == NULL)
1845 (void) snprintf(desc, sizeof (desc),
1846 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1847 thename);
1848 else
1849 (void) snprintf(desc, sizeof (desc),
1850 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1851 origname, thename);
1852
1853 switch (error) {
1854 case ENOTSUP:
1855 if (nv != NULL && nvlist_lookup_nvlist(nv,
1856 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1857 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1858 (void) printf(dgettext(TEXT_DOMAIN, "This "
1859 "pool uses the following feature(s) not "
1860 "supported by this system:\n"));
1861 zpool_print_unsup_feat(nv);
1862 if (nvlist_exists(nvinfo,
1863 ZPOOL_CONFIG_CAN_RDONLY)) {
1864 (void) printf(dgettext(TEXT_DOMAIN,
1865 "All unsupported features are only "
1866 "required for writing to the pool."
1867 "\nThe pool can be imported using "
1868 "'-o readonly=on'.\n"));
1869 }
1870 }
1871 /*
1872 * Unsupported version.
1873 */
1874 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1875 break;
1876
1877 case EINVAL:
1878 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1879 break;
1880
1881 case EROFS:
1882 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1883 "one or more devices is read only"));
1884 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1885 break;
1886
1887 case ENXIO:
1888 if (nv && nvlist_lookup_nvlist(nv,
1889 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1890 nvlist_lookup_nvlist(nvinfo,
1891 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1892 (void) printf(dgettext(TEXT_DOMAIN,
1893 "The devices below are missing, use "
1894 "'-m' to import the pool anyway:\n"));
1895 print_vdev_tree(hdl, NULL, missing, 2);
1896 (void) printf("\n");
1897 }
1898 (void) zpool_standard_error(hdl, error, desc);
1899 break;
1900
1901 case EEXIST:
1902 (void) zpool_standard_error(hdl, error, desc);
1903 break;
1904
1905 case EBUSY:
1906 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1907 "one or more devices are already in use\n"));
1908 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1909 break;
1910 case ENAMETOOLONG:
1911 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1912 "new name of at least one dataset is longer than "
1913 "the maximum allowable length"));
1914 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1915 break;
1916 default:
1917 (void) zpool_standard_error(hdl, error, desc);
1918 zpool_explain_recover(hdl,
1919 newname ? origname : thename, -error, nv);
1920 break;
1921 }
1922
1923 nvlist_free(nv);
1924 ret = -1;
1925 } else {
1926 zpool_handle_t *zhp;
1927
1928 /*
1929 * This should never fail, but play it safe anyway.
1930 */
1931 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1932 ret = -1;
1933 else if (zhp != NULL)
1934 zpool_close(zhp);
1935 if (policy.zrp_request &
1936 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1937 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1938 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1939 }
1940 nvlist_free(nv);
1941 return (0);
1942 }
1943
1944 return (ret);
1945 }
1946
1947 /*
1948 * Scan the pool.
1949 */
1950 int
1951 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1952 {
1953 zfs_cmd_t zc = {"\0"};
1954 char msg[1024];
1955 libzfs_handle_t *hdl = zhp->zpool_hdl;
1956
1957 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1958 zc.zc_cookie = func;
1959
1960 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1961 (errno == ENOENT && func != POOL_SCAN_NONE))
1962 return (0);
1963
1964 if (func == POOL_SCAN_SCRUB) {
1965 (void) snprintf(msg, sizeof (msg),
1966 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1967 } else if (func == POOL_SCAN_NONE) {
1968 (void) snprintf(msg, sizeof (msg),
1969 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1970 zc.zc_name);
1971 } else {
1972 assert(!"unexpected result");
1973 }
1974
1975 if (errno == EBUSY) {
1976 nvlist_t *nvroot;
1977 pool_scan_stat_t *ps = NULL;
1978 uint_t psc;
1979
1980 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1981 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1982 (void) nvlist_lookup_uint64_array(nvroot,
1983 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1984 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1985 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1986 else
1987 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1988 } else if (errno == ENOENT) {
1989 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1990 } else {
1991 return (zpool_standard_error(hdl, errno, msg));
1992 }
1993 }
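/*
 * Illustrative sketch (not part of libzfs): starting a scrub and, if
 * one is already running, cancelling it instead.
 *
 *	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0 &&
 *	    libzfs_errno(hdl) == EZFS_SCRUBBING)
 *		(void) zpool_scan(zhp, POOL_SCAN_NONE);
 */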
1994
1995 /*
1996 * Find a vdev that matches the search criteria specified. We use the
1997 * nvpair name to determine how we should look for the device.
1998 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1999 * spare, but FALSE if it is an INUSE spare.
2000 */
2001 static nvlist_t *
2002 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2003 boolean_t *l2cache, boolean_t *log)
2004 {
2005 uint_t c, children;
2006 nvlist_t **child;
2007 nvlist_t *ret;
2008 uint64_t is_log;
2009 char *srchkey;
2010 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2011
2012 /* Nothing to look for */
2013 if (search == NULL || pair == NULL)
2014 return (NULL);
2015
2016 /* Obtain the key we will use to search */
2017 srchkey = nvpair_name(pair);
2018
2019 switch (nvpair_type(pair)) {
2020 case DATA_TYPE_UINT64:
2021 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2022 uint64_t srchval, theguid;
2023
2024 verify(nvpair_value_uint64(pair, &srchval) == 0);
2025 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2026 &theguid) == 0);
2027 if (theguid == srchval)
2028 return (nv);
2029 }
2030 break;
2031
2032 case DATA_TYPE_STRING: {
2033 char *srchval, *val;
2034
2035 verify(nvpair_value_string(pair, &srchval) == 0);
2036 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2037 break;
2038
2039 /*
2040 * Search for the requested value. Special cases:
2041 *
2042 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2043 * "-part1", or "p1". The suffix is hidden from the user,
2044 * but included in the string, so this matches around it.
2045 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2046 * is used to check all possible expanded paths.
2047 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2048 *
2049 * Otherwise, all other searches are simple string compares.
2050 */
2051 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2052 uint64_t wholedisk = 0;
2053
2054 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2055 &wholedisk);
2056 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2057 return (nv);
2058
2059 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2060 char *type, *idx, *end, *p;
2061 uint64_t id, vdev_id;
2062
2063 /*
2064 * Determine our vdev type, keeping in mind
2065 * that the srchval is composed of a type and
2066 * vdev id pair (i.e. mirror-4).
2067 */
2068 if ((type = strdup(srchval)) == NULL)
2069 return (NULL);
2070
2071 if ((p = strrchr(type, '-')) == NULL) {
2072 free(type);
2073 break;
2074 }
2075 idx = p + 1;
2076 *p = '\0';
2077
2078 /*
2079 * If the types don't match then keep looking.
2080 */
2081 if (strncmp(val, type, strlen(val)) != 0) {
2082 free(type);
2083 break;
2084 }
2085
2086 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2087 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2088 strncmp(type, VDEV_TYPE_MIRROR,
2089 strlen(VDEV_TYPE_MIRROR)) == 0);
2090 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2091 &id) == 0);
2092
2093 errno = 0;
2094 vdev_id = strtoull(idx, &end, 10);
2095
2096 free(type);
2097 if (errno != 0)
2098 return (NULL);
2099
2100 /*
2101 * Now verify that we have the correct vdev id.
2102 */
2103 if (vdev_id == id)
2104 return (nv);
2105 }
2106
2107 /*
2108 * Common case
2109 */
2110 if (strcmp(srchval, val) == 0)
2111 return (nv);
2112 break;
2113 }
2114
2115 default:
2116 break;
2117 }
2118
2119 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2120 &child, &children) != 0)
2121 return (NULL);
2122
2123 for (c = 0; c < children; c++) {
2124 if ((ret = vdev_to_nvlist_iter(child[c], search,
2125 avail_spare, l2cache, NULL)) != NULL) {
2126 /*
2127 * The 'is_log' value is only set for the top-level
2128 * vdev, not the leaf vdevs, so we always look up the
2129 * log device from the root of the vdev tree (where
2130 * 'log' is non-NULL).
2131 */
2132 if (log != NULL &&
2133 nvlist_lookup_uint64(child[c],
2134 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2135 is_log) {
2136 *log = B_TRUE;
2137 }
2138 return (ret);
2139 }
2140 }
2141
2142 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2143 &child, &children) == 0) {
2144 for (c = 0; c < children; c++) {
2145 if ((ret = vdev_to_nvlist_iter(child[c], search,
2146 avail_spare, l2cache, NULL)) != NULL) {
2147 *avail_spare = B_TRUE;
2148 return (ret);
2149 }
2150 }
2151 }
2152
2153 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2154 &child, &children) == 0) {
2155 for (c = 0; c < children; c++) {
2156 if ((ret = vdev_to_nvlist_iter(child[c], search,
2157 avail_spare, l2cache, NULL)) != NULL) {
2158 *l2cache = B_TRUE;
2159 return (ret);
2160 }
2161 }
2162 }
2163
2164 return (NULL);
2165 }
2166
2167 /*
2168 * Given a physical path (minus the "/devices" prefix), find the
2169 * associated vdev.
2170 */
2171 nvlist_t *
2172 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2173 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2174 {
2175 nvlist_t *search, *nvroot, *ret;
2176
2177 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2178 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2179
2180 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2181 &nvroot) == 0);
2182
2183 *avail_spare = B_FALSE;
2184 *l2cache = B_FALSE;
2185 if (log != NULL)
2186 *log = B_FALSE;
2187 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2188 nvlist_free(search);
2189
2190 return (ret);
2191 }
2192
2193 /*
2194 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2195 */
2196 boolean_t
2197 zpool_vdev_is_interior(const char *name)
2198 {
2199 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2200 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2201 return (B_TRUE);
2202 return (B_FALSE);
2203 }
2204
2205 nvlist_t *
2206 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2207 boolean_t *l2cache, boolean_t *log)
2208 {
2209 char *end;
2210 nvlist_t *nvroot, *search, *ret;
2211 uint64_t guid;
2212
2213 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2214
2215 guid = strtoull(path, &end, 0);
2216 if (guid != 0 && *end == '\0') {
2217 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2218 } else if (zpool_vdev_is_interior(path)) {
2219 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2220 } else {
2221 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2222 }
2223
2224 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2225 &nvroot) == 0);
2226
2227 *avail_spare = B_FALSE;
2228 *l2cache = B_FALSE;
2229 if (log != NULL)
2230 *log = B_FALSE;
2231 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2232 nvlist_free(search);
2233
2234 return (ret);
2235 }
2236
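/*
 * Illustrative usage sketch (compiled out; not part of libzfs): look up
 * a device by path, GUID string, or top-level name (e.g. "mirror-0")
 * and report which class of vdev it is. Assumes an open handle from
 * zpool_open().
 */
#if 0
static void
example_classify_vdev(zpool_handle_t *zhp, const char *name)
{
	boolean_t spare, l2cache, log;

	if (zpool_find_vdev(zhp, name, &spare, &l2cache, &log) == NULL) {
		(void) printf("%s: not found\n", name);
		return;
	}
	(void) printf("%s: %s\n", name, spare ? "available spare" :
	    (l2cache ? "l2cache" : (log ? "log" : "data")));
}
#endif
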
2237 static int
2238 vdev_online(nvlist_t *nv)
2239 {
2240 uint64_t ival;
2241
2242 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2243 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2244 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2245 return (0);
2246
2247 return (1);
2248 }
2249
2250 /*
2251 * Helper function for zpool_get_physpaths().
2252 */
2253 static int
2254 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2255 size_t *bytes_written)
2256 {
2257 size_t bytes_left, pos, rsz;
2258 char *tmppath;
2259 const char *format;
2260
2261 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2262 &tmppath) != 0)
2263 return (EZFS_NODEVICE);
2264
2265 pos = *bytes_written;
2266 bytes_left = physpath_size - pos;
2267 format = (pos == 0) ? "%s" : " %s";
2268
2269 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2270 *bytes_written += rsz;
2271
2272 if (rsz >= bytes_left) {
2273 /* if physpath was not copied properly, clear it */
2274 if (bytes_left != 0) {
2275 physpath[pos] = 0;
2276 }
2277 return (EZFS_NOSPC);
2278 }
2279 return (0);
2280 }
2281
2282 static int
2283 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2284 size_t *rsz, boolean_t is_spare)
2285 {
2286 char *type;
2287 int ret;
2288
2289 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2290 return (EZFS_INVALCONFIG);
2291
2292 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2293 /*
2294 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2295 * For a spare vdev, we only want to boot from the active
2296 * spare device.
2297 */
2298 if (is_spare) {
2299 uint64_t spare = 0;
2300 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2301 &spare);
2302 if (!spare)
2303 return (EZFS_INVALCONFIG);
2304 }
2305
2306 if (vdev_online(nv)) {
2307 if ((ret = vdev_get_one_physpath(nv, physpath,
2308 phypath_size, rsz)) != 0)
2309 return (ret);
2310 }
2311 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2312 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2313 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2314 nvlist_t **child;
2315 uint_t count;
2316 int i, ret;
2317
2318 if (nvlist_lookup_nvlist_array(nv,
2319 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2320 return (EZFS_INVALCONFIG);
2321
2322 for (i = 0; i < count; i++) {
2323 ret = vdev_get_physpaths(child[i], physpath,
2324 phypath_size, rsz, is_spare);
2325 if (ret == EZFS_NOSPC)
2326 return (ret);
2327 }
2328 }
2329
2330 return (EZFS_POOL_INVALARG);
2331 }
2332
2333 /*
2334 * Get phys_path for a root pool config.
2335 * Return 0 on success; non-zero on failure.
2336 */
2337 static int
2338 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2339 {
2340 size_t rsz;
2341 nvlist_t *vdev_root;
2342 nvlist_t **child;
2343 uint_t count;
2344 char *type;
2345
2346 rsz = 0;
2347
2348 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2349 &vdev_root) != 0)
2350 return (EZFS_INVALCONFIG);
2351
2352 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2353 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2354 &child, &count) != 0)
2355 return (EZFS_INVALCONFIG);
2356
2357 #if defined(__sun__) || defined(__sun)
2358 /*
2359 * A root pool cannot have EFI labeled disks and can only have
2360 * a single top-level vdev.
2361 */
2362 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2363 pool_uses_efi(vdev_root))
2364 return (EZFS_POOL_INVALARG);
2365 #endif
2366
2367 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2368 B_FALSE);
2369
2370 /* No online devices */
2371 if (rsz == 0)
2372 return (EZFS_NODEVICE);
2373
2374 return (0);
2375 }
2376
2377 /*
2378 * Get phys_path for a root pool
2379 * Return 0 on success; non-zero on failure.
2380 */
2381 int
2382 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2383 {
2384 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2385 phypath_size));
2386 }
2387
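/*
 * Illustrative usage sketch (compiled out; not part of libzfs): fetch
 * the space-separated phys_path list for a bootable pool. MAXPATHLEN
 * is just a convenient buffer size here, not a requirement of the
 * interface.
 */
#if 0
static void
example_print_physpath(zpool_handle_t *zhp)
{
	char physpath[MAXPATHLEN];

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("phys_path: %s\n", physpath);
}
#endif
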
2388 /*
2389 * If the device has been dynamically expanded then we need to relabel
2390 * the disk to use the new unallocated space.
2391 */
2392 static int
2393 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2394 {
2395 int fd, error;
2396
2397 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2398 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2399 "relabel '%s': unable to open device: %d"), path, errno);
2400 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2401 }
2402
2403 /*
2404 * It's possible that we might encounter an error if the device
2405 * does not have any unallocated space left. If so, we simply
2406 * ignore that error and continue on.
2407 *
2408 * Also, we don't call efi_rescan() - that would just return EBUSY.
2409 * The module will do it for us in vdev_disk_open().
2410 */
2411 error = efi_use_whole_disk(fd);
2412 (void) close(fd);
2413 if (error && error != VT_ENOSPC) {
2414 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2415 "relabel '%s': unable to read disk capacity"), path);
2416 return (zfs_error(hdl, EZFS_NOCAP, msg));
2417 }
2418 return (0);
2419 }
2420
2421 /*
2422 * Bring the specified vdev online. The 'flags' parameter is a set of the
2423 * ZFS_ONLINE_* flags.
2424 */
2425 int
2426 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2427 vdev_state_t *newstate)
2428 {
2429 zfs_cmd_t zc = {"\0"};
2430 char msg[1024];
2431 nvlist_t *tgt;
2432 boolean_t avail_spare, l2cache, islog;
2433 libzfs_handle_t *hdl = zhp->zpool_hdl;
2434 int error;
2435
2436 if (flags & ZFS_ONLINE_EXPAND) {
2437 (void) snprintf(msg, sizeof (msg),
2438 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2439 } else {
2440 (void) snprintf(msg, sizeof (msg),
2441 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2442 }
2443
2444 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2445 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2446 &islog)) == NULL)
2447 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2448
2449 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2450
2451 if (avail_spare)
2452 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2453
2454 if (flags & ZFS_ONLINE_EXPAND ||
2455 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2456 uint64_t wholedisk = 0;
2457
2458 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2459 &wholedisk);
2460
2461 /*
2462 * XXX - L2ARC 1.0 devices can't support expansion.
2463 */
2464 if (l2cache) {
2465 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2466 "cannot expand cache devices"));
2467 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2468 }
2469
2470 if (wholedisk) {
2471 const char *fullpath = path;
2472 char buf[MAXPATHLEN];
2473
2474 if (path[0] != '/') {
2475 error = zfs_resolve_shortname(path, buf,
2476 sizeof (buf));
2477 if (error != 0)
2478 return (zfs_error(hdl, EZFS_NODEVICE,
2479 msg));
2480
2481 fullpath = buf;
2482 }
2483
2484 error = zpool_relabel_disk(hdl, fullpath, msg);
2485 if (error != 0)
2486 return (error);
2487 }
2488 }
2489
2490 zc.zc_cookie = VDEV_STATE_ONLINE;
2491 zc.zc_obj = flags;
2492
2493 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2494 if (errno == EINVAL) {
2495 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2496 "from this pool into a new one. Use '%s' "
2497 "instead"), "zpool detach");
2498 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2499 }
2500 return (zpool_standard_error(hdl, errno, msg));
2501 }
2502
2503 *newstate = zc.zc_cookie;
2504 return (0);
2505 }
2506
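/*
 * Illustrative usage sketch (compiled out; not part of libzfs): bring a
 * device back online and request expansion into any newly grown LUN
 * capacity, then report the resulting kernel vdev state.
 */
#if 0
static int
example_online_expand(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;

	if (zpool_vdev_online(zhp, path, ZFS_ONLINE_EXPAND, &newstate) != 0)
		return (-1);
	(void) printf("%s is now in state %d\n", path, (int)newstate);
	return (0);
}
#endif
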
2507 /*
2508 * Take the specified vdev offline
2509 */
2510 int
2511 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2512 {
2513 zfs_cmd_t zc = {"\0"};
2514 char msg[1024];
2515 nvlist_t *tgt;
2516 boolean_t avail_spare, l2cache;
2517 libzfs_handle_t *hdl = zhp->zpool_hdl;
2518
2519 (void) snprintf(msg, sizeof (msg),
2520 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2521
2522 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2523 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2524 NULL)) == NULL)
2525 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2526
2527 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2528
2529 if (avail_spare)
2530 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2531
2532 zc.zc_cookie = VDEV_STATE_OFFLINE;
2533 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2534
2535 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2536 return (0);
2537
2538 switch (errno) {
2539 case EBUSY:
2540
2541 /*
2542 * There are no other replicas of this device.
2543 */
2544 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2545
2546 case EEXIST:
2547 /*
2548 * The log device has unplayed logs
2549 */
2550 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2551
2552 default:
2553 return (zpool_standard_error(hdl, errno, msg));
2554 }
2555 }
2556
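/*
 * Illustrative usage sketch (compiled out; not part of libzfs):
 * temporarily offline a disk, e.g. before pulling it for service.
 * Passing B_FALSE instead makes the offline state persist across
 * reboots.
 */
#if 0
static int
example_offline_for_service(zpool_handle_t *zhp, const char *path)
{
	return (zpool_vdev_offline(zhp, path, B_TRUE));
}
#endif
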
2557 /*
2558 * Mark the given vdev faulted.
2559 */
2560 int
2561 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2562 {
2563 zfs_cmd_t zc = {"\0"};
2564 char msg[1024];
2565 libzfs_handle_t *hdl = zhp->zpool_hdl;
2566
2567 (void) snprintf(msg, sizeof (msg),
2568 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2569
2570 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2571 zc.zc_guid = guid;
2572 zc.zc_cookie = VDEV_STATE_FAULTED;
2573 zc.zc_obj = aux;
2574
2575 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2576 return (0);
2577
2578 switch (errno) {
2579 case EBUSY:
2580
2581 /*
2582 * There are no other replicas of this device.
2583 */
2584 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2585
2586 default:
2587 return (zpool_standard_error(hdl, errno, msg));
2588 }
2589
2590 }
2591
2592 /*
2593 * Mark the given vdev degraded.
2594 */
2595 int
2596 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2597 {
2598 zfs_cmd_t zc = {"\0"};
2599 char msg[1024];
2600 libzfs_handle_t *hdl = zhp->zpool_hdl;
2601
2602 (void) snprintf(msg, sizeof (msg),
2603 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2604
2605 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2606 zc.zc_guid = guid;
2607 zc.zc_cookie = VDEV_STATE_DEGRADED;
2608 zc.zc_obj = aux;
2609
2610 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2611 return (0);
2612
2613 return (zpool_standard_error(hdl, errno, msg));
2614 }
2615
2616 /*
2617 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2618 * a hot spare.
2619 */
2620 static boolean_t
2621 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2622 {
2623 nvlist_t **child;
2624 uint_t c, children;
2625 char *type;
2626
2627 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2628 &children) == 0) {
2629 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2630 &type) == 0);
2631
2632 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2633 children == 2 && child[which] == tgt)
2634 return (B_TRUE);
2635
2636 for (c = 0; c < children; c++)
2637 if (is_replacing_spare(child[c], tgt, which))
2638 return (B_TRUE);
2639 }
2640
2641 return (B_FALSE);
2642 }
2643
2644 /*
2645 * Attach new_disk (fully described by nvroot) to old_disk.
2646 * If 'replacing' is specified, the new disk will replace the old one.
2647 */
2648 int
2649 zpool_vdev_attach(zpool_handle_t *zhp,
2650 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2651 {
2652 zfs_cmd_t zc = {"\0"};
2653 char msg[1024];
2654 int ret;
2655 nvlist_t *tgt;
2656 boolean_t avail_spare, l2cache, islog;
2657 uint64_t val;
2658 char *newname;
2659 nvlist_t **child;
2660 uint_t children;
2661 nvlist_t *config_root;
2662 libzfs_handle_t *hdl = zhp->zpool_hdl;
2663 boolean_t rootpool = zpool_is_bootable(zhp);
2664
2665 if (replacing)
2666 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2667 "cannot replace %s with %s"), old_disk, new_disk);
2668 else
2669 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2670 "cannot attach %s to %s"), new_disk, old_disk);
2671
2672 #if defined(__sun__) || defined(__sun)
2673 /*
2674 * If this is a root pool, make sure that we're not attaching an
2675 * EFI labeled device.
2676 */
2677 if (rootpool && pool_uses_efi(nvroot)) {
2678 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2679 "EFI labeled devices are not supported on root pools."));
2680 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2681 }
2682 #endif
2683
2684 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2685 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2686 &islog)) == 0)
2687 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2688
2689 if (avail_spare)
2690 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2691
2692 if (l2cache)
2693 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2694
2695 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2696 zc.zc_cookie = replacing;
2697
2698 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2699 &child, &children) != 0 || children != 1) {
2700 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2701 "new device must be a single disk"));
2702 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2703 }
2704
2705 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2706 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2707
2708 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2709 return (-1);
2710
2711 /*
2712 * If the target is a hot spare that has been swapped in, we can only
2713 * replace it with another hot spare.
2714 */
2715 if (replacing &&
2716 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2717 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2718 NULL) == NULL || !avail_spare) &&
2719 is_replacing_spare(config_root, tgt, 1)) {
2720 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2721 "can only be replaced by another hot spare"));
2722 free(newname);
2723 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2724 }
2725
2726 free(newname);
2727
2728 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2729 return (-1);
2730
2731 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2732
2733 zcmd_free_nvlists(&zc);
2734
2735 if (ret == 0) {
2736 if (rootpool) {
2737 /*
2738 * XXX need a better way to prevent user from
2739 * booting up a half-baked vdev.
2740 */
2741 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2742 "sure to wait until resilver is done "
2743 "before rebooting.\n"));
2744 }
2745 return (0);
2746 }
2747
2748 switch (errno) {
2749 case ENOTSUP:
2750 /*
2751 * Can't attach to or replace this type of vdev.
2752 */
2753 if (replacing) {
2754 uint64_t version = zpool_get_prop_int(zhp,
2755 ZPOOL_PROP_VERSION, NULL);
2756
2757 if (islog)
2758 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2759 "cannot replace a log with a spare"));
2760 else if (version >= SPA_VERSION_MULTI_REPLACE)
2761 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2762 "already in replacing/spare config; wait "
2763 "for completion or use 'zpool detach'"));
2764 else
2765 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2766 "cannot replace a replacing device"));
2767 } else {
2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2769 "can only attach to mirrors and top-level "
2770 "disks"));
2771 }
2772 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2773 break;
2774
2775 case EINVAL:
2776 /*
2777 * The new device must be a single disk.
2778 */
2779 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2780 "new device must be a single disk"));
2781 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2782 break;
2783
2784 case EBUSY:
2785 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2786 new_disk);
2787 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2788 break;
2789
2790 case EOVERFLOW:
2791 /*
2792 * The new device is too small.
2793 */
2794 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2795 "device is too small"));
2796 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2797 break;
2798
2799 case EDOM:
2800 /*
2801 * The new device has a different optimal sector size.
2802 */
2803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2804 "new device has a different optimal sector size; use the "
2805 "option '-o ashift=N' to override the optimal size"));
2806 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2807 break;
2808
2809 case ENAMETOOLONG:
2810 /*
2811 * The resulting top-level vdev spec won't fit in the label.
2812 */
2813 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2814 break;
2815
2816 default:
2817 (void) zpool_standard_error(hdl, errno, msg);
2818 }
2819
2820 return (-1);
2821 }
2822
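/*
 * Illustrative usage sketch (compiled out; not part of libzfs). The
 * 'nvroot' tree describing the single new disk must be built by the
 * caller; the zpool(8) command does this with its make_root_vdev()
 * helper, which lives in the command, not in this library. With
 * replacing = 1 the new disk replaces old_disk; with 0 it is attached
 * alongside to form (or extend) a mirror.
 */
#if 0
static int
example_replace_disk(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk, nvlist_t *nvroot)
{
	return (zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, 1));
}
#endif
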
2823 /*
2824 * Detach the specified device.
2825 */
2826 int
2827 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2828 {
2829 zfs_cmd_t zc = {"\0"};
2830 char msg[1024];
2831 nvlist_t *tgt;
2832 boolean_t avail_spare, l2cache;
2833 libzfs_handle_t *hdl = zhp->zpool_hdl;
2834
2835 (void) snprintf(msg, sizeof (msg),
2836 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2837
2838 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2839 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2840 NULL)) == 0)
2841 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2842
2843 if (avail_spare)
2844 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2845
2846 if (l2cache)
2847 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2848
2849 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2850
2851 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2852 return (0);
2853
2854 switch (errno) {
2855
2856 case ENOTSUP:
2857 /*
2858 * Can't detach from this type of vdev.
2859 */
2860 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2861 "applicable to mirror and replacing vdevs"));
2862 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2863 break;
2864
2865 case EBUSY:
2866 /*
2867 * There are no other replicas of this device.
2868 */
2869 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2870 break;
2871
2872 default:
2873 (void) zpool_standard_error(hdl, errno, msg);
2874 }
2875
2876 return (-1);
2877 }
2878
2879 /*
2880 * Find a mirror vdev in the source nvlist.
2881 *
2882 * The mchild array contains a list of disks in one of the top-level mirrors
2883 * of the source pool. The schild array contains a list of disks that the
2884 * user specified on the command line. We loop over the mchild array to
2885 * see if any entry in the schild array matches.
2886 *
2887 * If a disk in the mchild array is found in the schild array, we return
2888 * the index of that entry. Otherwise we return -1.
2889 */
2890 static int
2891 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2892 nvlist_t **schild, uint_t schildren)
2893 {
2894 uint_t mc;
2895
2896 for (mc = 0; mc < mchildren; mc++) {
2897 uint_t sc;
2898 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2899 mchild[mc], 0);
2900
2901 for (sc = 0; sc < schildren; sc++) {
2902 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2903 schild[sc], 0);
2904 boolean_t result = (strcmp(mpath, spath) == 0);
2905
2906 free(spath);
2907 if (result) {
2908 free(mpath);
2909 return (mc);
2910 }
2911 }
2912
2913 free(mpath);
2914 }
2915
2916 return (-1);
2917 }
2918
2919 /*
2920 * Split a mirror pool. If '*newroot' is NULL then a new nvlist
2921 * is generated, and it is the caller's responsibility to free it.
2922 */
2923 int
2924 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2925 nvlist_t *props, splitflags_t flags)
2926 {
2927 zfs_cmd_t zc = {"\0"};
2928 char msg[1024];
2929 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2930 nvlist_t **varray = NULL, *zc_props = NULL;
2931 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2932 libzfs_handle_t *hdl = zhp->zpool_hdl;
2933 uint64_t vers;
2934 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2935 int retval = 0;
2936
2937 (void) snprintf(msg, sizeof (msg),
2938 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2939
2940 if (!zpool_name_valid(hdl, B_FALSE, newname))
2941 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2942
2943 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2944 (void) fprintf(stderr, gettext("Internal error: unable to "
2945 "retrieve pool configuration\n"));
2946 return (-1);
2947 }
2948
2949 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2950 == 0);
2951 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2952
2953 if (props) {
2954 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2955 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2956 props, vers, flags, msg)) == NULL)
2957 return (-1);
2958 }
2959
2960 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2961 &children) != 0) {
2962 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2963 "Source pool is missing vdev tree"));
2964 nvlist_free(zc_props);
2965 return (-1);
2966 }
2967
2968 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2969 vcount = 0;
2970
2971 if (*newroot == NULL ||
2972 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2973 &newchild, &newchildren) != 0)
2974 newchildren = 0;
2975
2976 for (c = 0; c < children; c++) {
2977 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2978 char *type;
2979 nvlist_t **mchild, *vdev;
2980 uint_t mchildren;
2981 int entry;
2982
2983 /*
2984 * Unlike cache & spares, slogs are stored in the
2985 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2986 */
2987 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2988 &is_log);
2989 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2990 &is_hole);
2991 if (is_log || is_hole) {
2992 /*
2993 * Create a hole vdev and put it in the config.
2994 */
2995 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2996 goto out;
2997 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2998 VDEV_TYPE_HOLE) != 0)
2999 goto out;
3000 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3001 1) != 0)
3002 goto out;
3003 if (lastlog == 0)
3004 lastlog = vcount;
3005 varray[vcount++] = vdev;
3006 continue;
3007 }
3008 lastlog = 0;
3009 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3010 == 0);
3011 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3012 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3013 "Source pool must be composed only of mirrors\n"));
3014 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3015 goto out;
3016 }
3017
3018 verify(nvlist_lookup_nvlist_array(child[c],
3019 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3020
3021 /* find or add an entry for this top-level vdev */
3022 if (newchildren > 0 &&
3023 (entry = find_vdev_entry(zhp, mchild, mchildren,
3024 newchild, newchildren)) >= 0) {
3025 /* We found a disk that the user specified. */
3026 vdev = mchild[entry];
3027 ++found;
3028 } else {
3029 /* User didn't specify a disk for this vdev. */
3030 vdev = mchild[mchildren - 1];
3031 }
3032
3033 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3034 goto out;
3035 }
3036
3037 /* did we find every disk the user specified? */
3038 if (found != newchildren) {
3039 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3040 "include at most one disk from each mirror"));
3041 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3042 goto out;
3043 }
3044
3045 /* Prepare the nvlist for populating. */
3046 if (*newroot == NULL) {
3047 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3048 goto out;
3049 freelist = B_TRUE;
3050 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3051 VDEV_TYPE_ROOT) != 0)
3052 goto out;
3053 } else {
3054 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3055 }
3056
3057 /* Add all the children we found */
3058 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3059 lastlog == 0 ? vcount : lastlog) != 0)
3060 goto out;
3061
3062 /*
3063 * If we're just doing a dry run, exit now with success.
3064 */
3065 if (flags.dryrun) {
3066 memory_err = B_FALSE;
3067 freelist = B_FALSE;
3068 goto out;
3069 }
3070
3071 /* now build up the config list & call the ioctl */
3072 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3073 goto out;
3074
3075 if (nvlist_add_nvlist(newconfig,
3076 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3077 nvlist_add_string(newconfig,
3078 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3079 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3080 goto out;
3081
3082 /*
3083 * The new pool is automatically part of the namespace unless we
3084 * explicitly export it.
3085 */
3086 if (!flags.import)
3087 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3088 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3089 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3090 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3091 goto out;
3092 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3093 goto out;
3094
3095 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3096 retval = zpool_standard_error(hdl, errno, msg);
3097 goto out;
3098 }
3099
3100 freelist = B_FALSE;
3101 memory_err = B_FALSE;
3102
3103 out:
3104 if (varray != NULL) {
3105 int v;
3106
3107 for (v = 0; v < vcount; v++)
3108 nvlist_free(varray[v]);
3109 free(varray);
3110 }
3111 zcmd_free_nvlists(&zc);
3112 nvlist_free(zc_props);
3113 nvlist_free(newconfig);
3114 if (freelist) {
3115 nvlist_free(*newroot);
3116 *newroot = NULL;
3117 }
3118
3119 if (retval != 0)
3120 return (retval);
3121
3122 if (memory_err)
3123 return (no_memory(hdl));
3124
3125 return (0);
3126 }
3127
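/*
 * Illustrative usage sketch (compiled out; not part of libzfs): dry-run
 * split of a mirrored pool. With flags.dryrun set, '*newroot' receives
 * the vdev tree the split would produce and no ioctl is issued; the
 * caller must free it.
 */
#if 0
static int
example_split_dryrun(zpool_handle_t *zhp, char *newname)
{
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;
	int err;

	flags.dryrun = 1;
	err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
	if (err == 0 && newroot != NULL) {
		nvlist_print(stdout, newroot);	/* from libnvpair */
		nvlist_free(newroot);
	}
	return (err);
}
#endif
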
3128 /*
3129 * Remove the given device. Currently, this is supported only for hot spares,
3130 * cache, and log devices.
3131 */
3132 int
3133 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3134 {
3135 zfs_cmd_t zc = {"\0"};
3136 char msg[1024];
3137 nvlist_t *tgt;
3138 boolean_t avail_spare, l2cache, islog;
3139 libzfs_handle_t *hdl = zhp->zpool_hdl;
3140 uint64_t version;
3141
3142 (void) snprintf(msg, sizeof (msg),
3143 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3144
3145 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3146 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3147 &islog)) == 0)
3148 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3149 /*
3150 * XXX - this should just go away.
3151 */
3152 if (!avail_spare && !l2cache && !islog) {
3153 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3154 "only inactive hot spares, cache, "
3155 "or log devices can be removed"));
3156 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3157 }
3158
3159 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3160 if (islog && version < SPA_VERSION_HOLES) {
3161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3162 "pool must be upgrade to support log removal"));
3163 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3164 }
3165
3166 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3167
3168 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3169 return (0);
3170
3171 return (zpool_standard_error(hdl, errno, msg));
3172 }
3173
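/*
 * Illustrative usage sketch (compiled out; not part of libzfs): remove
 * a cache device, first confirming that the path really names an
 * l2cache vdev so the error message below stays accurate.
 */
#if 0
static int
example_remove_cache_dev(zpool_handle_t *zhp, const char *path)
{
	boolean_t spare, l2cache;

	if (zpool_find_vdev(zhp, path, &spare, &l2cache, NULL) == NULL ||
	    !l2cache) {
		(void) fprintf(stderr, "%s is not a cache device\n", path);
		return (-1);
	}
	return (zpool_vdev_remove(zhp, path));
}
#endif
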
3174 /*
3175 * Clear the errors for the pool, or the particular device if specified.
3176 */
3177 int
3178 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3179 {
3180 zfs_cmd_t zc = {"\0"};
3181 char msg[1024];
3182 nvlist_t *tgt;
3183 zpool_rewind_policy_t policy;
3184 boolean_t avail_spare, l2cache;
3185 libzfs_handle_t *hdl = zhp->zpool_hdl;
3186 nvlist_t *nvi = NULL;
3187 int error;
3188
3189 if (path)
3190 (void) snprintf(msg, sizeof (msg),
3191 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3192 path);
3193 else
3194 (void) snprintf(msg, sizeof (msg),
3195 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3196 zhp->zpool_name);
3197
3198 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3199 if (path) {
3200 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3201 &l2cache, NULL)) == 0)
3202 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3203
3204 /*
3205 * Don't allow error clearing for hot spares. Do allow
3206 * error clearing for l2cache devices.
3207 */
3208 if (avail_spare)
3209 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3210
3211 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3212 &zc.zc_guid) == 0);
3213 }
3214
3215 zpool_get_rewind_policy(rewindnvl, &policy);
3216 zc.zc_cookie = policy.zrp_request;
3217
3218 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3219 return (-1);
3220
3221 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3222 return (-1);
3223
3224 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3225 errno == ENOMEM) {
3226 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3227 zcmd_free_nvlists(&zc);
3228 return (-1);
3229 }
3230 }
3231
3232 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3233 errno != EPERM && errno != EACCES)) {
3234 if (policy.zrp_request &
3235 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3236 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3237 zpool_rewind_exclaim(hdl, zc.zc_name,
3238 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3239 nvi);
3240 nvlist_free(nvi);
3241 }
3242 zcmd_free_nvlists(&zc);
3243 return (0);
3244 }
3245
3246 zcmd_free_nvlists(&zc);
3247 return (zpool_standard_error(hdl, errno, msg));
3248 }
3249
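/*
 * Illustrative usage sketch (compiled out; not part of libzfs): clear
 * the error counts on one device without requesting any rewind. The
 * policy key ZPOOL_REWIND_REQUEST is assumed here to match what the
 * zpool(8) command passes; consult sys/fs/zfs.h for the definitive
 * name.
 */
#if 0
static int
example_clear_device(zpool_handle_t *zhp, const char *path)
{
	nvlist_t *policy = fnvlist_alloc();
	int err;

	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
	err = zpool_clear(zhp, path, policy);
	fnvlist_free(policy);
	return (err);
}
#endif
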
3250 /*
3251 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3252 */
3253 int
3254 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3255 {
3256 zfs_cmd_t zc = {"\0"};
3257 char msg[1024];
3258 libzfs_handle_t *hdl = zhp->zpool_hdl;
3259
3260 (void) snprintf(msg, sizeof (msg),
3261 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3262 (u_longlong_t)guid);
3263
3264 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3265 zc.zc_guid = guid;
3266 zc.zc_cookie = ZPOOL_NO_REWIND;
3267
3268 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3269 return (0);
3270
3271 return (zpool_standard_error(hdl, errno, msg));
3272 }
3273
3274 /*
3275 * Change the GUID for a pool.
3276 */
3277 int
3278 zpool_reguid(zpool_handle_t *zhp)
3279 {
3280 char msg[1024];
3281 libzfs_handle_t *hdl = zhp->zpool_hdl;
3282 zfs_cmd_t zc = {"\0"};
3283
3284 (void) snprintf(msg, sizeof (msg),
3285 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3286
3287 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3288 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3289 return (0);
3290
3291 return (zpool_standard_error(hdl, errno, msg));
3292 }
3293
3294 /*
3295 * Reopen the pool.
3296 */
3297 int
3298 zpool_reopen(zpool_handle_t *zhp)
3299 {
3300 zfs_cmd_t zc = {"\0"};
3301 char msg[1024];
3302 libzfs_handle_t *hdl = zhp->zpool_hdl;
3303
3304 (void) snprintf(msg, sizeof (msg),
3305 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3306 zhp->zpool_name);
3307
3308 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3309 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3310 return (0);
3311 return (zpool_standard_error(hdl, errno, msg));
3312 }
3313
3314 #if defined(__sun__) || defined(__sun)
3315 /*
3316 * Convert from a devid string to a path.
3317 */
3318 static char *
3319 devid_to_path(char *devid_str)
3320 {
3321 ddi_devid_t devid;
3322 char *minor;
3323 char *path;
3324 devid_nmlist_t *list = NULL;
3325 int ret;
3326
3327 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3328 return (NULL);
3329
3330 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3331
3332 devid_str_free(minor);
3333 devid_free(devid);
3334
3335 if (ret != 0)
3336 return (NULL);
3337
3338 /*
3339 * In case the strdup() fails, we will just return NULL below.
3340 */
3341 path = strdup(list[0].devname);
3342
3343 devid_free_nmlist(list);
3344
3345 return (path);
3346 }
3347
3348 /*
3349 * Convert from a path to a devid string.
3350 */
3351 static char *
3352 path_to_devid(const char *path)
3353 {
3354 int fd;
3355 ddi_devid_t devid;
3356 char *minor, *ret;
3357
3358 if ((fd = open(path, O_RDONLY)) < 0)
3359 return (NULL);
3360
3361 minor = NULL;
3362 ret = NULL;
3363 if (devid_get(fd, &devid) == 0) {
3364 if (devid_get_minor_name(fd, &minor) == 0)
3365 ret = devid_str_encode(devid, minor);
3366 if (minor != NULL)
3367 devid_str_free(minor);
3368 devid_free(devid);
3369 }
3370 (void) close(fd);
3371
3372 return (ret);
3373 }
3374
3375 /*
3376 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3377 * ignore any failure here, since a common case is for an unprivileged user to
3378 * type 'zpool status', and we'll display the correct information anyway.
3379 */
3380 static void
3381 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3382 {
3383 zfs_cmd_t zc = {"\0"};
3384
3385 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3386 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3387 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3388 &zc.zc_guid) == 0);
3389
3390 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3391 }
3392 #endif /* sun */
3393
3394 /*
3395 * Remove partition suffix from a vdev path. Partition suffixes may take three
3396 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3397 * case only occurs when the suffix is preceded by a digit, e.g. "md0p0". The
3398 * third case only occurs when preceded by a string matching the regular
3399 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio, or xen disk.
3400 *
3401 * The caller must free the returned string.
3402 */
3403 char *
3404 zfs_strip_partition(libzfs_handle_t *hdl, char *path)
3405 {
3406 char *tmp = zfs_strdup(hdl, path);
3407 char *part = NULL, *d = NULL;
3408
3409 if ((part = strstr(tmp, "-part")) && part != tmp) {
3410 d = part + 5;
3411 } else if ((part = strrchr(tmp, 'p')) &&
3412 part > tmp + 1 && isdigit(*(part-1))) {
3413 d = part + 1;
3414 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3415 tmp[1] == 'd') {
3416 for (d = &tmp[2]; isalpha(*d); part = ++d);
3417 } else if (strncmp("xvd", tmp, 3) == 0) {
3418 for (d = &tmp[3]; isalpha(*d); part = ++d);
3419 }
3420 if (part && d && *d != '\0') {
3421 for (; isdigit(*d); d++);
3422 if (*d == '\0')
3423 *part = '\0';
3424 }
3425 return (tmp);
3426 }
3427
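/*
 * Illustrative usage sketch (compiled out; not part of libzfs): "sda1"
 * reduces to "sda" and "md0-part2" to "md0", while a name with no
 * recognized suffix is returned unchanged. The caller owns the copy.
 */
#if 0
static void
example_strip(libzfs_handle_t *hdl)
{
	char name[] = "sda1";
	char *disk = zfs_strip_partition(hdl, name);

	(void) printf("%s -> %s\n", name, disk);	/* sda1 -> sda */
	free(disk);
}
#endif
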
3428 #define PATH_BUF_LEN 64
3429
3430 /*
3431 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3432 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3433 * We also check if this is a whole disk, in which case we strip off the
3434 * trailing 's0' slice name.
3435 *
3436 * This routine is also responsible for identifying when disks have been
3437 * reconfigured in a new location. The kernel will have opened the device by
3438 * devid, but the path will still refer to the old location. To catch this, we
3439 * first do a path -> devid translation (which is fast for the common case). If
3440 * the devid matches, we're done. If not, we do a reverse devid -> path
3441 * translation and issue the appropriate ioctl() to update the path of the vdev.
3442 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3443 * of these checks.
3444 */
3445 char *
3446 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3447 int name_flags)
3448 {
3449 char *path, *type, *env;
3450 uint64_t value;
3451 char buf[PATH_BUF_LEN];
3452 char tmpbuf[PATH_BUF_LEN];
3453
3454 env = getenv("ZPOOL_VDEV_NAME_PATH");
3455 if (env && (strtoul(env, NULL, 0) > 0 ||
3456 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3457 name_flags |= VDEV_NAME_PATH;
3458
3459 env = getenv("ZPOOL_VDEV_NAME_GUID");
3460 if (env && (strtoul(env, NULL, 0) > 0 ||
3461 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3462 name_flags |= VDEV_NAME_GUID;
3463
3464 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3465 if (env && (strtoul(env, NULL, 0) > 0 ||
3466 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3467 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3468
3469 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3470 name_flags & VDEV_NAME_GUID) {
3471 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3472 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3473 path = buf;
3474 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3475 #if defined(__sun__) || defined(__sun)
3476 /*
3477 * Live VDEV path updates to a kernel VDEV during a
3478 * zpool_vdev_name lookup are not supported on Linux.
3479 */
3480 char *devid;
3481 vdev_stat_t *vs;
3482 uint_t vsc;
3483
3484 /*
3485 * If the device is dead (faulted, offline, etc) then don't
3486 * bother opening it. Otherwise we may be forcing the user to
3487 * open a misbehaving device, which can have undesirable
3488 * effects.
3489 */
3490 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3491 (uint64_t **)&vs, &vsc) != 0 ||
3492 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3493 zhp != NULL &&
3494 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3495 /*
3496 * Determine if the current path is correct.
3497 */
3498 char *newdevid = path_to_devid(path);
3499
3500 if (newdevid == NULL ||
3501 strcmp(devid, newdevid) != 0) {
3502 char *newpath;
3503
3504 if ((newpath = devid_to_path(devid)) != NULL) {
3505 /*
3506 * Update the path appropriately.
3507 */
3508 set_path(zhp, nv, newpath);
3509 if (nvlist_add_string(nv,
3510 ZPOOL_CONFIG_PATH, newpath) == 0)
3511 verify(nvlist_lookup_string(nv,
3512 ZPOOL_CONFIG_PATH,
3513 &path) == 0);
3514 free(newpath);
3515 }
3516 }
3517
3518 if (newdevid)
3519 devid_str_free(newdevid);
3520 }
3521 #endif /* sun */
3522
3523 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3524 char *rp = realpath(path, NULL);
3525 if (rp) {
3526 strlcpy(buf, rp, sizeof (buf));
3527 path = buf;
3528 free(rp);
3529 }
3530 }
3531
3532 /*
3533 * For a block device, use only the name.
3534 */
3535 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3536 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3537 !(name_flags & VDEV_NAME_PATH)) {
3538 path = strrchr(path, '/');
3539 path++;
3540 }
3541
3542 /*
3543 * Remove the partition from the path if this is a whole disk.
3544 */
3545 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3546 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3547 return (zfs_strip_partition(hdl, path));
3548 }
3549 } else {
3550 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3551
3552 /*
3553 * If it's a raidz device, we need to stick in the parity level.
3554 */
3555 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3556 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3557 &value) == 0);
3558 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3559 (u_longlong_t)value);
3560 path = buf;
3561 }
3562
3563 /*
3564 * We identify each top-level vdev by using a <type-id>
3565 * naming convention.
3566 */
3567 if (name_flags & VDEV_NAME_TYPE_ID) {
3568 uint64_t id;
3569 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3570 &id) == 0);
3571 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3572 path, (u_longlong_t)id);
3573 path = tmpbuf;
3574 }
3575 }
3576
3577 return (zfs_strdup(hdl, path));
3578 }
3579
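/*
 * Illustrative usage sketch (compiled out; not part of libzfs): print
 * the display name of every top-level vdev using the <type-id>
 * convention (e.g. "mirror-0").
 */
#if 0
static void
example_list_toplevels(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;
	for (c = 0; c < children; c++) {
		char *name = zpool_vdev_name(hdl, zhp, child[c],
		    VDEV_NAME_TYPE_ID);
		(void) printf("%s\n", name);
		free(name);
	}
}
#endif
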
3580 static int
3581 zbookmark_mem_compare(const void *a, const void *b)
3582 {
3583 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3584 }
3585
3586 /*
3587 * Retrieve the persistent error log, uniquify the members, and return to the
3588 * caller.
3589 */
3590 int
3591 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3592 {
3593 zfs_cmd_t zc = {"\0"};
3594 uint64_t count;
3595 zbookmark_phys_t *zb = NULL;
3596 int i;
3597
3598 /*
3599 * Retrieve the raw error list from the kernel. If the number of errors
3600 * has increased, allocate more space and continue until we get the
3601 * entire list.
3602 */
3603 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3604 &count) == 0);
3605 if (count == 0)
3606 return (0);
3607 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3608 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3609 return (-1);
3610 zc.zc_nvlist_dst_size = count;
3611 (void) strcpy(zc.zc_name, zhp->zpool_name);
3612 for (;;) {
3613 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3614 &zc) != 0) {
3615 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3616 if (errno == ENOMEM) {
3617 void *dst;
3618
3619 count = zc.zc_nvlist_dst_size;
3620 dst = zfs_alloc(zhp->zpool_hdl, count *
3621 sizeof (zbookmark_phys_t));
3622 if (dst == NULL)
3623 return (-1);
3624 zc.zc_nvlist_dst = (uintptr_t)dst;
3625 } else {
3626 return (-1);
3627 }
3628 } else {
3629 break;
3630 }
3631 }
3632
3633 /*
3634 * Sort the resulting bookmarks. This is a little confusing due to the
3635 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3636 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3637 * _not_ copied as part of the process. So we point the start of our
3638 * array at the right offset and decrement the total number of elements.
3639 */
3640 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3641 zc.zc_nvlist_dst_size;
3642 count -= zc.zc_nvlist_dst_size;
3643
3644 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3645
3646 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3647
3648 /*
3649 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3650 */
3651 for (i = 0; i < count; i++) {
3652 nvlist_t *nv;
3653
3654 /* ignoring zb_blkid and zb_level for now */
3655 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3656 zb[i-1].zb_object == zb[i].zb_object)
3657 continue;
3658
3659 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3660 goto nomem;
3661 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3662 zb[i].zb_objset) != 0) {
3663 nvlist_free(nv);
3664 goto nomem;
3665 }
3666 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3667 zb[i].zb_object) != 0) {
3668 nvlist_free(nv);
3669 goto nomem;
3670 }
3671 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3672 nvlist_free(nv);
3673 goto nomem;
3674 }
3675 nvlist_free(nv);
3676 }
3677
3678 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3679 return (0);
3680
3681 nomem:
3682 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3683 return (no_memory(zhp->zpool_hdl));
3684 }
3685
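/*
 * Illustrative usage sketch (compiled out; not part of libzfs): walk
 * the uniquified error list and resolve each entry to a path with
 * zpool_obj_to_path() (defined later in this file).
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif
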
3686 /*
3687 * Upgrade a ZFS pool to the latest on-disk version.
3688 */
3689 int
3690 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3691 {
3692 zfs_cmd_t zc = {"\0"};
3693 libzfs_handle_t *hdl = zhp->zpool_hdl;
3694
3695 (void) strcpy(zc.zc_name, zhp->zpool_name);
3696 zc.zc_cookie = new_version;
3697
3698 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3699 return (zpool_standard_error_fmt(hdl, errno,
3700 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3701 zhp->zpool_name));
3702 return (0);
3703 }
3704
3705 void
3706 zfs_save_arguments(int argc, char **argv, char *string, int len)
3707 {
3708 int i;
3709
3710 (void) strlcpy(string, basename(argv[0]), len);
3711 for (i = 1; i < argc; i++) {
3712 (void) strlcat(string, " ", len);
3713 (void) strlcat(string, argv[i], len);
3714 }
3715 }
3716
3717 int
3718 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3719 {
3720 zfs_cmd_t zc = {"\0"};
3721 nvlist_t *args;
3722 int err;
3723
3724 args = fnvlist_alloc();
3725 fnvlist_add_string(args, "message", message);
3726 err = zcmd_write_src_nvlist(hdl, &zc, args);
3727 if (err == 0)
3728 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3729 nvlist_free(args);
3730 zcmd_free_nvlists(&zc);
3731 return (err);
3732 }
3733
3734 /*
3735 * Perform ioctl to get some command history of a pool.
3736 *
3737 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3738 * logical offset of the history buffer to start reading from.
3739 *
3740 * Upon return, 'off' is the next logical offset to read from and
3741 * 'len' is the actual amount of bytes read into 'buf'.
3742 */
3743 static int
3744 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3745 {
3746 zfs_cmd_t zc = {"\0"};
3747 libzfs_handle_t *hdl = zhp->zpool_hdl;
3748
3749 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3750
3751 zc.zc_history = (uint64_t)(uintptr_t)buf;
3752 zc.zc_history_len = *len;
3753 zc.zc_history_offset = *off;
3754
3755 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3756 switch (errno) {
3757 case EPERM:
3758 return (zfs_error_fmt(hdl, EZFS_PERM,
3759 dgettext(TEXT_DOMAIN,
3760 "cannot show history for pool '%s'"),
3761 zhp->zpool_name));
3762 case ENOENT:
3763 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3764 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3765 "'%s'"), zhp->zpool_name));
3766 case ENOTSUP:
3767 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3768 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3769 "'%s', pool must be upgraded"), zhp->zpool_name));
3770 default:
3771 return (zpool_standard_error_fmt(hdl, errno,
3772 dgettext(TEXT_DOMAIN,
3773 "cannot get history for '%s'"), zhp->zpool_name));
3774 }
3775 }
3776
3777 *len = zc.zc_history_len;
3778 *off = zc.zc_history_offset;
3779
3780 return (0);
3781 }
3782
3783 /*
3784 * Process the buffer of nvlists, unpacking and storing each nvlist record
3785 * into 'records'. 'leftover' is set to the number of bytes that weren't
3786 * processed as there wasn't a complete record.
3787 */
3788 int
3789 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3790 nvlist_t ***records, uint_t *numrecords)
3791 {
3792 uint64_t reclen;
3793 nvlist_t *nv;
3794 int i;
3795
3796 while (bytes_read > sizeof (reclen)) {
3797
3798 /* get length of packed record (stored as little endian) */
3799 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3800 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3801
3802 if (bytes_read < sizeof (reclen) + reclen)
3803 break;
3804
3805 /* unpack record */
3806 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3807 return (ENOMEM);
3808 bytes_read -= sizeof (reclen) + reclen;
3809 buf += sizeof (reclen) + reclen;
3810
3811 /* add record to nvlist array */
3812 (*numrecords)++;
3813 if (ISP2(*numrecords + 1)) {
3814 *records = realloc(*records,
3815 *numrecords * 2 * sizeof (nvlist_t *));
3816 }
3817 (*records)[*numrecords - 1] = nv;
3818 }
3819
3820 *leftover = bytes_read;
3821 return (0);
3822 }
3823
3824 /*
3825 * Retrieve the command history of a pool.
3826 */
3827 int
3828 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3829 {
3830 char *buf;
3831 int buflen = 128 * 1024;
3832 uint64_t off = 0;
3833 nvlist_t **records = NULL;
3834 uint_t numrecords = 0;
3835 int err, i;
3836
3837 buf = malloc(buflen);
3838 if (buf == NULL)
3839 return (ENOMEM);
3840 do {
3841 uint64_t bytes_read = buflen;
3842 uint64_t leftover;
3843
3844 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3845 break;
3846
3847 /* if nothing else was read in, we're at EOF, just return */
3848 if (!bytes_read)
3849 break;
3850
3851 if ((err = zpool_history_unpack(buf, bytes_read,
3852 &leftover, &records, &numrecords)) != 0)
3853 break;
3854 off -= leftover;
3855 if (leftover == bytes_read) {
3856 /*
3857 * no progress made, because buffer is not big enough
3858 * to hold this record; resize and retry.
3859 */
3860 buflen *= 2;
3861 free(buf);
3862 buf = malloc(buflen);
3863 if (buf == NULL)
3864 return (ENOMEM);
3865 }
3866
3867 /* CONSTCOND */
3868 } while (1);
3869
3870 free(buf);
3871
3872 if (!err) {
3873 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3874 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3875 records, numrecords) == 0);
3876 }
3877 for (i = 0; i < numrecords; i++)
3878 nvlist_free(records[i]);
3879 free(records);
3880
3881 return (err);
3882 }
3883
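/*
 * Illustrative usage sketch (compiled out; not part of libzfs): print
 * the command line of each history record. ZPOOL_HIST_CMD is only
 * present in records logged as commands, so the lookup may fail
 * harmlessly for internal events.
 */
#if 0
static int
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return (-1);
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
	return (0);
}
#endif
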
3884 /*
3885 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3886 * If there is a new event available 'nvp' will contain a newly allocated
3887 * nvlist and 'dropped' will be set to the number of missed events since
3888 * the last call to this function. When 'nvp' is set to NULL it indicates
3889 * no new events are available. In either case the function returns 0 and
3890 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3891 * function will return a non-zero value. When the function is called in
3892 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3893 * it will not return until a new event is available.
3894 */
3895 int
3896 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3897 int *dropped, unsigned flags, int zevent_fd)
3898 {
3899 zfs_cmd_t zc = {"\0"};
3900 int error = 0;
3901
3902 *nvp = NULL;
3903 *dropped = 0;
3904 zc.zc_cleanup_fd = zevent_fd;
3905
3906 if (flags & ZEVENT_NONBLOCK)
3907 zc.zc_guid = ZEVENT_NONBLOCK;
3908
3909 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3910 return (-1);
3911
3912 retry:
3913 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3914 switch (errno) {
3915 case ESHUTDOWN:
3916 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3917 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3918 goto out;
3919 case ENOENT:
3920 /* Blocking error case should not occur */
3921 if (!(flags & ZEVENT_NONBLOCK))
3922 error = zpool_standard_error_fmt(hdl, errno,
3923 dgettext(TEXT_DOMAIN, "cannot get event"));
3924
3925 goto out;
3926 case ENOMEM:
3927 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3928 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3929 dgettext(TEXT_DOMAIN, "cannot get event"));
3930 goto out;
3931 } else {
3932 goto retry;
3933 }
3934 default:
3935 error = zpool_standard_error_fmt(hdl, errno,
3936 dgettext(TEXT_DOMAIN, "cannot get event"));
3937 goto out;
3938 }
3939 }
3940
3941 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3942 if (error != 0)
3943 goto out;
3944
3945 *dropped = (int)zc.zc_cookie;
3946 out:
3947 zcmd_free_nvlists(&zc);
3948
3949 return (error);
3950 }
3951
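/*
 * Illustrative usage sketch (compiled out; not part of libzfs): drain
 * all pending events without blocking. The descriptor is assumed to
 * come from opening ZFS_DEV ("/dev/zfs"), which is how zed obtains its
 * event handle.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl)
{
	int zevent_fd, dropped;
	nvlist_t *nvp;

	if ((zevent_fd = open(ZFS_DEV, O_RDWR)) < 0)
		return;
	for (;;) {
		if (zpool_events_next(hdl, &nvp, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0 || nvp == NULL)
			break;
		if (dropped > 0)
			(void) printf("missed %d events\n", dropped);
		nvlist_print(stdout, nvp);
		nvlist_free(nvp);
	}
	(void) close(zevent_fd);
}
#endif
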
3952 /*
3953 * Clear all events.
3954 */
3955 int
3956 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3957 {
3958 zfs_cmd_t zc = {"\0"};
3959 char msg[1024];
3960
3961 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3962 "cannot clear events"));
3963
3964 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3965 return (zpool_standard_error_fmt(hdl, errno, msg));
3966
3967 if (count != NULL)
3968 *count = (int)zc.zc_cookie; /* # of events cleared */
3969
3970 return (0);
3971 }
3972
3973 /*
3974 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3975 * the passed zevent_fd file handle. On success zero is returned,
3976 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3977 */
3978 int
3979 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3980 {
3981 zfs_cmd_t zc = {"\0"};
3982 int error = 0;
3983
3984 zc.zc_guid = eid;
3985 zc.zc_cleanup_fd = zevent_fd;
3986
3987 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3988 switch (errno) {
3989 case ENOENT:
3990 error = zfs_error_fmt(hdl, EZFS_NOENT,
3991 dgettext(TEXT_DOMAIN, "cannot get event"));
3992 break;
3993
3994 case ENOMEM:
3995 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3996 dgettext(TEXT_DOMAIN, "cannot get event"));
3997 break;
3998
3999 default:
4000 error = zpool_standard_error_fmt(hdl, errno,
4001 dgettext(TEXT_DOMAIN, "cannot get event"));
4002 break;
4003 }
4004 }
4005
4006 return (error);
4007 }
4008
4009 void
4010 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4011 char *pathname, size_t len)
4012 {
4013 zfs_cmd_t zc = {"\0"};
4014 boolean_t mounted = B_FALSE;
4015 char *mntpnt = NULL;
4016 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4017
4018 if (dsobj == 0) {
4019 /* special case for the MOS */
4020 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4021 (longlong_t)obj);
4022 return;
4023 }
4024
4025 /* get the dataset's name */
4026 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4027 zc.zc_obj = dsobj;
4028 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4029 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4030 /* just write out a path of two object numbers */
4031 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4032 (longlong_t)dsobj, (longlong_t)obj);
4033 return;
4034 }
4035 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4036
4037 /* find out if the dataset is mounted */
4038 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4039
4040 /* get the corrupted object's path */
4041 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4042 zc.zc_obj = obj;
4043 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4044 &zc) == 0) {
4045 if (mounted) {
4046 (void) snprintf(pathname, len, "%s%s", mntpnt,
4047 zc.zc_value);
4048 } else {
4049 (void) snprintf(pathname, len, "%s:%s",
4050 dsname, zc.zc_value);
4051 }
4052 } else {
4053 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4054 (longlong_t)obj);
4055 }
4056 free(mntpnt);
4057 }
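
/*
 * Depending on how much of the lookup above succeeds, the pathname
 * written back takes one of the following forms (values illustrative):
 *
 *	<metadata>:<0x1>	MOS object (dsobj == 0)
 *	<0x15>:<0x1>		dataset name lookup failed
 *	/tank/fs/some/file	dataset mounted at /tank/fs
 *	tank/fs:/some/file	dataset not mounted
 *	tank/fs:<0x1>		object-to-path lookup failed
 */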
4058
4059 /*
4060 * Read the EFI label from the config; if a label does not exist,
4061 * pass back the error to the caller. If the caller has passed a non-NULL
4062 * diskaddr argument, we set it to the starting address of the EFI
4063 * partition.
4064 */
4065 static int
4066 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4067 {
4068 char *path;
4069 int fd;
4070 char diskname[MAXPATHLEN];
4071 int err = -1;
4072
4073 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4074 return (err);
4075
4076 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4077 strrchr(path, '/'));
4078 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
4079 struct dk_gpt *vtoc;
4080
4081 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4082 if (sb != NULL)
4083 *sb = vtoc->efi_parts[0].p_start;
4084 efi_free(vtoc);
4085 }
4086 (void) close(fd);
4087 }
4088 return (err);
4089 }
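
/*
 * Illustrative sketch: a caller that wants the starting block of the
 * data partition, treating any negative return as "no EFI label":
 *
 *	diskaddr_t sb;
 *
 *	if (read_efi_label(config, &sb) >= 0)
 *		(void) printf("partition starts at sector %llu\n",
 *		    (u_longlong_t)sb);
 */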
4090
4091 /*
4092 * Determine where a partition starts on a disk in the current
4093 * configuration.
4094 */
4095 static diskaddr_t
4096 find_start_block(nvlist_t *config)
4097 {
4098 nvlist_t **child;
4099 uint_t c, children;
4100 diskaddr_t sb = MAXOFFSET_T;
4101 uint64_t wholedisk;
4102
4103 if (nvlist_lookup_nvlist_array(config,
4104 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4105 if (nvlist_lookup_uint64(config,
4106 ZPOOL_CONFIG_WHOLE_DISK,
4107 &wholedisk) != 0 || !wholedisk) {
4108 return (MAXOFFSET_T);
4109 }
4110 if (read_efi_label(config, &sb) < 0)
4111 sb = MAXOFFSET_T;
4112 return (sb);
4113 }
4114
4115 for (c = 0; c < children; c++) {
4116 sb = find_start_block(child[c]);
4117 if (sb != MAXOFFSET_T) {
4118 return (sb);
4119 }
4120 }
4121 return (MAXOFFSET_T);
4122 }
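
/*
 * Illustrative sketch: for a vdev tree such as
 *
 *	root
 *	  mirror
 *	    sda	(whole_disk)
 *	    sdb	(whole_disk)
 *
 * the recursion descends to the first leaf with a readable EFI label
 * and returns that label's starting block; MAXOFFSET_T means no usable
 * label was found anywhere in the tree.
 */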
4123
4124 static int
4125 zpool_label_disk_check(char *path)
4126 {
4127 struct dk_gpt *vtoc;
4128 int fd, err;
4129
4130 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4131 return (errno);
4132
4133 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4134 (void) close(fd);
4135 return (err);
4136 }
4137
4138 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4139 efi_free(vtoc);
4140 (void) close(fd);
4141 return (EIDRM);
4142 }
4143
4144 efi_free(vtoc);
4145 (void) close(fd);
4146 return (0);
4147 }
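
/*
 * Illustrative sketch (device path hypothetical): the return value
 * distinguishes an unopenable device (errno from open(2)), an
 * unreadable label (efi_alloc_and_read() error), and a readable but
 * corrupt primary GPT (EIDRM):
 *
 *	int err = zpool_label_disk_check("/dev/sda1");
 *
 *	if (err == EIDRM)
 *		(void) fprintf(stderr, "primary GPT is corrupt\n");
 */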
4148
4149 /*
4150 * Generate a unique partition name for the ZFS member. Partitions must
4151 * have unique names to ensure udev will be able to create symlinks under
4152 * /dev/disk/by-partlabel/ for all pool members. The partition names
4153 * are of the form zfs-<unique-id>.
4154 */
4155 static void
4156 zpool_label_name(char *label_name, int label_size)
4157 {
4158 uint64_t id = 0;
4159 int fd;
4160
4161 fd = open("/dev/urandom", O_RDONLY);
4162 if (fd >= 0) {
4163 if (read(fd, &id, sizeof (id)) != sizeof (id))
4164 id = 0;
4165
4166 close(fd);
4167 }
4168
4169 if (id == 0)
4170 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4171
4172 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t) id);
4173 }
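
/*
 * Illustrative sketch: a generated partition name looks like
 *
 *	zfs-4f1a09b2c87d3e65
 *
 * i.e. "zfs-" followed by 16 zero-padded hex digits, which udev then
 * exposes as /dev/disk/by-partlabel/zfs-4f1a09b2c87d3e65.
 */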
4174
4175 /*
4176 * Label an individual disk. The name provided is the short name,
4177 * stripped of any leading /dev path.
4178 */
4179 int
4180 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4181 {
4182 char path[MAXPATHLEN];
4183 struct dk_gpt *vtoc;
4184 int rval, fd;
4185 size_t resv = EFI_MIN_RESV_SIZE;
4186 uint64_t slice_size;
4187 diskaddr_t start_block;
4188 char errbuf[1024];
4189
4190 /* prepare an error message just in case */
4191 (void) snprintf(errbuf, sizeof (errbuf),
4192 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4193
4194 if (zhp) {
4195 nvlist_t *nvroot;
4196
4197 #if defined(__sun__) || defined(__sun)
4198 if (zpool_is_bootable(zhp)) {
4199 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4200 "EFI labeled devices are not supported on root "
4201 "pools."));
4202 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
4203 }
4204 #endif
4205
4206 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4207 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4208
4209 if (zhp->zpool_start_block == 0)
4210 start_block = find_start_block(nvroot);
4211 else
4212 start_block = zhp->zpool_start_block;
4213 zhp->zpool_start_block = start_block;
4214 } else {
4215 /* new pool */
4216 start_block = NEW_START_BLOCK;
4217 }
4218
4219 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4220
4221 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4222 /*
4223 * This shouldn't happen. We've long since verified that this
4224 * is a valid device.
4225 */
4226 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4227 "label '%s': unable to open device: %d"), path, errno);
4228 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4229 }
4230
4231 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4232 /*
4233 * The only way this can fail is if we run out of memory, or we
4234 * were unable to read the disk's capacity.
4235 */
4236 if (errno == ENOMEM)
4237 (void) no_memory(hdl);
4238
4239 (void) close(fd);
4240 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4241 "label '%s': unable to read disk capacity"), path);
4242
4243 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4244 }
4245
4246 slice_size = vtoc->efi_last_u_lba + 1;
4247 slice_size -= EFI_MIN_RESV_SIZE;
4248 if (start_block == MAXOFFSET_T)
4249 start_block = NEW_START_BLOCK;
4250 slice_size -= start_block;
4251 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4252
4253 vtoc->efi_parts[0].p_start = start_block;
4254 vtoc->efi_parts[0].p_size = slice_size;
4255
4256 /*
4257 * Why we use V_USR: V_BACKUP confuses users, and is considered
4258 * disposable by some EFI utilities (since EFI doesn't have a backup
4259 * slice). V_UNASSIGNED is supposed to be used only for zero size
4260 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4261 * etc. were all pretty specific. V_USR is as close to reality as we
4262 * can get, in the absence of V_OTHER.
4263 */
4264 vtoc->efi_parts[0].p_tag = V_USR;
4265 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4266
4267 vtoc->efi_parts[8].p_start = slice_size + start_block;
4268 vtoc->efi_parts[8].p_size = resv;
4269 vtoc->efi_parts[8].p_tag = V_RESERVED;
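
/*
 * Resulting layout (a sketch; exact sizes depend on the device and on
 * EFI_MIN_RESV_SIZE as defined by libefi):
 *
 *	part 1 (V_USR):      start_block .. start_block + slice_size
 *	parts 2-8:           unused, zero size
 *	part 9 (V_RESERVED): resv sectors ahead of the backup GPT
 */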
4270
4271 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4272 /*
4273 * Some block drivers (like pcata) may not support EFI
4274 * GPT labels. Print out a helpful error message directing
4275 * the user to manually label the disk and give a specific
4276 * slice.
4277 */
4278 (void) close(fd);
4279 efi_free(vtoc);
4280
4281 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4282 "parted(8) and then provide a specific slice: %d"), rval);
4283 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4284 }
4285
4286 (void) close(fd);
4287 efi_free(vtoc);
4288
4289 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4290 (void) zfs_append_partition(path, MAXPATHLEN);
4291
4292 /* Wait for udev to signal that the device has settled. */
4293 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4294 if (rval) {
4295 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4296 "detect device partitions on '%s': %d"), path, rval);
4297 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4298 }
4299
4300 /* We can't be too paranoid. Read the label back and verify it. */
4301 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4302 rval = zpool_label_disk_check(path);
4303 if (rval) {
4304 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4305 "EFI label on '%s' is damaged. Ensure\nthis device "
4306 "is not in in use, and is functioning properly: %d"),
4307 path, rval);
4308 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4309 }
4310
4311 return (0);
4312 }
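
/*
 * Illustrative sketch: label a hypothetical disk "sdb" (a short name
 * under DISK_ROOT) before adding it to an existing pool; on failure the
 * specific libzfs error has already been set on hdl by the code above:
 *
 *	if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *		return (-1);
 */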