/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_ASHIFT:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
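
/*
 * Illustrative usage sketch (not part of the original source): reading a
 * couple of pool properties with zpool_get_prop().  The handle 'zhp' is
 * assumed to come from zpool_open().
 *
 *	char health[ZFS_MAXPROPLEN], size[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), &src) == 0 &&
 *	    zpool_get_prop(zhp, ZPOOL_PROP_SIZE, size,
 *	    sizeof (size), &src) == 0)
 *		(void) printf("health %s, size %s\n", health, size);
 */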

/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			zfeature_info_t *feature;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, &feature);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to reside in the pool it is
			 * being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
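
/*
 * Illustrative sketch (not part of the original source): the shape of the
 * props nvlist that callers hand to zpool_valid_proplist() indirectly via
 * zpool_create(), zpool_import_props(), or zpool_set_prop().  The property
 * choice is an assumption for the example.
 *
 *	nvlist_t *props = NULL;
 *
 *	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") == 0) {
 *		... pass 'props' to zpool_create() or zpool_import_props() ...
 *	}
 *	nvlist_free(props);
 */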

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
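
/*
 * Illustrative usage sketch (not part of the original source): setting a
 * pool property.  The property name and value are assumptions for the
 * example.
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zpool_get_handle(zhp)));
 */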

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		zfeature_info_t *fi;

		ret = zfeature_lookup_name(feature, &fi);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = fi->fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
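
/*
 * Illustrative usage sketch (not part of the original source): querying the
 * state of a pool feature.  The feature name is an assumption for the
 * example.
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */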

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, while other vendors prefer a
 * 1m alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048

/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}
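
/*
 * Illustrative usage sketch (not part of the original source): the typical
 * open/use/close lifecycle for a pool handle.  The pool name "tank" is an
 * assumption for the example.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *
 *	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("%s: state %d\n", zpool_get_name(zhp),
 *		    zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 *	if (hdl != NULL)
 *		libzfs_fini(hdl);
 */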

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or one of\n"
			    "the devices is part of an active md or lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
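
/*
 * Illustrative usage sketch (not part of the original source): creating a
 * pool from a pre-built vdev tree.  Building 'nvroot' is elided here; in
 * the zpool(8) command it comes from the command-line vdev parser.
 *
 *	nvlist_t *nvroot = ...;		assumed vdev specification
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */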

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

#if defined(__sun__) || defined(__sun)
	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

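/*
 * Illustrative usage sketch (not part of the original source): exporting a
 * pool once its datasets are unmounted.  The history string is an
 * assumption for the example.
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zpool_get_handle(zhp)));
 */
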
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}
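
/*
 * Illustrative usage sketch (not part of the original source): importing a
 * pool from a configuration discovered elsewhere (e.g. zpool_find_import());
 * obtaining 'config' is elided.
 *
 *	nvlist_t *config = ...;		assumed discovered configuration
 *
 *	if (zpool_import_props(hdl, config, NULL, NULL,
 *	    ZFS_IMPORT_NORMAL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */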
1909
1910/*
428870ff 1911 * Scan the pool.
34dc7c2f
BB
1912 */
1913int
428870ff 1914zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
34dc7c2f 1915{
6f1ffb06 1916 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
34dc7c2f
BB
1917 char msg[1024];
1918 libzfs_handle_t *hdl = zhp->zpool_hdl;
1919
1920 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
428870ff 1921 zc.zc_cookie = func;
34dc7c2f 1922
572e2857 1923 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
428870ff 1924 (errno == ENOENT && func != POOL_SCAN_NONE))
34dc7c2f
BB
1925 return (0);
1926
428870ff
BB
1927 if (func == POOL_SCAN_SCRUB) {
1928 (void) snprintf(msg, sizeof (msg),
1929 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1930 } else if (func == POOL_SCAN_NONE) {
1931 (void) snprintf(msg, sizeof (msg),
1932 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1933 zc.zc_name);
1934 } else {
1935 assert(!"unexpected result");
1936 }
34dc7c2f 1937
428870ff
BB
1938 if (errno == EBUSY) {
1939 nvlist_t *nvroot;
1940 pool_scan_stat_t *ps = NULL;
1941 uint_t psc;
1942
1943 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1944 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1945 (void) nvlist_lookup_uint64_array(nvroot,
1946 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1947 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1948 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1949 else
1950 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1951 } else if (errno == ENOENT) {
1952 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1953 } else {
34dc7c2f 1954 return (zpool_standard_error(hdl, errno, msg));
1955 }
1956}
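/*
 * Example (illustrative sketch, not part of the library): starting a
 * scrub with zpool_scan() and later cancelling it. The handle comes
 * from zpool_open(); "tank" is a hypothetical pool name and error
 * handling is elided.
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		if (zpool_scan(zhp, POOL_SCAN_SCRUB) == 0) {
 *			... scrub running; to cancel it ...
 *			(void) zpool_scan(zhp, POOL_SCAN_NONE);
 *		}
 *		zpool_close(zhp);
 *	}
 */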
1957
34dc7c2f 1958/*
1959 * Find a vdev that matches the search criteria specified. We use
1960 * the nvpair name to determine how we should look for the device.
1961 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1962 * spare, but FALSE if it's an INUSE spare.
1963 */
1964static nvlist_t *
1965vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1966 boolean_t *l2cache, boolean_t *log)
1967{
1968 uint_t c, children;
1969 nvlist_t **child;
34dc7c2f 1970 nvlist_t *ret;
b128c09f 1971 uint64_t is_log;
1972 char *srchkey;
1973 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1974
1975 /* Nothing to look for */
1976 if (search == NULL || pair == NULL)
1977 return (NULL);
1978
1979 /* Obtain the key we will use to search */
1980 srchkey = nvpair_name(pair);
1981
1982 switch (nvpair_type(pair)) {
572e2857 1983 case DATA_TYPE_UINT64:
9babb374 1984 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1985 uint64_t srchval, theguid;
1986
1987 verify(nvpair_value_uint64(pair, &srchval) == 0);
1988 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1989 &theguid) == 0);
1990 if (theguid == srchval)
1991 return (nv);
1992 }
1993 break;
1994
1995 case DATA_TYPE_STRING: {
1996 char *srchval, *val;
1997
1998 verify(nvpair_value_string(pair, &srchval) == 0);
1999 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2000 break;
34dc7c2f 2001
9babb374 2002 /*
2003 * Search for the requested value. Special cases:
2004 *
2005 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2006 * "-part1", or "p1". The suffix is hidden from the user,
2007 * but included in the string, so this matches around it.
2008 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
2009 * is used to check all possible expanded paths.
2010 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2011 *
2012 * Otherwise, all other searches are simple string compares.
9babb374 2013 */
a2c6816c 2014 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2015 uint64_t wholedisk = 0;
2016
2017 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2018 &wholedisk);
2019 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2020 return (nv);
428870ff 2021
2022 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2023 char *type, *idx, *end, *p;
2024 uint64_t id, vdev_id;
2025
2026 /*
2027 * Determine our vdev type, keeping in mind
2028 * that the srchval is composed of a type and
2029 * vdev id pair (i.e. mirror-4).
2030 */
2031 if ((type = strdup(srchval)) == NULL)
2032 return (NULL);
2033
2034 if ((p = strrchr(type, '-')) == NULL) {
2035 free(type);
2036 break;
2037 }
2038 idx = p + 1;
2039 *p = '\0';
2040
2041 /*
2042 * If the types don't match then keep looking.
2043 */
2044 if (strncmp(val, type, strlen(val)) != 0) {
2045 free(type);
2046 break;
2047 }
2048
2049 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2050 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2051 strncmp(type, VDEV_TYPE_MIRROR,
2052 strlen(VDEV_TYPE_MIRROR)) == 0);
2053 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2054 &id) == 0);
2055
2056 errno = 0;
2057 vdev_id = strtoull(idx, &end, 10);
2058
2059 free(type);
2060 if (errno != 0)
2061 return (NULL);
2062
2063 /*
2064 * Now verify that we have the correct vdev id.
2065 */
2066 if (vdev_id == id)
2067 return (nv);
9babb374 2068 }
34dc7c2f 2069
34dc7c2f 2070 /*
9babb374 2071 * Common case
34dc7c2f 2072 */
9babb374 2073 if (strcmp(srchval, val) == 0)
34dc7c2f 2074 return (nv);
2075 break;
2076 }
2077
2078 default:
2079 break;
2080 }
2081
2082 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2083 &child, &children) != 0)
2084 return (NULL);
2085
b128c09f 2086 for (c = 0; c < children; c++) {
9babb374 2087 if ((ret = vdev_to_nvlist_iter(child[c], search,
2088 avail_spare, l2cache, NULL)) != NULL) {
2089 /*
2090 * The 'is_log' value is only set for the toplevel
2091 * vdev, not the leaf vdevs. So we always lookup the
2092 * log device from the root of the vdev tree (where
2093 * 'log' is non-NULL).
2094 */
2095 if (log != NULL &&
2096 nvlist_lookup_uint64(child[c],
2097 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2098 is_log) {
2099 *log = B_TRUE;
2100 }
34dc7c2f 2101 return (ret);
2102 }
2103 }
2104
2105 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2106 &child, &children) == 0) {
2107 for (c = 0; c < children; c++) {
9babb374 2108 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 2109 avail_spare, l2cache, NULL)) != NULL) {
2110 *avail_spare = B_TRUE;
2111 return (ret);
2112 }
2113 }
2114 }
2115
2116 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2117 &child, &children) == 0) {
2118 for (c = 0; c < children; c++) {
9babb374 2119 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 2120 avail_spare, l2cache, NULL)) != NULL) {
2121 *l2cache = B_TRUE;
2122 return (ret);
2123 }
2124 }
2125 }
2126
2127 return (NULL);
2128}
2129
2130/*
2131 * Given a physical path (minus the "/devices" prefix), find the
2132 * associated vdev.
2133 */
2134nvlist_t *
2135zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2136 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2137{
2138 nvlist_t *search, *nvroot, *ret;
2139
2140 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2141 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2142
2143 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2144 &nvroot) == 0);
2145
2146 *avail_spare = B_FALSE;
2147 *l2cache = B_FALSE;
2148 if (log != NULL)
2149 *log = B_FALSE;
2150 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2151 nvlist_free(search);
2152
2153 return (ret);
2154}
2155
2156/*
2157 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2158 */
2159boolean_t
2160zpool_vdev_is_interior(const char *name)
2161{
2162 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2163 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2164 return (B_TRUE);
2165 return (B_FALSE);
2166}
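/*
 * Illustrative examples: "mirror-0" and "raidz2-1" are interior names,
 * while leaf names such as "sda" or "sda1" are not.
 */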
2167
2168nvlist_t *
2169zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
b128c09f 2170 boolean_t *l2cache, boolean_t *log)
34dc7c2f 2171{
34dc7c2f 2172 char *end;
9babb374 2173 nvlist_t *nvroot, *search, *ret;
2174 uint64_t guid;
2175
2176 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2177
2178 guid = strtoull(path, &end, 10);
2179 if (guid != 0 && *end == '\0') {
9babb374 2180 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2181 } else if (zpool_vdev_is_interior(path)) {
2182 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
34dc7c2f 2183 } else {
9babb374 2184 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2185 }
2186
2187 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2188 &nvroot) == 0);
2189
2190 *avail_spare = B_FALSE;
2191 *l2cache = B_FALSE;
2192 if (log != NULL)
2193 *log = B_FALSE;
2194 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2195 nvlist_free(search);
2196
2197 return (ret);
2198}
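/*
 * Example (illustrative, hypothetical device name): looking up a leaf
 * vdev by its short name and checking how the pool uses it.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "sda", &spare, &l2cache, &log);
 *	if (tgt != NULL && !spare && !l2cache)
 *		... "sda" is a regular (possibly log) pool device ...
 */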
2199
2200static int
2201vdev_online(nvlist_t *nv)
2202{
2203 uint64_t ival;
2204
2205 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2206 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2207 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2208 return (0);
2209
2210 return (1);
2211}
2212
2213/*
9babb374 2214 * Helper function for zpool_get_physpath().
b128c09f 2215 */
2216static int
2217vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2218 size_t *bytes_written)
2219{
2220 size_t bytes_left, pos, rsz;
2221 char *tmppath;
2222 const char *format;
2223
2224 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2225 &tmppath) != 0)
2226 return (EZFS_NODEVICE);
2227
2228 pos = *bytes_written;
2229 bytes_left = physpath_size - pos;
2230 format = (pos == 0) ? "%s" : " %s";
2231
2232 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2233 *bytes_written += rsz;
2234
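	/*
	 * snprintf() returns the length the string would have had, so a
	 * result >= bytes_left means the physpath was truncated.
	 */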
2235 if (rsz >= bytes_left) {
2236 /* if physpath was not copied properly, clear it */
2237 if (bytes_left != 0) {
2238 physpath[pos] = 0;
2239 }
2240 return (EZFS_NOSPC);
2241 }
2242 return (0);
2243}
2244
2245static int
2246vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2247 size_t *rsz, boolean_t is_spare)
2248{
2249 char *type;
2250 int ret;
2251
2252 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2253 return (EZFS_INVALCONFIG);
2254
2255 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2256 /*
2257 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2258 * For a spare vdev, we only want to boot from the active
2259 * spare device.
2260 */
2261 if (is_spare) {
2262 uint64_t spare = 0;
2263 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2264 &spare);
2265 if (!spare)
2266 return (EZFS_INVALCONFIG);
2267 }
2268
2269 if (vdev_online(nv)) {
2270 if ((ret = vdev_get_one_physpath(nv, physpath,
2271 phypath_size, rsz)) != 0)
2272 return (ret);
2273 }
2274 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2275 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2276 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2277 nvlist_t **child;
2278 uint_t count;
2279 int i, ret;
2280
2281 if (nvlist_lookup_nvlist_array(nv,
2282 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2283 return (EZFS_INVALCONFIG);
2284
2285 for (i = 0; i < count; i++) {
2286 ret = vdev_get_physpaths(child[i], physpath,
2287 phypath_size, rsz, is_spare);
2288 if (ret == EZFS_NOSPC)
2289 return (ret);
2290 }
2291 }
2292
2293 return (EZFS_POOL_INVALARG);
2294}
2295
2296/*
2297 * Get phys_path for a root pool config.
2298 * Return 0 on success; non-zero on failure.
2299 */
2300static int
2301zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
b128c09f 2302{
9babb374 2303 size_t rsz;
2304 nvlist_t *vdev_root;
2305 nvlist_t **child;
2306 uint_t count;
9babb374 2307 char *type;
b128c09f 2308
9babb374 2309 rsz = 0;
b128c09f 2310
2311 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2312 &vdev_root) != 0)
2313 return (EZFS_INVALCONFIG);
b128c09f 2314
2315 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2316 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
b128c09f 2317 &child, &count) != 0)
9babb374 2318 return (EZFS_INVALCONFIG);
b128c09f 2319
c372b36e 2320#if defined(__sun__) || defined(__sun)
2321 /*
2322 * A root pool cannot have EFI labeled disks and can only have
2323 * a single top-level vdev.
2324 */
2325 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2326 pool_uses_efi(vdev_root))
2327 return (EZFS_POOL_INVALARG);
c372b36e 2328#endif
b128c09f 2329
2330 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2331 B_FALSE);
2332
2333 /* No online devices */
2334 if (rsz == 0)
2335 return (EZFS_NODEVICE);
2336
2337 return (0);
2338}
2339
2340/*
2341 * Get phys_path for a root pool
2342 * Return 0 on success; non-zero on failure.
2343 */
2344int
2345zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2346{
2347 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2348 phypath_size));
2349}
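/*
 * Example (illustrative): fetching the physical path(s) of a root pool
 * into a caller-supplied buffer.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		... physpath holds space-separated phys_path values ...
 */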
2350
2351/*
2352 * If the device has been dynamically expanded then we need to relabel
2353 * the disk to use the new unallocated space.
2354 */
2355static int
8adf4864 2356zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
9babb374 2357{
9babb374 2358 int fd, error;
9babb374 2359
d603ed6c 2360 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
9babb374 2361 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
109491a8 2362 "relabel '%s': unable to open device: %d"), path, errno);
8adf4864 2363 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2364 }
2365
2366 /*
2367 * It's possible that we might encounter an error if the device
2368 * does not have any unallocated space left. If so, we simply
2369 * ignore that error and continue on.
2370 *
2371 * Also, we don't call efi_rescan() - that would just return EBUSY.
2372 * The module will do it for us in vdev_disk_open().
9babb374 2373 */
d603ed6c 2374 error = efi_use_whole_disk(fd);
2375 (void) close(fd);
2376 if (error && error != VT_ENOSPC) {
2377 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
d603ed6c 2378 "relabel '%s': unable to read disk capacity"), path);
8adf4864 2379 return (zfs_error(hdl, EZFS_NOCAP, msg));
2380 }
2381 return (0);
2382}
2383
2384/*
2385 * Bring the specified vdev online. The 'flags' parameter is a set of the
2386 * ZFS_ONLINE_* flags.
2387 */
2388int
2389zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2390 vdev_state_t *newstate)
2391{
6f1ffb06 2392 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2393 char msg[1024];
2394 nvlist_t *tgt;
9babb374 2395 boolean_t avail_spare, l2cache, islog;
34dc7c2f 2396 libzfs_handle_t *hdl = zhp->zpool_hdl;
8adf4864 2397 int error;
34dc7c2f 2398
2399 if (flags & ZFS_ONLINE_EXPAND) {
2400 (void) snprintf(msg, sizeof (msg),
2401 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2402 } else {
2403 (void) snprintf(msg, sizeof (msg),
2404 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2405 }
2406
2407 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 2408 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
9babb374 2409 &islog)) == NULL)
2410 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2411
2412 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2413
428870ff 2414 if (avail_spare)
2415 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2416
2417 if (flags & ZFS_ONLINE_EXPAND ||
2418 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2419 uint64_t wholedisk = 0;
2420
2421 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2422 &wholedisk);
2423
2424 /*
2425 * XXX - L2ARC 1.0 devices can't support expansion.
2426 */
2427 if (l2cache) {
2428 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2429 "cannot expand cache devices"));
2430 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2431 }
2432
2433 if (wholedisk) {
2434 const char *fullpath = path;
2435 char buf[MAXPATHLEN];
2436
2437 if (path[0] != '/') {
2438 error = zfs_resolve_shortname(path, buf,
2439 sizeof (buf));
2440 if (error != 0)
2441 return (zfs_error(hdl, EZFS_NODEVICE,
2442 msg));
2443
2444 fullpath = buf;
2445 }
2446
2447 error = zpool_relabel_disk(hdl, fullpath, msg);
2448 if (error != 0)
2449 return (error);
2450 }
2451 }
2452
2453 zc.zc_cookie = VDEV_STATE_ONLINE;
2454 zc.zc_obj = flags;
2455
572e2857 2456 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2457 if (errno == EINVAL) {
2458 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2459 "from this pool into a new one. Use '%s' "
2460 "instead"), "zpool detach");
2461 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2462 }
34dc7c2f 2463 return (zpool_standard_error(hdl, errno, msg));
428870ff 2464 }
2465
2466 *newstate = zc.zc_cookie;
2467 return (0);
2468}
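/*
 * Example (illustrative, hypothetical device name): onlining a device
 * and asking for it to be expanded into newly grown LUN capacity.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		... device is online and has been expanded ...
 */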
2469
2470/*
2471 * Take the specified vdev offline
2472 */
2473int
2474zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2475{
6f1ffb06 2476 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2477 char msg[1024];
2478 nvlist_t *tgt;
2479 boolean_t avail_spare, l2cache;
2480 libzfs_handle_t *hdl = zhp->zpool_hdl;
2481
2482 (void) snprintf(msg, sizeof (msg),
2483 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2484
2485 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2486 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2487 NULL)) == NULL)
2488 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2489
2490 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2491
428870ff 2492 if (avail_spare)
2493 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2494
2495 zc.zc_cookie = VDEV_STATE_OFFLINE;
2496 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2497
572e2857 2498 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2499 return (0);
2500
2501 switch (errno) {
2502 case EBUSY:
2503
2504 /*
2505 * There are no other replicas of this device.
2506 */
2507 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2508
2509 case EEXIST:
2510 /*
2511 * The log device has unplayed logs
2512 */
2513 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2514
2515 default:
2516 return (zpool_standard_error(hdl, errno, msg));
2517 }
2518}
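/*
 * Example (illustrative): a non-zero 'istmp' takes the device offline
 * only until the next reboot.
 *
 *	(void) zpool_vdev_offline(zhp, "sda", B_TRUE);
 */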
2519
2520/*
2521 * Mark the given vdev faulted.
2522 */
2523int
428870ff 2524zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2525{
6f1ffb06 2526 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2527 char msg[1024];
2528 libzfs_handle_t *hdl = zhp->zpool_hdl;
2529
2530 (void) snprintf(msg, sizeof (msg),
b8864a23 2531 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2532
2533 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2534 zc.zc_guid = guid;
2535 zc.zc_cookie = VDEV_STATE_FAULTED;
428870ff 2536 zc.zc_obj = aux;
34dc7c2f 2537
572e2857 2538 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2539 return (0);
2540
2541 switch (errno) {
2542 case EBUSY:
2543
2544 /*
2545 * There are no other replicas of this device.
2546 */
2547 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2548
2549 default:
2550 return (zpool_standard_error(hdl, errno, msg));
2551 }
2552
2553}
2554
2555/*
2556 * Mark the given vdev degraded.
2557 */
2558int
428870ff 2559zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2560{
6f1ffb06 2561 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2562 char msg[1024];
2563 libzfs_handle_t *hdl = zhp->zpool_hdl;
2564
2565 (void) snprintf(msg, sizeof (msg),
b8864a23 2566 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2567
2568 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2569 zc.zc_guid = guid;
2570 zc.zc_cookie = VDEV_STATE_DEGRADED;
428870ff 2571 zc.zc_obj = aux;
34dc7c2f 2572
572e2857 2573 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2574 return (0);
2575
2576 return (zpool_standard_error(hdl, errno, msg));
2577}
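/*
 * Example (illustrative): fmd-style consumers address devices by guid
 * rather than by path; 'aux' records why the state changed. The guid
 * here is assumed to come from a fault event.
 *
 *	(void) zpool_vdev_degrade(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
 */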
2578
2579/*
2580 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2581 * a hot spare.
2582 */
2583static boolean_t
2584is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2585{
2586 nvlist_t **child;
2587 uint_t c, children;
2588 char *type;
2589
2590 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2591 &children) == 0) {
2592 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2593 &type) == 0);
2594
2595 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2596 children == 2 && child[which] == tgt)
2597 return (B_TRUE);
2598
2599 for (c = 0; c < children; c++)
2600 if (is_replacing_spare(child[c], tgt, which))
2601 return (B_TRUE);
2602 }
2603
2604 return (B_FALSE);
2605}
2606
2607/*
2608 * Attach new_disk (fully described by nvroot) to old_disk.
2609 * If 'replacing' is specified, the new disk will replace the old one.
2610 */
2611int
2612zpool_vdev_attach(zpool_handle_t *zhp,
2613 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2614{
6f1ffb06 2615 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2616 char msg[1024];
2617 int ret;
2618 nvlist_t *tgt;
2619 boolean_t avail_spare, l2cache, islog;
2620 uint64_t val;
572e2857 2621 char *newname;
2622 nvlist_t **child;
2623 uint_t children;
2624 nvlist_t *config_root;
2625 libzfs_handle_t *hdl = zhp->zpool_hdl;
1bd201e7 2626 boolean_t rootpool = zpool_is_bootable(zhp);
2627
2628 if (replacing)
2629 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2630 "cannot replace %s with %s"), old_disk, new_disk);
2631 else
2632 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2633 "cannot attach %s to %s"), new_disk, old_disk);
2634
c372b36e 2635#if defined(__sun__) || defined(__sun)
2636 /*
2637 * If this is a root pool, make sure that we're not attaching an
2638 * EFI labeled device.
2639 */
2640 if (rootpool && pool_uses_efi(nvroot)) {
2641 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2642 "EFI labeled devices are not supported on root pools."));
2643 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2644 }
c372b36e 2645#endif
b128c09f 2646
34dc7c2f 2647 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2648 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2649 &islog)) == 0)
2650 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2651
2652 if (avail_spare)
2653 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2654
2655 if (l2cache)
2656 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2657
2658 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2659 zc.zc_cookie = replacing;
2660
2661 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2662 &child, &children) != 0 || children != 1) {
2663 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2664 "new device must be a single disk"));
2665 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2666 }
2667
2668 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2669 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2670
428870ff 2671 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2672 return (-1);
2673
2674 /*
2675 * If the target is a hot spare that has been swapped in, we can only
2676 * replace it with another hot spare.
2677 */
2678 if (replacing &&
2679 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2680 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2681 NULL) == NULL || !avail_spare) &&
2682 is_replacing_spare(config_root, tgt, 1)) {
2683 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2684 "can only be replaced by another hot spare"));
b128c09f 2685 free(newname);
2686 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2687 }
2688
2689 free(newname);
2690
2691 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2692 return (-1);
2693
572e2857 2694 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2695
2696 zcmd_free_nvlists(&zc);
2697
2698 if (ret == 0) {
2699 if (rootpool) {
2700 /*
2701 * XXX need a better way to prevent user from
2702 * booting up a half-baked vdev.
2703 */
2704 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2705 "sure to wait until resilver is done "
2706 "before rebooting.\n"));
b128c09f 2707 }
34dc7c2f 2708 return (0);
b128c09f 2709 }
2710
2711 switch (errno) {
2712 case ENOTSUP:
2713 /*
2714 * Can't attach to or replace this type of vdev.
2715 */
2716 if (replacing) {
2717 uint64_t version = zpool_get_prop_int(zhp,
2718 ZPOOL_PROP_VERSION, NULL);
2719
b128c09f 2720 if (islog)
2721 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2722 "cannot replace a log with a spare"));
2723 else if (version >= SPA_VERSION_MULTI_REPLACE)
2724 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2725 "already in replacing/spare config; wait "
2726 "for completion or use 'zpool detach'"));
2727 else
2728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2729 "cannot replace a replacing device"));
2730 } else {
2731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2732 "can only attach to mirrors and top-level "
2733 "disks"));
2734 }
2735 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2736 break;
2737
2738 case EINVAL:
2739 /*
2740 * The new device must be a single disk.
2741 */
2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2743 "new device must be a single disk"));
2744 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2745 break;
2746
2747 case EBUSY:
2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2749 new_disk);
2750 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2751 break;
2752
2753 case EOVERFLOW:
2754 /*
2755 * The new device is too small.
2756 */
2757 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2758 "device is too small"));
2759 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2760 break;
2761
2762 case EDOM:
2763 /*
2764 * The new device has a different alignment requirement.
2765 */
2766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2767 "devices have different sector alignment"));
2768 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2769 break;
2770
2771 case ENAMETOOLONG:
2772 /*
2773 * The resulting top-level vdev spec won't fit in the label.
2774 */
2775 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2776 break;
2777
2778 default:
2779 (void) zpool_standard_error(hdl, errno, msg);
2780 }
2781
2782 return (-1);
2783}
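/*
 * Example (illustrative sketch): building a minimal 'nvroot' with one
 * disk child and using it to replace an existing device. Device paths
 * are hypothetical, error handling and disk labeling are omitted;
 * zpool(8) constructs this tree via make_root_vdev().
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/sdb") == 0);
 *
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	(void) zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot, 1);
 *
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */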
2784
2785/*
2786 * Detach the specified device.
2787 */
2788int
2789zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2790{
6f1ffb06 2791 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2792 char msg[1024];
2793 nvlist_t *tgt;
2794 boolean_t avail_spare, l2cache;
2795 libzfs_handle_t *hdl = zhp->zpool_hdl;
2796
2797 (void) snprintf(msg, sizeof (msg),
2798 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2799
2800 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2801 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2802 NULL)) == 0)
2803 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2804
2805 if (avail_spare)
2806 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2807
2808 if (l2cache)
2809 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2810
2811 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2812
2813 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2814 return (0);
2815
2816 switch (errno) {
2817
2818 case ENOTSUP:
2819 /*
2820 * Can't detach from this type of vdev.
2821 */
2822 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2823 "applicable to mirror and replacing vdevs"));
572e2857 2824 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2825 break;
2826
2827 case EBUSY:
2828 /*
2829 * There are no other replicas of this device.
2830 */
2831 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2832 break;
2833
2834 default:
2835 (void) zpool_standard_error(hdl, errno, msg);
2836 }
2837
2838 return (-1);
2839}
2840
2841/*
2842 * Find a mirror vdev in the source nvlist.
2843 *
2844 * The mchild array contains a list of disks in one of the top-level mirrors
2845 * of the source pool. The schild array contains a list of disks that the
2846 * user specified on the command line. We loop over the mchild array to
2847 * see if any entry in the schild array matches.
2848 *
2849 * If a disk in the mchild array is found in the schild array, we return
2850 * the index of that entry. Otherwise we return -1.
2851 */
2852static int
2853find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2854 nvlist_t **schild, uint_t schildren)
2855{
2856 uint_t mc;
2857
2858 for (mc = 0; mc < mchildren; mc++) {
2859 uint_t sc;
2860 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2861 mchild[mc], B_FALSE);
2862
2863 for (sc = 0; sc < schildren; sc++) {
2864 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2865 schild[sc], B_FALSE);
2866 boolean_t result = (strcmp(mpath, spath) == 0);
2867
2868 free(spath);
2869 if (result) {
2870 free(mpath);
2871 return (mc);
2872 }
2873 }
2874
2875 free(mpath);
2876 }
2877
2878 return (-1);
2879}
2880
2881/*
2882 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2883 * is generated and it is the responsibility of the caller to free it.
2884 */
2885int
2886zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2887 nvlist_t *props, splitflags_t flags)
2888{
6f1ffb06 2889 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
2890 char msg[1024];
2891 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2892 nvlist_t **varray = NULL, *zc_props = NULL;
2893 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2894 libzfs_handle_t *hdl = zhp->zpool_hdl;
2895 uint64_t vers;
2896 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2897 int retval = 0;
2898
2899 (void) snprintf(msg, sizeof (msg),
2900 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2901
2902 if (!zpool_name_valid(hdl, B_FALSE, newname))
2903 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2904
2905 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2906 (void) fprintf(stderr, gettext("Internal error: unable to "
2907 "retrieve pool configuration\n"));
2908 return (-1);
2909 }
2910
2911 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2912 == 0);
2913 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2914
2915 if (props) {
572e2857 2916 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
428870ff 2917 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
572e2857 2918 props, vers, flags, msg)) == NULL)
2919 return (-1);
2920 }
2921
2922 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2923 &children) != 0) {
2924 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2925 "Source pool is missing vdev tree"));
2926 if (zc_props)
2927 nvlist_free(zc_props);
2928 return (-1);
2929 }
2930
2931 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2932 vcount = 0;
2933
2934 if (*newroot == NULL ||
2935 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2936 &newchild, &newchildren) != 0)
2937 newchildren = 0;
2938
2939 for (c = 0; c < children; c++) {
2940 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2941 char *type;
2942 nvlist_t **mchild, *vdev;
2943 uint_t mchildren;
2944 int entry;
2945
2946 /*
2947 * Unlike cache & spares, slogs are stored in the
2948 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2949 */
2950 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2951 &is_log);
2952 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2953 &is_hole);
2954 if (is_log || is_hole) {
2955 /*
2956 * Create a hole vdev and put it in the config.
2957 */
2958 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2959 goto out;
2960 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2961 VDEV_TYPE_HOLE) != 0)
2962 goto out;
2963 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2964 1) != 0)
2965 goto out;
2966 if (lastlog == 0)
2967 lastlog = vcount;
2968 varray[vcount++] = vdev;
2969 continue;
2970 }
2971 lastlog = 0;
2972 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2973 == 0);
2974 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2976 "Source pool must be composed only of mirrors\n"));
2977 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2978 goto out;
2979 }
2980
2981 verify(nvlist_lookup_nvlist_array(child[c],
2982 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2983
2984 /* find or add an entry for this top-level vdev */
2985 if (newchildren > 0 &&
2986 (entry = find_vdev_entry(zhp, mchild, mchildren,
2987 newchild, newchildren)) >= 0) {
2988 /* We found a disk that the user specified. */
2989 vdev = mchild[entry];
2990 ++found;
2991 } else {
2992 /* User didn't specify a disk for this vdev. */
2993 vdev = mchild[mchildren - 1];
2994 }
2995
2996 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2997 goto out;
2998 }
2999
3000 /* did we find every disk the user specified? */
3001 if (found != newchildren) {
3002 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3003 "include at most one disk from each mirror"));
3004 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3005 goto out;
3006 }
3007
3008 /* Prepare the nvlist for populating. */
3009 if (*newroot == NULL) {
3010 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3011 goto out;
3012 freelist = B_TRUE;
3013 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3014 VDEV_TYPE_ROOT) != 0)
3015 goto out;
3016 } else {
3017 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3018 }
3019
3020 /* Add all the children we found */
3021 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3022 lastlog == 0 ? vcount : lastlog) != 0)
3023 goto out;
3024
3025 /*
3026 * If we're just doing a dry run, exit now with success.
3027 */
3028 if (flags.dryrun) {
3029 memory_err = B_FALSE;
3030 freelist = B_FALSE;
3031 goto out;
3032 }
3033
3034 /* now build up the config list & call the ioctl */
3035 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3036 goto out;
3037
3038 if (nvlist_add_nvlist(newconfig,
3039 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3040 nvlist_add_string(newconfig,
3041 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3042 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3043 goto out;
3044
3045 /*
3046 * The new pool is automatically part of the namespace unless we
3047 * explicitly export it.
3048 */
3049 if (!flags.import)
3050 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3051 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3052 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3053 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3054 goto out;
3055 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3056 goto out;
3057
3058 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3059 retval = zpool_standard_error(hdl, errno, msg);
3060 goto out;
3061 }
3062
3063 freelist = B_FALSE;
3064 memory_err = B_FALSE;
3065
3066out:
3067 if (varray != NULL) {
3068 int v;
3069
3070 for (v = 0; v < vcount; v++)
3071 nvlist_free(varray[v]);
3072 free(varray);
3073 }
3074 zcmd_free_nvlists(&zc);
3075 if (zc_props)
3076 nvlist_free(zc_props);
3077 if (newconfig)
3078 nvlist_free(newconfig);
3079 if (freelist) {
3080 nvlist_free(*newroot);
3081 *newroot = NULL;
3082 }
3083
3084 if (retval != 0)
3085 return (retval);
3086
3087 if (memory_err)
3088 return (no_memory(hdl));
3089
3090 return (0);
3091}
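/*
 * Example (illustrative): a dry-run split of a mirrored pool into a
 * new pool "tank2", letting the library pick the last disk of each
 * mirror. The returned tree must be freed by the caller.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0) {
 *		... inspect the proposed config in newroot ...
 *		nvlist_free(newroot);
 *	}
 */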
3092
3093/*
3094 * Remove the given device. Currently, this is supported only for hot spares
3095 * and level 2 cache devices.
3096 */
3097int
3098zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3099{
6f1ffb06 3100 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3101 char msg[1024];
3102 nvlist_t *tgt;
428870ff 3103 boolean_t avail_spare, l2cache, islog;
34dc7c2f 3104 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 3105 uint64_t version;
3106
3107 (void) snprintf(msg, sizeof (msg),
3108 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3109
3110 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 3111 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
428870ff 3112 &islog)) == 0)
34dc7c2f 3113 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3114 /*
3115 * XXX - this should just go away.
3116 */
3117 if (!avail_spare && !l2cache && !islog) {
34dc7c2f 3118 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3119 "only inactive hot spares, cache, top-level, "
3120 "or log devices can be removed"));
3121 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3122 }
3123
3124 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3125 if (islog && version < SPA_VERSION_HOLES) {
3126 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3127 "pool must be upgrade to support log removal"));
3128 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3129 }
3130
3131 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3132
3133 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3134 return (0);
3135
3136 return (zpool_standard_error(hdl, errno, msg));
3137}
3138
3139/*
3140 * Clear the errors for the pool, or the particular device if specified.
3141 */
3142int
428870ff 3143zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
34dc7c2f 3144{
6f1ffb06 3145 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3146 char msg[1024];
3147 nvlist_t *tgt;
428870ff 3148 zpool_rewind_policy_t policy;
3149 boolean_t avail_spare, l2cache;
3150 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 3151 nvlist_t *nvi = NULL;
572e2857 3152 int error;
3153
3154 if (path)
3155 (void) snprintf(msg, sizeof (msg),
3156 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3157 path);
3158 else
3159 (void) snprintf(msg, sizeof (msg),
3160 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3161 zhp->zpool_name);
3162
3163 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3164 if (path) {
3165 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
b128c09f 3166 &l2cache, NULL)) == 0)
3167 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3168
3169 /*
3170 * Don't allow error clearing for hot spares. Do allow
3171 * error clearing for l2cache devices.
3172 */
3173 if (avail_spare)
3174 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3175
3176 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3177 &zc.zc_guid) == 0);
3178 }
3179
3180 zpool_get_rewind_policy(rewindnvl, &policy);
3181 zc.zc_cookie = policy.zrp_request;
3182
572e2857 3183 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3184 return (-1);
3185
572e2857 3186 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3187 return (-1);
3188
3189 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3190 errno == ENOMEM) {
3191 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3192 zcmd_free_nvlists(&zc);
3193 return (-1);
3194 }
3195 }
3196
3197 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3198 errno != EPERM && errno != EACCES)) {
3199 if (policy.zrp_request &
3200 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3201 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3202 zpool_rewind_exclaim(hdl, zc.zc_name,
3203 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3204 nvi);
3205 nvlist_free(nvi);
3206 }
3207 zcmd_free_nvlists(&zc);
34dc7c2f 3208 return (0);
428870ff 3209 }
34dc7c2f 3210
428870ff 3211 zcmd_free_nvlists(&zc);
3212 return (zpool_standard_error(hdl, errno, msg));
3213}
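/*
 * Example (illustrative): clearing all errors for a pool with an
 * explicit no-rewind policy, mirroring what zpool(8) passes in.
 *
 *	nvlist_t *policy;
 *
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
 *	    ZPOOL_NO_REWIND) == 0);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	nvlist_free(policy);
 */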
3214
3215/*
3216 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3217 */
3218int
3219zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3220{
6f1ffb06 3221 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3222 char msg[1024];
3223 libzfs_handle_t *hdl = zhp->zpool_hdl;
3224
3225 (void) snprintf(msg, sizeof (msg),
3226 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
b8864a23 3227 (u_longlong_t)guid);
3228
3229 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3230 zc.zc_guid = guid;
428870ff 3231 zc.zc_cookie = ZPOOL_NO_REWIND;
3232
3233 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3234 return (0);
3235
3236 return (zpool_standard_error(hdl, errno, msg));
3237}
3238
3239/*
3240 * Change the GUID for a pool.
3241 */
3242int
3243zpool_reguid(zpool_handle_t *zhp)
3244{
3245 char msg[1024];
3246 libzfs_handle_t *hdl = zhp->zpool_hdl;
6f1ffb06 3247 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3248
3249 (void) snprintf(msg, sizeof (msg),
3250 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3251
3252 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3253 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3254 return (0);
3255
3256 return (zpool_standard_error(hdl, errno, msg));
3257}
3258
3259/*
3260 * Reopen the pool.
3261 */
3262int
3263zpool_reopen(zpool_handle_t *zhp)
3264{
6f1ffb06 3265 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3266 char msg[1024];
3267 libzfs_handle_t *hdl = zhp->zpool_hdl;
3268
3269 (void) snprintf(msg, sizeof (msg),
3270 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3271 zhp->zpool_name);
3272
3273 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3274 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3275 return (0);
3276 return (zpool_standard_error(hdl, errno, msg));
3277}
3278
3279/*
3280 * Convert from a devid string to a path.
3281 */
3282static char *
3283devid_to_path(char *devid_str)
3284{
3285 ddi_devid_t devid;
3286 char *minor;
3287 char *path;
3288 devid_nmlist_t *list = NULL;
3289 int ret;
3290
3291 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3292 return (NULL);
3293
3294 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3295
3296 devid_str_free(minor);
3297 devid_free(devid);
3298
3299 if (ret != 0)
3300 return (NULL);
3301
3302 if ((path = strdup(list[0].devname)) == NULL)
3303 return (NULL);
3304
3305 devid_free_nmlist(list);
3306
3307 return (path);
3308}
3309
3310/*
3311 * Convert from a path to a devid string.
3312 */
3313static char *
3314path_to_devid(const char *path)
3315{
3316 int fd;
3317 ddi_devid_t devid;
3318 char *minor, *ret;
3319
3320 if ((fd = open(path, O_RDONLY)) < 0)
3321 return (NULL);
3322
3323 minor = NULL;
3324 ret = NULL;
3325 if (devid_get(fd, &devid) == 0) {
3326 if (devid_get_minor_name(fd, &minor) == 0)
3327 ret = devid_str_encode(devid, minor);
3328 if (minor != NULL)
3329 devid_str_free(minor);
3330 devid_free(devid);
3331 }
3332 (void) close(fd);
3333
3334 return (ret);
3335}
3336
3337/*
3338 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3339 * ignore any failure here, since a common case is for an unprivileged user to
3340 * type 'zpool status', and we'll display the correct information anyway.
3341 */
3342static void
3343set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3344{
6f1ffb06 3345 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3346
3347 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3348 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3349 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3350 &zc.zc_guid) == 0);
3351
3352 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3353}
3354
3355/*
3356 * Remove partition suffix from a vdev path. Partition suffixes may take three
3357 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3358 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3359 * third case only occurs when preceded by a string matching the regular
3360 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
3361 */
3362static char *
3363strip_partition(libzfs_handle_t *hdl, char *path)
3364{
3365 char *tmp = zfs_strdup(hdl, path);
3366 char *part = NULL, *d = NULL;
3367
3368 if ((part = strstr(tmp, "-part")) && part != tmp) {
3369 d = part + 5;
3370 } else if ((part = strrchr(tmp, 'p')) &&
3371 part > tmp + 1 && isdigit(*(part-1))) {
3372 d = part + 1;
3373 } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3374 for (d = &tmp[2]; isalpha(*d); part = ++d);
3375 }
3376 if (part && d && *d != '\0') {
3377 for (; isdigit(*d); d++);
3378 if (*d == '\0')
3379 *part = '\0';
3380 }
3381 return (tmp);
3382}
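/*
 * Illustrative inputs and outputs: "sdb-part2" and "sdb2" both become
 * "sdb", "md126p1" becomes "md126", and "dm-0" is returned unchanged.
 */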
3383
3384#define PATH_BUF_LEN 64
3385
3386/*
3387 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3388 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3389 * We also check if this is a whole disk, in which case we strip off the
3390 * trailing 's0' slice name.
3391 *
3392 * This routine is also responsible for identifying when disks have been
3393 * reconfigured in a new location. The kernel will have opened the device by
3394 * devid, but the path will still refer to the old location. To catch this, we
3395 * first do a path -> devid translation (which is fast for the common case). If
3396 * the devid matches, we're done. If not, we do a reverse devid -> path
3397 * translation and issue the appropriate ioctl() to update the path of the vdev.
3398 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3399 * of these checks.
3400 */
3401char *
3402zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3403 boolean_t verbose)
34dc7c2f 3404{
d603ed6c 3405 char *path, *devid, *type;
34dc7c2f 3406 uint64_t value;
858219cc 3407 char buf[PATH_BUF_LEN];
fc24f7c8 3408 char tmpbuf[PATH_BUF_LEN];
3409 vdev_stat_t *vs;
3410 uint_t vsc;
3411
3412 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3413 &value) == 0) {
3414 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3415 &value) == 0);
3416 (void) snprintf(buf, sizeof (buf), "%llu",
3417 (u_longlong_t)value);
3418 path = buf;
3419 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3420 /*
3421 * If the device is dead (faulted, offline, etc) then don't
3422 * bother opening it. Otherwise we may be forcing the user to
3423 * open a misbehaving device, which can have undesirable
3424 * effects.
3425 */
428870ff 3426 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3427 (uint64_t **)&vs, &vsc) != 0 ||
3428 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3429 zhp != NULL &&
3430 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3431 /*
3432 * Determine if the current path is correct.
3433 */
3434 char *newdevid = path_to_devid(path);
3435
3436 if (newdevid == NULL ||
3437 strcmp(devid, newdevid) != 0) {
3438 char *newpath;
3439
3440 if ((newpath = devid_to_path(devid)) != NULL) {
3441 /*
3442 * Update the path appropriately.
3443 */
3444 set_path(zhp, nv, newpath);
3445 if (nvlist_add_string(nv,
3446 ZPOOL_CONFIG_PATH, newpath) == 0)
3447 verify(nvlist_lookup_string(nv,
3448 ZPOOL_CONFIG_PATH,
3449 &path) == 0);
3450 free(newpath);
3451 }
3452 }
3453
3454 if (newdevid)
3455 devid_str_free(newdevid);
3456 }
3457
3458 /*
3459 * For a block device only use the name.
3460 */
3461 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3462 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3463 path = strrchr(path, '/');
3464 path++;
3465 }
34dc7c2f 3466
d603ed6c 3467 /*
83c62c93 3468 * Remove the partition from the path if this is a whole disk.
d603ed6c 3469 */
3470 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3471 &value) == 0 && value) {
83c62c93 3472 return (strip_partition(hdl, path));
3473 }
3474 } else {
3475 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3476
3477 /*
3478 * If it's a raidz device, we need to stick in the parity level.
3479 */
3480 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
858219cc 3481
3482 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3483 &value) == 0);
fc24f7c8 3484 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
34dc7c2f 3485 (u_longlong_t)value);
fc24f7c8 3486 path = buf;
34dc7c2f 3487 }
3488
3489 /*
3490 * We identify each top-level vdev by using a <type-id>
3491 * naming convention.
3492 */
3493 if (verbose) {
3494 uint64_t id;
3495
3496 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3497 &id) == 0);
3498 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3499 path, (u_longlong_t)id);
3500 path = tmpbuf;
428870ff 3501 }
3502 }
3503
3504 return (zfs_strdup(hdl, path));
3505}
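/*
 * Illustrative results: a disk vdev with path "/dev/sda" is named
 * "sda"; a raidz2 top-level vdev is named "raidz2", or "raidz2-0" when
 * 'verbose' is set; a device that is not present is named by its guid.
 */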
3506
3507static int
3508zbookmark_compare(const void *a, const void *b)
3509{
3510 return (memcmp(a, b, sizeof (zbookmark_t)));
3511}
3512
3513/*
3514 * Retrieve the persistent error log, uniquify the members, and return to the
3515 * caller.
3516 */
3517int
3518zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3519{
6f1ffb06 3520 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3521 uint64_t count;
3522 zbookmark_t *zb = NULL;
3523 int i;
3524
3525 /*
3526 * Retrieve the raw error list from the kernel. If the number of errors
3527 * has increased, allocate more space and continue until we get the
3528 * entire list.
3529 */
3530 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3531 &count) == 0);
3532 if (count == 0)
3533 return (0);
3534 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3535 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3536 return (-1);
3537 zc.zc_nvlist_dst_size = count;
3538 (void) strcpy(zc.zc_name, zhp->zpool_name);
3539 for (;;) {
3540 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3541 &zc) != 0) {
3542 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3543 if (errno == ENOMEM) {
3544 count = zc.zc_nvlist_dst_size;
3545 if ((zc.zc_nvlist_dst = (uintptr_t)
3546 zfs_alloc(zhp->zpool_hdl, count *
3547 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3548 return (-1);
3549 } else {
3550 return (-1);
3551 }
3552 } else {
3553 break;
3554 }
3555 }
3556
3557 /*
3558 * Sort the resulting bookmarks. This is a little confusing due to the
3559 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3560 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3561 * _not_ copied as part of the process. So we point the start of our
3562 * array appropriately and decrement the total number of elements.
3563 */
3564 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3565 zc.zc_nvlist_dst_size;
3566 count -= zc.zc_nvlist_dst_size;
3567
3568 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3569
3570 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3571
3572 /*
3573 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3574 */
3575 for (i = 0; i < count; i++) {
3576 nvlist_t *nv;
3577
3578 /* ignoring zb_blkid and zb_level for now */
3579 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3580 zb[i-1].zb_object == zb[i].zb_object)
3581 continue;
3582
3583 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3584 goto nomem;
3585 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3586 zb[i].zb_objset) != 0) {
3587 nvlist_free(nv);
3588 goto nomem;
3589 }
3590 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3591 zb[i].zb_object) != 0) {
3592 nvlist_free(nv);
3593 goto nomem;
3594 }
3595 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3596 nvlist_free(nv);
3597 goto nomem;
3598 }
3599 nvlist_free(nv);
3600 }
3601
3602 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3603 return (0);
3604
3605nomem:
3606 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3607 return (no_memory(zhp->zpool_hdl));
3608}
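/*
 * Example (illustrative): walking the uniquified error list. Each
 * element is an nvlist holding ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL)
 *			... decode the nvpair's nvlist value ...
 *		nvlist_free(nverrlist);
 *	}
 */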
3609
3610/*
3611 * Upgrade a ZFS pool to the latest on-disk version.
3612 */
3613int
3614zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3615{
6f1ffb06 3616 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3617 libzfs_handle_t *hdl = zhp->zpool_hdl;
3618
3619 (void) strcpy(zc.zc_name, zhp->zpool_name);
3620 zc.zc_cookie = new_version;
3621
3622 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3623 return (zpool_standard_error_fmt(hdl, errno,
3624 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3625 zhp->zpool_name));
3626 return (0);
3627}
3628
3629void
6f1ffb06 3630zfs_save_arguments(int argc, char **argv, char *string, int len)
3631{
3632 int i;
3633
6f1ffb06 3634 (void) strlcpy(string, basename(argv[0]), len);
34dc7c2f 3635 for (i = 1; i < argc; i++) {
3636 (void) strlcat(string, " ", len);
3637 (void) strlcat(string, argv[i], len);
3638 }
3639}
3640
34dc7c2f 3641int
3642zpool_log_history(libzfs_handle_t *hdl, const char *message)
3643{
3644 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3645 nvlist_t *args;
3646 int err;
3647
3648 args = fnvlist_alloc();
3649 fnvlist_add_string(args, "message", message);
3650 err = zcmd_write_src_nvlist(hdl, &zc, args);
3651 if (err == 0)
3652 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3653 nvlist_free(args);
3654 zcmd_free_nvlists(&zc);
3655 return (err);
3656}
3657
3658/*
3659 * Perform ioctl to get some command history of a pool.
3660 *
3661 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3662 * logical offset of the history buffer to start reading from.
3663 *
3664 * Upon return, 'off' is the next logical offset to read from and
3665 * 'len' is the actual amount of bytes read into 'buf'.
3666 */
3667static int
3668get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3669{
6f1ffb06 3670 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
3671 libzfs_handle_t *hdl = zhp->zpool_hdl;
3672
3673 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3674
3675 zc.zc_history = (uint64_t)(uintptr_t)buf;
3676 zc.zc_history_len = *len;
3677 zc.zc_history_offset = *off;
3678
3679 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3680 switch (errno) {
3681 case EPERM:
3682 return (zfs_error_fmt(hdl, EZFS_PERM,
3683 dgettext(TEXT_DOMAIN,
3684 "cannot show history for pool '%s'"),
3685 zhp->zpool_name));
3686 case ENOENT:
3687 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3688 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3689 "'%s'"), zhp->zpool_name));
3690 case ENOTSUP:
3691 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3692 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3693 "'%s', pool must be upgraded"), zhp->zpool_name));
3694 default:
3695 return (zpool_standard_error_fmt(hdl, errno,
3696 dgettext(TEXT_DOMAIN,
3697 "cannot get history for '%s'"), zhp->zpool_name));
3698 }
3699 }
3700
3701 *len = zc.zc_history_len;
3702 *off = zc.zc_history_offset;
3703
3704 return (0);
3705}
3706
3707/*
3708 * Process the buffer of nvlists, unpacking and storing each nvlist record
3709 * into 'records'. 'leftover' is set to the number of bytes that weren't
3710 * processed as there wasn't a complete record.
3711 */
428870ff 3712int
3713zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3714 nvlist_t ***records, uint_t *numrecords)
3715{
3716 uint64_t reclen;
3717 nvlist_t *nv;
3718 int i;
3719
3720 while (bytes_read > sizeof (reclen)) {
3721
3722 /* get length of packed record (stored as little endian) */
3723 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3724 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3725
3726 if (bytes_read < sizeof (reclen) + reclen)
3727 break;
3728
3729 /* unpack record */
3730 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3731 return (ENOMEM);
3732 bytes_read -= sizeof (reclen) + reclen;
3733 buf += sizeof (reclen) + reclen;
3734
3735 /* add record to nvlist array */
3736 (*numrecords)++;
3737 if (ISP2(*numrecords + 1)) {
3738 *records = realloc(*records,
3739 *numrecords * 2 * sizeof (nvlist_t *));
3740 }
3741 (*records)[*numrecords - 1] = nv;
3742 }
3743
3744 *leftover = bytes_read;
3745 return (0);
3746}
3747
3748#define HIS_BUF_LEN (128*1024)
3749
3750/*
3751 * Retrieve the command history of a pool.
3752 */
3753int
3754zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3755{
3756 char buf[HIS_BUF_LEN];
3757 uint64_t off = 0;
3758 nvlist_t **records = NULL;
3759 uint_t numrecords = 0;
3760 int err, i;
3761
3762 do {
3763 uint64_t bytes_read = sizeof (buf);
3764 uint64_t leftover;
3765
3766 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3767 break;
3768
3769 /* if nothing else was read in, we're at EOF, just return */
3770 if (!bytes_read)
3771 break;
3772
3773 if ((err = zpool_history_unpack(buf, bytes_read,
3774 &leftover, &records, &numrecords)) != 0)
3775 break;
3776 off -= leftover;
3777
3778 /* CONSTCOND */
3779 } while (1);
3780
3781 if (!err) {
3782 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3783 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3784 records, numrecords) == 0);
3785 }
3786 for (i = 0; i < numrecords; i++)
3787 nvlist_free(records[i]);
3788 free(records);
3789
3790 return (err);
3791}
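/*
 * Example (illustrative): printing each ZPOOL_HIST_CMD string from the
 * history returned by zpool_get_history().
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrec, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &nrec) == 0);
 *		for (i = 0; i < nrec; i++) {
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */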
3792
3793/*
3794 * Retrieve the next event. If there is a new event available 'nvp' will
3795 * contain a newly allocated nvlist and 'dropped' will be set to the number
3796 * of missed events since the last call to this function. When 'nvp' is
3797 * set to NULL it indicates no new events are available. In either case
3798 * the function returns 0 and it is up to the caller to free 'nvp'. In
3799 * the case of a fatal error the function will return a non-zero value.
3800 * When the function is called in blocking mode it will not return until
3801 * a new event is available.
3802 */
3803int
3804zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3805 int *dropped, int block, int cleanup_fd)
3806{
6f1ffb06 3807 zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
26685276
BB
3808 int error = 0;
3809
3810 *nvp = NULL;
3811 *dropped = 0;
3812 zc.zc_cleanup_fd = cleanup_fd;
3813
3814 if (!block)
3815 zc.zc_guid = ZEVENT_NONBLOCK;
3816
3817 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3818 return (-1);
3819
3820retry:
3821 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3822 switch (errno) {
3823 case ESHUTDOWN:
3824 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3825 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3826 goto out;
3827 case ENOENT:
3828 /* Blocking error case should not occur */
3829 if (block)
3830 error = zpool_standard_error_fmt(hdl, errno,
3831 dgettext(TEXT_DOMAIN, "cannot get event"));
3832
3833 goto out;
3834 case ENOMEM:
3835 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3836 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3837 dgettext(TEXT_DOMAIN, "cannot get event"));
3838 goto out;
3839 } else {
3840 goto retry;
3841 }
3842 default:
3843 error = zpool_standard_error_fmt(hdl, errno,
3844 dgettext(TEXT_DOMAIN, "cannot get event"));
3845 goto out;
3846 }
3847 }
3848
3849 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3850 if (error != 0)
3851 goto out;
3852
3853 *dropped = (int)zc.zc_cookie;
3854out:
3855 zcmd_free_nvlists(&zc);
3856
3857 return (error);
3858}
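
/*
 * Illustrative sketch (not compiled): a caller draining pending events
 * in non-blocking mode. The cleanup_fd would normally come from opening
 * ZFS_DEV; here it is a hypothetical descriptor supplied by the caller.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl, int cleanup_fd)
{
	nvlist_t *nvl;
	int dropped;

	/* block = 0: return immediately when no event is pending */
	while (zpool_events_next(hdl, &nvl, &dropped, 0, cleanup_fd) == 0) {
		if (nvl == NULL)
			break;	/* no more events queued */
		if (dropped > 0)
			(void) fprintf(stderr, "missed %d events\n", dropped);
		nvlist_print(stdout, nvl);
		nvlist_free(nvl);
	}
}
#endif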

/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0", 0, 0, 0, 0, 0, 0, 0, "\0", "\0", "\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
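
/*
 * For reference, the possible output forms above (paths hypothetical):
 * "<metadata>:<0x1a>" for MOS objects, "<0x10>:<0x1a>" when the dataset
 * name cannot be resolved, "/tank/fs/file" when the dataset is mounted,
 * "tank/fs:/file" when it is not, and "tank/fs:<0x1a>" when the object
 * path lookup fails.
 */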

/*
 * Read the EFI label from the config; if a label does not exist, pass
 * back the error to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

int
zpool_label_disk_wait(char *path, int timeout)
{
	struct stat64 statbuf;
	int i;

	/*
	 * Wait timeout milliseconds for a newly created device to be
	 * available from the given path. There is a small window when a
	 * /dev/ device will exist and the udev link will not, so we must
	 * wait for the symlink. Depending on the udev rules this may take
	 * a few seconds.
	 */
	for (i = 0; i < timeout; i++) {
		usleep(1000);

		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0))
			return (0);
	}

	return (ENOENT);
}

int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Label an individual disk. The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

#if defined(__sun__) || defined(__sun)
		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}
#endif

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen. We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice). V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
	 * etc. were all pretty specific. V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;
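	/*
	 * Layout recap (values hypothetical): with efi_last_u_lba = N and
	 * start block S, slice 0 spans [S, S + slice_size) where
	 * slice_size = P2ALIGN(N + 1 - EFI_MIN_RESV_SIZE - S,
	 * PARTITION_END_ALIGNMENT), and slice 8 reserves the final
	 * EFI_MIN_RESV_SIZE blocks starting at S + slice_size.
	 */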

	if ((rval = efi_write(fd, vtoc)) != 0 ||
	    (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels. Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	/* Wait for the first expected partition to appear. */

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	rval = zpool_label_disk_wait(path, 3000);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid. Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged. Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}