/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2017 Datto Inc.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
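
/*
 * Illustrative sketch (an assumption, not library code): a hypothetical
 * caller reading a numeric pool property with zpool_get_prop_int().  The
 * pool name "tank" and the surrounding setup are made up for the example.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	zprop_source_t src;
 *	uint64_t vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *
 *	(void) printf("version=%llu (%s)\n", (u_longlong_t)vers,
 *	    src == ZPROP_SRC_DEFAULT ? "default" : "local");
 *	zpool_close(zhp);
 *	libzfs_fini(hdl);
 */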

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
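
/*
 * Illustrative sketch (an assumption, not library code): reading the
 * "capacity" property.  With literal == B_FALSE the buffer receives a
 * human-readable form such as "42%"; with B_TRUE it gets the raw "42".
 * 'zhp' is an open pool handle; error handling is elided.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 */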

/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on.  'bootfs' is assumed to be a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
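
/*
 * For illustration (an assumption about usage, not library code): with
 * pool "tank", bootfs_name_valid() accepts "tank" and "tank/ROOT/fs",
 * but rejects "tankx/fs" and "other/fs", since the bootfs dataset must
 * live in the pool the property is set on.
 */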

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid '%s=%d' property: only values "
				    "between %" PRId32 " and %" PRId32 " "
				    "are allowed.\n"),
				    propname, intval, ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool it is being
			 * set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
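
/*
 * Illustrative sketch (an assumption, not library code): the kind of
 * nvlist zpool_valid_proplist() accepts via its public callers.  Numeric
 * properties may be given as strings, and feature@ properties must be
 * "enabled" or "disabled"; the nvlist below would be validated and
 * parsed when passed to zpool_create() or zpool_import_props().
 *
 *	nvlist_t *props = NULL;
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props, "ashift", "12") == 0);
 *	verify(nvlist_add_string(props, "feature@lz4_compress",
 *	    ZFS_FEATURE_ENABLED) == 0);
 */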

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
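
/*
 * Illustrative sketch (an assumption, not library code): setting a single
 * property through zpool_set_prop(); the value is validated by
 * zpool_valid_proplist() before the ioctl is issued.  'hdl' is the
 * libzfs handle the pool was opened with.
 *
 *	if (zpool_set_prop(zhp, "comment", "main data pool") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */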

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
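
/*
 * Illustrative sketch (an assumption, not library code): querying a
 * feature state; for a supported feature the buffer receives "disabled",
 * "enabled", or "active".
 *
 *	char state[16];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */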

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048

/*
 * Validate the given pool name, reporting any extended error detail
 * through 'hdl' when it is non-NULL.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
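
/*
 * For illustration (an assumption about usage, not library code):
 *
 *	zpool_name_valid(hdl, B_FALSE, "tank")    accepts the name
 *	zpool_name_valid(hdl, B_FALSE, "mirror")  rejects a reserved name
 *	zpool_name_valid(hdl, B_FALSE, "1pool")   rejects a leading digit
 *
 * On rejection the detail is recorded via zfs_error_aux() on 'hdl'.
 */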

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
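
/*
 * Illustrative sketch (an assumption, not library code): the usual
 * open/use/close pattern around zpool_open().
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (1);
 *	(void) printf("pool: %s\n", zpool_get_name(zhp));
 *	zpool_close(zhp);
 */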

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
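
/*
 * Illustrative sketch (an assumption, not library code): a minimal vdev
 * nvlist for zpool_create().  Real callers (the zpool command) build
 * 'nvroot' with much more care via their own helpers; the device path
 * "/dev/sdb" is hypothetical.
 *
 *	nvlist_t *disk, *root;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb") == 0);
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	if (zpool_create(hdl, "tank", root, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */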

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
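
/*
 * Illustrative sketch (an assumption, not library code): exporting a
 * pool.  'log_str' is an optional command string recorded in the pool
 * history and may be NULL.
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */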

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
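
/*
 * Illustrative sketch (an assumption, not library code): importing a
 * pool from a configuration discovered on disk.  How 'config' is
 * obtained varies by libzfs version and is only hinted at here.
 *
 *	nvlist_t *config;	(one pool's config from a discovery routine)
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt/altroot") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */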

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
1945
1946/*
428870ff 1947 * Scan the pool.
34dc7c2f
BB
1948 */
1949int
cae5b340 1950zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
34dc7c2f 1951{
a08ee875 1952 zfs_cmd_t zc = {"\0"};
34dc7c2f 1953 char msg[1024];
cae5b340 1954 int err;
34dc7c2f
BB
1955 libzfs_handle_t *hdl = zhp->zpool_hdl;
1956
1957 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
428870ff 1958 zc.zc_cookie = func;
cae5b340 1959 zc.zc_flags = cmd;
34dc7c2f 1960
cae5b340
AX
1961 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
1962 return (0);
1963
1964 err = errno;
1965
1966 /* ECANCELED on a scrub means we resumed a paused scrub */
1967 if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
1968 cmd == POOL_SCRUB_NORMAL)
1969 return (0);
1970
1971 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
34dc7c2f
BB
1972 return (0);
1973
428870ff 1974 if (func == POOL_SCAN_SCRUB) {
cae5b340
AX
1975 if (cmd == POOL_SCRUB_PAUSE) {
1976 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1977 "cannot pause scrubbing %s"), zc.zc_name);
1978 } else {
1979 assert(cmd == POOL_SCRUB_NORMAL);
1980 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1981 "cannot scrub %s"), zc.zc_name);
1982 }
428870ff
BB
1983 } else if (func == POOL_SCAN_NONE) {
1984 (void) snprintf(msg, sizeof (msg),
1985 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1986 zc.zc_name);
1987 } else {
1988 assert(!"unexpected result");
1989 }
34dc7c2f 1990
cae5b340 1991 if (err == EBUSY) {
428870ff
BB
1992 nvlist_t *nvroot;
1993 pool_scan_stat_t *ps = NULL;
1994 uint_t psc;
1995
1996 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1997 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1998 (void) nvlist_lookup_uint64_array(nvroot,
1999 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
cae5b340
AX
2000 if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
2001 if (cmd == POOL_SCRUB_PAUSE)
2002 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
2003 else
2004 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
2005 } else {
428870ff 2006 return (zfs_error(hdl, EZFS_RESILVERING, msg));
cae5b340
AX
2007 }
2008 } else if (err == ENOENT) {
428870ff
BB
2009 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
2010 } else {
cae5b340 2011 return (zpool_standard_error(hdl, err, msg));
428870ff
BB
2012 }
2013}
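/*
 * Illustrative sketch, not part of this file: driving zpool_scan() from a
 * caller, where 'zhp' is assumed to be an open handle from zpool_open().
 * The four calls below start, pause, resume, and cancel a scrub; resuming
 * a paused scrub is just another POOL_SCRUB_NORMAL request, which the
 * ECANCELED case above maps to success.
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 */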
2014
34dc7c2f 2015/*
9babb374
BB
2016 * Find a vdev that matches the specified search criteria. We use the
2017 * nvpair name to determine how we should look for the device.
34dc7c2f
BB
2018 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2019 * spare, but FALSE if it's an INUSE spare.
2020 */
2021static nvlist_t *
9babb374
BB
2022vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2023 boolean_t *l2cache, boolean_t *log)
34dc7c2f
BB
2024{
2025 uint_t c, children;
2026 nvlist_t **child;
34dc7c2f 2027 nvlist_t *ret;
b128c09f 2028 uint64_t is_log;
9babb374
BB
2029 char *srchkey;
2030 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2031
2032 /* Nothing to look for */
2033 if (search == NULL || pair == NULL)
2034 return (NULL);
2035
2036 /* Obtain the key we will use to search */
2037 srchkey = nvpair_name(pair);
2038
2039 switch (nvpair_type(pair)) {
572e2857 2040 case DATA_TYPE_UINT64:
9babb374 2041 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
572e2857
BB
2042 uint64_t srchval, theguid;
2043
2044 verify(nvpair_value_uint64(pair, &srchval) == 0);
2045 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2046 &theguid) == 0);
2047 if (theguid == srchval)
2048 return (nv);
9babb374
BB
2049 }
2050 break;
9babb374
BB
2051
2052 case DATA_TYPE_STRING: {
2053 char *srchval, *val;
2054
2055 verify(nvpair_value_string(pair, &srchval) == 0);
2056 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2057 break;
34dc7c2f 2058
9babb374 2059 /*
428870ff
BB
2060 * Search for the requested value. Special cases:
2061 *
eac47204
BB
2062 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2063 * "-part1", or "p1". The suffix is hidden from the user,
2064 * but included in the string, so this matches around it.
2065 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2066 * is used to check all possible expanded paths.
428870ff
BB
2067 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2068 *
2069 * Otherwise, all other searches are simple string compares.
9babb374 2070 */
a2c6816c 2071 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
9babb374
BB
2072 uint64_t wholedisk = 0;
2073
2074 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2075 &wholedisk);
eac47204
BB
2076 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2077 return (nv);
428870ff 2078
428870ff
BB
2079 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2080 char *type, *idx, *end, *p;
2081 uint64_t id, vdev_id;
2082
2083 /*
2084 * Determine our vdev type, keeping in mind
2085 * that the srchval is composed of a type and
2086 * vdev id pair (e.g. mirror-4).
2087 */
2088 if ((type = strdup(srchval)) == NULL)
2089 return (NULL);
2090
2091 if ((p = strrchr(type, '-')) == NULL) {
2092 free(type);
2093 break;
2094 }
2095 idx = p + 1;
2096 *p = '\0';
2097
2098 /*
2099 * If the types don't match then keep looking.
2100 */
2101 if (strncmp(val, type, strlen(val)) != 0) {
2102 free(type);
2103 break;
2104 }
2105
2106 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2107 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2108 strncmp(type, VDEV_TYPE_MIRROR,
2109 strlen(VDEV_TYPE_MIRROR)) == 0);
2110 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2111 &id) == 0);
2112
2113 errno = 0;
2114 vdev_id = strtoull(idx, &end, 10);
2115
2116 free(type);
2117 if (errno != 0)
2118 return (NULL);
2119
2120 /*
2121 * Now verify that we have the correct vdev id.
2122 */
2123 if (vdev_id == id)
2124 return (nv);
9babb374 2125 }
34dc7c2f 2126
34dc7c2f 2127 /*
9babb374 2128 * Common case
34dc7c2f 2129 */
9babb374 2130 if (strcmp(srchval, val) == 0)
34dc7c2f 2131 return (nv);
9babb374
BB
2132 break;
2133 }
2134
2135 default:
2136 break;
34dc7c2f
BB
2137 }
2138
2139 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2140 &child, &children) != 0)
2141 return (NULL);
2142
b128c09f 2143 for (c = 0; c < children; c++) {
9babb374 2144 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f
BB
2145 avail_spare, l2cache, NULL)) != NULL) {
2146 /*
2147 * The 'is_log' value is only set for the toplevel
2148 * vdev, not the leaf vdevs. So we always look up the
2149 * log device from the root of the vdev tree (where
2150 * 'log' is non-NULL).
2151 */
2152 if (log != NULL &&
2153 nvlist_lookup_uint64(child[c],
2154 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2155 is_log) {
2156 *log = B_TRUE;
2157 }
34dc7c2f 2158 return (ret);
b128c09f
BB
2159 }
2160 }
34dc7c2f
BB
2161
2162 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2163 &child, &children) == 0) {
2164 for (c = 0; c < children; c++) {
9babb374 2165 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 2166 avail_spare, l2cache, NULL)) != NULL) {
34dc7c2f
BB
2167 *avail_spare = B_TRUE;
2168 return (ret);
2169 }
2170 }
2171 }
2172
2173 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2174 &child, &children) == 0) {
2175 for (c = 0; c < children; c++) {
9babb374 2176 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 2177 avail_spare, l2cache, NULL)) != NULL) {
34dc7c2f
BB
2178 *l2cache = B_TRUE;
2179 return (ret);
2180 }
2181 }
2182 }
2183
2184 return (NULL);
2185}
2186
9babb374
BB
2187/*
2188 * Given a physical path (minus the "/devices" prefix), find the
2189 * associated vdev.
2190 */
2191nvlist_t *
2192zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2193 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2194{
2195 nvlist_t *search, *nvroot, *ret;
2196
2197 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2198 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2199
2200 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2201 &nvroot) == 0);
2202
2203 *avail_spare = B_FALSE;
572e2857
BB
2204 *l2cache = B_FALSE;
2205 if (log != NULL)
2206 *log = B_FALSE;
9babb374
BB
2207 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2208 nvlist_free(search);
2209
2210 return (ret);
2211}
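/*
 * Illustrative sketch, not part of this file: resolving a physical path to
 * its vdev nvlist. The physpath string below is hypothetical; real values
 * come from the ZPOOL_CONFIG_PHYS_PATH entries stored in the pool config.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *nv = zpool_find_vdev_by_physpath(zhp,
 *	    "/pci@0,0/pci1000,8000@16/disk@0,0:a", &spare, &l2cache, &log);
 *	if (nv == NULL)
 *		...   (no vdev with that phys_path in this pool)
 */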
2212
428870ff
BB
2213/*
2214 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2215 */
2216boolean_t
2217zpool_vdev_is_interior(const char *name)
2218{
2219 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2220 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2221 return (B_TRUE);
2222 return (B_FALSE);
2223}
2224
34dc7c2f
BB
2225nvlist_t *
2226zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
b128c09f 2227 boolean_t *l2cache, boolean_t *log)
34dc7c2f 2228{
34dc7c2f 2229 char *end;
9babb374 2230 nvlist_t *nvroot, *search, *ret;
34dc7c2f
BB
2231 uint64_t guid;
2232
9babb374
BB
2233 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2234
ea04106b 2235 guid = strtoull(path, &end, 0);
34dc7c2f 2236 if (guid != 0 && *end == '\0') {
9babb374 2237 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
428870ff
BB
2238 } else if (zpool_vdev_is_interior(path)) {
2239 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
34dc7c2f 2240 } else {
9babb374 2241 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
34dc7c2f
BB
2242 }
2243
2244 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2245 &nvroot) == 0);
2246
2247 *avail_spare = B_FALSE;
2248 *l2cache = B_FALSE;
b128c09f
BB
2249 if (log != NULL)
2250 *log = B_FALSE;
9babb374
BB
2251 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2252 nvlist_free(search);
2253
2254 return (ret);
b128c09f
BB
2255}
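/*
 * Illustrative sketch, not part of this file: the three lookup forms
 * zpool_find_vdev() accepts, per the dispatch above -- a numeric guid,
 * an interior vdev name, and a device path (example values hypothetical).
 *
 *	boolean_t spare, l2cache, log;
 *	(void) zpool_find_vdev(zhp, "12345678901234567",
 *	    &spare, &l2cache, &log);
 *	(void) zpool_find_vdev(zhp, "mirror-0", &spare, &l2cache, &log);
 *	(void) zpool_find_vdev(zhp, "/dev/sda", &spare, &l2cache, &log);
 */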
2256
2257static int
cae5b340 2258vdev_is_online(nvlist_t *nv)
b128c09f
BB
2259{
2260 uint64_t ival;
2261
2262 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2263 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2264 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2265 return (0);
2266
2267 return (1);
2268}
2269
2270/*
9babb374 2271 * Helper function for zpool_get_physpath().
b128c09f 2272 */
9babb374
BB
2273static int
2274vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2275 size_t *bytes_written)
2276{
2277 size_t bytes_left, pos, rsz;
2278 char *tmppath;
2279 const char *format;
2280
2281 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2282 &tmppath) != 0)
2283 return (EZFS_NODEVICE);
2284
2285 pos = *bytes_written;
2286 bytes_left = physpath_size - pos;
2287 format = (pos == 0) ? "%s" : " %s";
2288
2289 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2290 *bytes_written += rsz;
2291
2292 if (rsz >= bytes_left) {
2293 /* if physpath was not copied properly, clear it */
2294 if (bytes_left != 0) {
2295 physpath[pos] = 0;
2296 }
2297 return (EZFS_NOSPC);
2298 }
2299 return (0);
2300}
2301
2302static int
2303vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2304 size_t *rsz, boolean_t is_spare)
2305{
2306 char *type;
2307 int ret;
2308
2309 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2310 return (EZFS_INVALCONFIG);
2311
2312 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2313 /*
2314 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2315 * For a spare vdev, we only want to boot from the active
2316 * spare device.
2317 */
2318 if (is_spare) {
2319 uint64_t spare = 0;
2320 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2321 &spare);
2322 if (!spare)
2323 return (EZFS_INVALCONFIG);
2324 }
2325
cae5b340 2326 if (vdev_is_online(nv)) {
9babb374
BB
2327 if ((ret = vdev_get_one_physpath(nv, physpath,
2328 phypath_size, rsz)) != 0)
2329 return (ret);
2330 }
2331 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
cae5b340 2332 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
9babb374
BB
2333 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2334 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2335 nvlist_t **child;
2336 uint_t count;
2337 int i, ret;
2338
2339 if (nvlist_lookup_nvlist_array(nv,
2340 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2341 return (EZFS_INVALCONFIG);
2342
2343 for (i = 0; i < count; i++) {
2344 ret = vdev_get_physpaths(child[i], physpath,
2345 phypath_size, rsz, is_spare);
2346 if (ret == EZFS_NOSPC)
2347 return (ret);
2348 }
2349 }
2350
2351 return (EZFS_POOL_INVALARG);
2352}
2353
2354/*
2355 * Get phys_path for a root pool config.
2356 * Return 0 on success; non-zero on failure.
2357 */
2358static int
2359zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
b128c09f 2360{
9babb374 2361 size_t rsz;
b128c09f
BB
2362 nvlist_t *vdev_root;
2363 nvlist_t **child;
2364 uint_t count;
9babb374 2365 char *type;
b128c09f 2366
9babb374 2367 rsz = 0;
b128c09f 2368
9babb374
BB
2369 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2370 &vdev_root) != 0)
2371 return (EZFS_INVALCONFIG);
b128c09f 2372
9babb374
BB
2373 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2374 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
b128c09f 2375 &child, &count) != 0)
9babb374 2376 return (EZFS_INVALCONFIG);
b128c09f 2377
9babb374 2378 /*
cae5b340 2379 * A root pool can only have a single top-level vdev.
9babb374 2380 */
cae5b340 2381 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
9babb374 2382 return (EZFS_POOL_INVALARG);
b128c09f 2383
9babb374
BB
2384 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2385 B_FALSE);
2386
2387 /* No online devices */
2388 if (rsz == 0)
2389 return (EZFS_NODEVICE);
b128c09f
BB
2390
2391 return (0);
34dc7c2f
BB
2392}
2393
9babb374
BB
2394/*
2395 * Get phys_path for a root pool.
2396 * Return 0 on success; non-zero on failure.
2397 */
2398int
2399zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2400{
2401 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2402 phypath_size));
2403}
2404
9babb374
BB
2405/*
2406 * If the device has been dynamically expanded, then we need to relabel
2407 * the disk to use the new unallocated space.
2408 */
2409static int
8adf4864 2410zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
9babb374 2411{
9babb374 2412 int fd, error;
9babb374 2413
d603ed6c 2414 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
9babb374 2415 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
109491a8 2416 "relabel '%s': unable to open device: %d"), path, errno);
8adf4864 2417 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
9babb374
BB
2418 }
2419
2420 /*
2421 * It's possible that we might encounter an error if the device
2422 * does not have any unallocated space left. If so, we simply
2423 * ignore that error and continue on.
b5a28807
ED
2424 *
2425 * Also, we don't call efi_rescan() - that would just return EBUSY.
2426 * The module will do it for us in vdev_disk_open().
9babb374 2427 */
d603ed6c 2428 error = efi_use_whole_disk(fd);
cae5b340
AX
2429
2430 /* Flush the buffers to disk and invalidate the page cache. */
2431 (void) fsync(fd);
2432 (void) ioctl(fd, BLKFLSBUF);
2433
9babb374
BB
2434 (void) close(fd);
2435 if (error && error != VT_ENOSPC) {
2436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
d603ed6c 2437 "relabel '%s': unable to read disk capacity"), path);
8adf4864 2438 return (zfs_error(hdl, EZFS_NOCAP, msg));
9babb374 2439 }
cae5b340 2440
9babb374
BB
2441 return (0);
2442}
2443
cae5b340
AX
2444/*
2445 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2446 *
2447 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
2448 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
2449 * ignore them.
2450 */
2451static uint64_t
2452zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2453 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2454{
2455 uint64_t guid;
2456 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2457 nvlist_t *tgt;
2458
2459 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2460 &log)) == NULL)
2461 return (0);
2462
2463 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2464 if (is_spare != NULL)
2465 *is_spare = spare;
2466 if (is_l2cache != NULL)
2467 *is_l2cache = l2cache;
2468 if (is_log != NULL)
2469 *is_log = log;
2470
2471 return (guid);
2472}
2473
2474/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2475uint64_t
2476zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2477{
2478 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2479}
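/*
 * Illustrative sketch, not part of this file: since no real vdev has
 * guid 0, the zero return doubles as the not-found indication.
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda");
 *	if (guid == 0)
 *		...   (path does not name a vdev in this pool)
 */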
2480
34dc7c2f
BB
2481/*
2482 * Bring the specified vdev online. The 'flags' parameter is a set of the
2483 * ZFS_ONLINE_* flags.
2484 */
2485int
2486zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2487 vdev_state_t *newstate)
2488{
a08ee875 2489 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2490 char msg[1024];
2491 nvlist_t *tgt;
9babb374 2492 boolean_t avail_spare, l2cache, islog;
34dc7c2f 2493 libzfs_handle_t *hdl = zhp->zpool_hdl;
8adf4864 2494 int error;
34dc7c2f 2495
9babb374
BB
2496 if (flags & ZFS_ONLINE_EXPAND) {
2497 (void) snprintf(msg, sizeof (msg),
2498 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2499 } else {
2500 (void) snprintf(msg, sizeof (msg),
2501 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2502 }
34dc7c2f
BB
2503
2504 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 2505 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
9babb374 2506 &islog)) == NULL)
34dc7c2f
BB
2507 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2508
2509 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2510
428870ff 2511 if (avail_spare)
34dc7c2f
BB
2512 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2513
9babb374
BB
2514 if (flags & ZFS_ONLINE_EXPAND ||
2515 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
9babb374
BB
2516 uint64_t wholedisk = 0;
2517
2518 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2519 &wholedisk);
9babb374
BB
2520
2521 /*
2522 * XXX - L2ARC 1.0 devices can't support expansion.
2523 */
2524 if (l2cache) {
2525 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2526 "cannot expand cache devices"));
2527 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2528 }
2529
2530 if (wholedisk) {
7608bd0d
ED
2531 const char *fullpath = path;
2532 char buf[MAXPATHLEN];
2533
2534 if (path[0] != '/') {
2535 error = zfs_resolve_shortname(path, buf,
a08ee875 2536 sizeof (buf));
7608bd0d
ED
2537 if (error != 0)
2538 return (zfs_error(hdl, EZFS_NODEVICE,
2539 msg));
2540
2541 fullpath = buf;
2542 }
2543
2544 error = zpool_relabel_disk(hdl, fullpath, msg);
8adf4864
ED
2545 if (error != 0)
2546 return (error);
9babb374
BB
2547 }
2548 }
2549
34dc7c2f
BB
2550 zc.zc_cookie = VDEV_STATE_ONLINE;
2551 zc.zc_obj = flags;
2552
572e2857 2553 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
428870ff
BB
2554 if (errno == EINVAL) {
2555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2556 "from this pool into a new one. Use '%s' "
2557 "instead"), "zpool detach");
2558 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2559 }
34dc7c2f 2560 return (zpool_standard_error(hdl, errno, msg));
428870ff 2561 }
34dc7c2f
BB
2562
2563 *newstate = zc.zc_cookie;
2564 return (0);
2565}
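/*
 * Illustrative sketch, not part of this file: onlining a device and asking
 * it to expand into any newly grown capacity. 'newstate' reports the state
 * the vdev actually reached, which is not necessarily healthy.
 *
 *	vdev_state_t newstate;
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate != VDEV_STATE_HEALTHY)
 *		...   (device is back, but degraded or faulted)
 */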
2566
2567/*
2568 * Take the specified vdev offline
2569 */
2570int
2571zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2572{
a08ee875 2573 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2574 char msg[1024];
2575 nvlist_t *tgt;
2576 boolean_t avail_spare, l2cache;
2577 libzfs_handle_t *hdl = zhp->zpool_hdl;
2578
2579 (void) snprintf(msg, sizeof (msg),
2580 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2581
2582 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2583 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2584 NULL)) == NULL)
34dc7c2f
BB
2585 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2586
2587 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2588
428870ff 2589 if (avail_spare)
34dc7c2f
BB
2590 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2591
34dc7c2f
BB
2592 zc.zc_cookie = VDEV_STATE_OFFLINE;
2593 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2594
572e2857 2595 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2596 return (0);
2597
2598 switch (errno) {
2599 case EBUSY:
2600
2601 /*
2602 * There are no other replicas of this device.
2603 */
2604 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2605
9babb374
BB
2606 case EEXIST:
2607 /*
2608 * The log device has unplayed logs
2609 */
2610 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2611
34dc7c2f
BB
2612 default:
2613 return (zpool_standard_error(hdl, errno, msg));
2614 }
2615}
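/*
 * Illustrative sketch, not part of this file: a temporary offline
 * (istmp = B_TRUE) sets ZFS_OFFLINE_TEMPORARY, so the device comes back
 * online the next time the pool is imported.
 *
 *	if (zpool_vdev_offline(zhp, "sda", B_TRUE) != 0)
 *		...   (failure was already reported through the libzfs handle)
 */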
2616
2617/*
2618 * Mark the given vdev faulted.
2619 */
2620int
428870ff 2621zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2622{
a08ee875 2623 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2624 char msg[1024];
2625 libzfs_handle_t *hdl = zhp->zpool_hdl;
2626
2627 (void) snprintf(msg, sizeof (msg),
a08ee875 2628 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2629
2630 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2631 zc.zc_guid = guid;
2632 zc.zc_cookie = VDEV_STATE_FAULTED;
428870ff 2633 zc.zc_obj = aux;
34dc7c2f 2634
572e2857 2635 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2636 return (0);
2637
2638 switch (errno) {
2639 case EBUSY:
2640
2641 /*
2642 * There are no other replicas of this device.
2643 */
2644 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2645
2646 default:
2647 return (zpool_standard_error(hdl, errno, msg));
2648 }
2649
2650}
2651
2652/*
2653 * Mark the given vdev degraded.
2654 */
2655int
428870ff 2656zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2657{
a08ee875 2658 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2659 char msg[1024];
2660 libzfs_handle_t *hdl = zhp->zpool_hdl;
2661
2662 (void) snprintf(msg, sizeof (msg),
a08ee875 2663 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2664
2665 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2666 zc.zc_guid = guid;
2667 zc.zc_cookie = VDEV_STATE_DEGRADED;
428870ff 2668 zc.zc_obj = aux;
34dc7c2f 2669
572e2857 2670 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2671 return (0);
2672
2673 return (zpool_standard_error(hdl, errno, msg));
2674}
2675
2676/*
2677 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2678 * a hot spare.
2679 */
2680static boolean_t
2681is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2682{
2683 nvlist_t **child;
2684 uint_t c, children;
2685 char *type;
2686
2687 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2688 &children) == 0) {
2689 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2690 &type) == 0);
2691
2692 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2693 children == 2 && child[which] == tgt)
2694 return (B_TRUE);
2695
2696 for (c = 0; c < children; c++)
2697 if (is_replacing_spare(child[c], tgt, which))
2698 return (B_TRUE);
2699 }
2700
2701 return (B_FALSE);
2702}
2703
2704/*
2705 * Attach new_disk (fully described by nvroot) to old_disk.
2706 * If 'replacing' is specified, the new disk will replace the old one.
2707 */
2708int
2709zpool_vdev_attach(zpool_handle_t *zhp,
2710 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2711{
a08ee875 2712 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2713 char msg[1024];
2714 int ret;
2715 nvlist_t *tgt;
b128c09f
BB
2716 boolean_t avail_spare, l2cache, islog;
2717 uint64_t val;
572e2857 2718 char *newname;
34dc7c2f
BB
2719 nvlist_t **child;
2720 uint_t children;
2721 nvlist_t *config_root;
2722 libzfs_handle_t *hdl = zhp->zpool_hdl;
1bd201e7 2723 boolean_t rootpool = zpool_is_bootable(zhp);
34dc7c2f
BB
2724
2725 if (replacing)
2726 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2727 "cannot replace %s with %s"), old_disk, new_disk);
2728 else
2729 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2730 "cannot attach %s to %s"), new_disk, old_disk);
2731
2732 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2733 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2734 &islog)) == 0)
34dc7c2f
BB
2735 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2736
2737 if (avail_spare)
2738 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2739
2740 if (l2cache)
2741 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2742
2743 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2744 zc.zc_cookie = replacing;
2745
2746 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2747 &child, &children) != 0 || children != 1) {
2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2749 "new device must be a single disk"));
2750 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2751 }
2752
2753 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2754 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2755
4e820b5a 2756 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
b128c09f
BB
2757 return (-1);
2758
34dc7c2f
BB
2759 /*
2760 * If the target is a hot spare that has been swapped in, we can only
2761 * replace it with another hot spare.
2762 */
2763 if (replacing &&
2764 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
b128c09f
BB
2765 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2766 NULL) == NULL || !avail_spare) &&
2767 is_replacing_spare(config_root, tgt, 1)) {
34dc7c2f
BB
2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2769 "can only be replaced by another hot spare"));
b128c09f 2770 free(newname);
34dc7c2f
BB
2771 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2772 }
2773
b128c09f
BB
2774 free(newname);
2775
34dc7c2f
BB
2776 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2777 return (-1);
2778
572e2857 2779 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
34dc7c2f
BB
2780
2781 zcmd_free_nvlists(&zc);
2782
b128c09f
BB
2783 if (ret == 0) {
2784 if (rootpool) {
9babb374
BB
2785 /*
2786 * XXX need a better way to prevent user from
2787 * booting up a half-baked vdev.
2788 */
2789 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2790 "sure to wait until resilver is done "
2791 "before rebooting.\n"));
b128c09f 2792 }
34dc7c2f 2793 return (0);
b128c09f 2794 }
34dc7c2f
BB
2795
2796 switch (errno) {
2797 case ENOTSUP:
2798 /*
2799 * Can't attach to or replace this type of vdev.
2800 */
2801 if (replacing) {
572e2857
BB
2802 uint64_t version = zpool_get_prop_int(zhp,
2803 ZPOOL_PROP_VERSION, NULL);
2804
b128c09f 2805 if (islog)
34dc7c2f
BB
2806 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2807 "cannot replace a log with a spare"));
572e2857
BB
2808 else if (version >= SPA_VERSION_MULTI_REPLACE)
2809 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2810 "already in replacing/spare config; wait "
2811 "for completion or use 'zpool detach'"));
34dc7c2f
BB
2812 else
2813 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2814 "cannot replace a replacing device"));
2815 } else {
2816 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2817 "can only attach to mirrors and top-level "
2818 "disks"));
2819 }
2820 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2821 break;
2822
2823 case EINVAL:
2824 /*
2825 * The new device must be a single disk.
2826 */
2827 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2828 "new device must be a single disk"));
2829 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2830 break;
2831
2832 case EBUSY:
2833 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2834 new_disk);
2835 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2836 break;
2837
2838 case EOVERFLOW:
2839 /*
2840 * The new device is too small.
2841 */
2842 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2843 "device is too small"));
2844 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2845 break;
2846
2847 case EDOM:
2848 /*
ea04106b 2849 * The new device has a different optimal sector size.
34dc7c2f
BB
2850 */
2851 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
ea04106b
AX
2852 "new device has a different optimal sector size; use the "
2853 "option '-o ashift=N' to override the optimal size"));
34dc7c2f
BB
2854 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2855 break;
2856
2857 case ENAMETOOLONG:
2858 /*
2859 * The resulting top-level vdev spec won't fit in the label.
2860 */
2861 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2862 break;
2863
2864 default:
2865 (void) zpool_standard_error(hdl, errno, msg);
2866 }
2867
2868 return (-1);
2869}
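/*
 * Illustrative sketch, not part of this file, of the nvroot shape this
 * function expects: a root vdev with exactly one disk child, matching the
 * EINVAL handling above. Real callers (e.g. zpool(8)) build this through
 * their full vdev-spec parser and label the disk first; the hand-rolled
 * nvlist below is a simplified approximation, not a complete spec.
 *
 *	nvlist_t *disk, *nvroot;
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb1") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	(void) zpool_vdev_attach(zhp, "/dev/sda1", "/dev/sdb1", nvroot, 1);
 */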
2870
2871/*
2872 * Detach the specified device.
2873 */
2874int
2875zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2876{
a08ee875 2877 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2878 char msg[1024];
2879 nvlist_t *tgt;
2880 boolean_t avail_spare, l2cache;
2881 libzfs_handle_t *hdl = zhp->zpool_hdl;
2882
2883 (void) snprintf(msg, sizeof (msg),
2884 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2885
2886 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2887 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2888 NULL)) == 0)
34dc7c2f
BB
2889 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2890
2891 if (avail_spare)
2892 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2893
2894 if (l2cache)
2895 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2896
2897 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2898
2899 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2900 return (0);
2901
2902 switch (errno) {
2903
2904 case ENOTSUP:
2905 /*
2906 * Can't detach from this type of vdev.
2907 */
2908 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2909 "applicable to mirror and replacing vdevs"));
572e2857 2910 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
34dc7c2f
BB
2911 break;
2912
2913 case EBUSY:
2914 /*
2915 * There are no other replicas of this device.
2916 */
2917 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2918 break;
2919
2920 default:
2921 (void) zpool_standard_error(hdl, errno, msg);
2922 }
2923
2924 return (-1);
2925}
2926
428870ff
BB
2927/*
2928 * Find a mirror vdev in the source nvlist.
2929 *
2930 * The mchild array contains a list of disks in one of the top-level mirrors
2931 * of the source pool. The schild array contains a list of disks that the
2932 * user specified on the command line. We loop over the mchild array to
2933 * see if any entry in the schild array matches.
2934 *
2935 * If a disk in the mchild array is found in the schild array, we return
2936 * the index of that entry. Otherwise we return -1.
2937 */
2938static int
2939find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2940 nvlist_t **schild, uint_t schildren)
2941{
2942 uint_t mc;
2943
2944 for (mc = 0; mc < mchildren; mc++) {
2945 uint_t sc;
2946 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
4e820b5a 2947 mchild[mc], 0);
428870ff
BB
2948
2949 for (sc = 0; sc < schildren; sc++) {
2950 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
4e820b5a 2951 schild[sc], 0);
428870ff
BB
2952 boolean_t result = (strcmp(mpath, spath) == 0);
2953
2954 free(spath);
2955 if (result) {
2956 free(mpath);
2957 return (mc);
2958 }
2959 }
2960
2961 free(mpath);
2962 }
2963
2964 return (-1);
2965}
2966
2967/*
2968 * Split a mirror pool. If newroot points to NULL, then a new nvlist
2969 * is generated and it is the responsibility of the caller to free it.
2970 */
2971int
2972zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2973 nvlist_t *props, splitflags_t flags)
2974{
a08ee875 2975 zfs_cmd_t zc = {"\0"};
428870ff
BB
2976 char msg[1024];
2977 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2978 nvlist_t **varray = NULL, *zc_props = NULL;
2979 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2980 libzfs_handle_t *hdl = zhp->zpool_hdl;
2981 uint64_t vers;
2982 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2983 int retval = 0;
2984
2985 (void) snprintf(msg, sizeof (msg),
2986 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2987
2988 if (!zpool_name_valid(hdl, B_FALSE, newname))
2989 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2990
2991 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2992 (void) fprintf(stderr, gettext("Internal error: unable to "
2993 "retrieve pool configuration\n"));
2994 return (-1);
2995 }
2996
2997 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2998 == 0);
2999 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3000
3001 if (props) {
572e2857 3002 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
428870ff 3003 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
572e2857 3004 props, vers, flags, msg)) == NULL)
428870ff
BB
3005 return (-1);
3006 }
3007
3008 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3009 &children) != 0) {
3010 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3011 "Source pool is missing vdev tree"));
cae5b340 3012 nvlist_free(zc_props);
428870ff
BB
3013 return (-1);
3014 }
3015
3016 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3017 vcount = 0;
3018
3019 if (*newroot == NULL ||
3020 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3021 &newchild, &newchildren) != 0)
3022 newchildren = 0;
3023
3024 for (c = 0; c < children; c++) {
3025 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3026 char *type;
3027 nvlist_t **mchild, *vdev;
3028 uint_t mchildren;
3029 int entry;
3030
3031 /*
3032 * Unlike cache & spares, slogs are stored in the
3033 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3034 */
3035 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3036 &is_log);
3037 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3038 &is_hole);
3039 if (is_log || is_hole) {
3040 /*
3041 * Create a hole vdev and put it in the config.
3042 */
3043 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3044 goto out;
3045 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3046 VDEV_TYPE_HOLE) != 0)
3047 goto out;
3048 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3049 1) != 0)
3050 goto out;
3051 if (lastlog == 0)
3052 lastlog = vcount;
3053 varray[vcount++] = vdev;
3054 continue;
3055 }
3056 lastlog = 0;
3057 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3058 == 0);
3059 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3060 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3061 "Source pool must be composed only of mirrors\n"));
3062 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3063 goto out;
3064 }
3065
3066 verify(nvlist_lookup_nvlist_array(child[c],
3067 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3068
3069 /* find or add an entry for this top-level vdev */
3070 if (newchildren > 0 &&
3071 (entry = find_vdev_entry(zhp, mchild, mchildren,
3072 newchild, newchildren)) >= 0) {
3073 /* We found a disk that the user specified. */
3074 vdev = mchild[entry];
3075 ++found;
3076 } else {
3077 /* User didn't specify a disk for this vdev. */
3078 vdev = mchild[mchildren - 1];
3079 }
3080
3081 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3082 goto out;
3083 }
3084
3085 /* did we find every disk the user specified? */
3086 if (found != newchildren) {
3087 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3088 "include at most one disk from each mirror"));
3089 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3090 goto out;
3091 }
3092
3093 /* Prepare the nvlist for populating. */
3094 if (*newroot == NULL) {
3095 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3096 goto out;
3097 freelist = B_TRUE;
3098 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3099 VDEV_TYPE_ROOT) != 0)
3100 goto out;
3101 } else {
3102 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3103 }
3104
3105 /* Add all the children we found */
3106 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3107 lastlog == 0 ? vcount : lastlog) != 0)
3108 goto out;
3109
3110 /*
3111 * If we're just doing a dry run, exit now with success.
3112 */
3113 if (flags.dryrun) {
3114 memory_err = B_FALSE;
3115 freelist = B_FALSE;
3116 goto out;
3117 }
3118
3119 /* now build up the config list & call the ioctl */
3120 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3121 goto out;
3122
3123 if (nvlist_add_nvlist(newconfig,
3124 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3125 nvlist_add_string(newconfig,
3126 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3127 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3128 goto out;
3129
3130 /*
3131 * The new pool is automatically part of the namespace unless we
3132 * explicitly export it.
3133 */
3134 if (!flags.import)
3135 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3136 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3137 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3138 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3139 goto out;
3140 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3141 goto out;
3142
3143 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3144 retval = zpool_standard_error(hdl, errno, msg);
3145 goto out;
3146 }
3147
3148 freelist = B_FALSE;
3149 memory_err = B_FALSE;
3150
3151out:
3152 if (varray != NULL) {
3153 int v;
3154
3155 for (v = 0; v < vcount; v++)
3156 nvlist_free(varray[v]);
3157 free(varray);
3158 }
3159 zcmd_free_nvlists(&zc);
cae5b340
AX
3160 nvlist_free(zc_props);
3161 nvlist_free(newconfig);
428870ff
BB
3162 if (freelist) {
3163 nvlist_free(*newroot);
3164 *newroot = NULL;
3165 }
3166
3167 if (retval != 0)
3168 return (retval);
3169
3170 if (memory_err)
3171 return (no_memory(hdl));
3172
3173 return (0);
3174}
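/*
 * Illustrative sketch, not part of this file: a dry-run split. With no
 * device list supplied, the loop above picks the last disk of each mirror;
 * the would-be config comes back in 'newroot' for the caller to free.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */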
3175
34dc7c2f 3176/*
cae5b340
AX
3177 * Remove the given device. Currently, this is supported only for hot spares,
3178 * cache, and log devices.
34dc7c2f
BB
3179 */
3180int
3181zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3182{
a08ee875 3183 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3184 char msg[1024];
3185 nvlist_t *tgt;
428870ff 3186 boolean_t avail_spare, l2cache, islog;
34dc7c2f 3187 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 3188 uint64_t version;
34dc7c2f
BB
3189
3190 (void) snprintf(msg, sizeof (msg),
3191 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3192
3193 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 3194 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
428870ff 3195 &islog)) == 0)
34dc7c2f 3196 return (zfs_error(hdl, EZFS_NODEVICE, msg));
428870ff
BB
3197 /*
3198 * XXX - this should just go away.
3199 */
3200 if (!avail_spare && !l2cache && !islog) {
34dc7c2f 3201 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
cae5b340 3202 "only inactive hot spares, cache, "
428870ff 3203 "or log devices can be removed"));
34dc7c2f
BB
3204 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3205 }
3206
428870ff
BB
3207 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3208 if (islog && version < SPA_VERSION_HOLES) {
3209 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3210 "pool must be upgrade to support log removal"));
3211 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3212 }
3213
34dc7c2f
BB
3214 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3215
3216 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3217 return (0);
3218
3219 return (zpool_standard_error(hdl, errno, msg));
3220}
3221
3222/*
3223 * Clear the errors for the pool, or the particular device if specified.
3224 */
3225int
428870ff 3226zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
34dc7c2f 3227{
a08ee875 3228 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3229 char msg[1024];
3230 nvlist_t *tgt;
428870ff 3231 zpool_rewind_policy_t policy;
34dc7c2f
BB
3232 boolean_t avail_spare, l2cache;
3233 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 3234 nvlist_t *nvi = NULL;
572e2857 3235 int error;
34dc7c2f
BB
3236
3237 if (path)
3238 (void) snprintf(msg, sizeof (msg),
3239 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3240 path);
3241 else
3242 (void) snprintf(msg, sizeof (msg),
3243 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3244 zhp->zpool_name);
3245
3246 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3247 if (path) {
3248 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
b128c09f 3249 &l2cache, NULL)) == 0)
34dc7c2f
BB
3250 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3251
3252 /*
3253 * Don't allow error clearing for hot spares. Do allow
3254 * error clearing for l2cache devices.
3255 */
3256 if (avail_spare)
3257 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3258
3259 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3260 &zc.zc_guid) == 0);
3261 }
3262
428870ff
BB
3263 zpool_get_rewind_policy(rewindnvl, &policy);
3264 zc.zc_cookie = policy.zrp_request;
3265
572e2857 3266 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
428870ff
BB
3267 return (-1);
3268
572e2857 3269 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
428870ff
BB
3270 return (-1);
3271
572e2857
BB
3272 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3273 errno == ENOMEM) {
3274 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3275 zcmd_free_nvlists(&zc);
3276 return (-1);
3277 }
3278 }
3279
3280 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
428870ff
BB
3281 errno != EPERM && errno != EACCES)) {
3282 if (policy.zrp_request &
3283 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3284 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3285 zpool_rewind_exclaim(hdl, zc.zc_name,
3286 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3287 nvi);
3288 nvlist_free(nvi);
3289 }
3290 zcmd_free_nvlists(&zc);
34dc7c2f 3291 return (0);
428870ff 3292 }
34dc7c2f 3293
428870ff 3294 zcmd_free_nvlists(&zc);
34dc7c2f
BB
3295 return (zpool_standard_error(hdl, errno, msg));
3296}
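/*
 * Illustrative sketch, not part of this file: clearing errors on a single
 * device with an explicit no-rewind policy, which is what zpool(8) passes
 * in the common case.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, "sda", policy);
 *	nvlist_free(policy);
 */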
3297
3298/*
3299 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3300 */
3301int
3302zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3303{
a08ee875 3304 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3305 char msg[1024];
3306 libzfs_handle_t *hdl = zhp->zpool_hdl;
3307
3308 (void) snprintf(msg, sizeof (msg),
3309 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
a08ee875 3310 (u_longlong_t)guid);
34dc7c2f
BB
3311
3312 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3313 zc.zc_guid = guid;
428870ff 3314 zc.zc_cookie = ZPOOL_NO_REWIND;
34dc7c2f
BB
3315
3316 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3317 return (0);
3318
3319 return (zpool_standard_error(hdl, errno, msg));
3320}
3321
3541dc6d
GA
3322/*
3323 * Change the GUID for a pool.
3324 */
3325int
3326zpool_reguid(zpool_handle_t *zhp)
3327{
3328 char msg[1024];
3329 libzfs_handle_t *hdl = zhp->zpool_hdl;
a08ee875 3330 zfs_cmd_t zc = {"\0"};
3541dc6d
GA
3331
3332 (void) snprintf(msg, sizeof (msg),
3333 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3334
3335 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3336 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3337 return (0);
3338
3339 return (zpool_standard_error(hdl, errno, msg));
3340}
3341
1bd201e7
CS
3342/*
3343 * Reopen the pool.
3344 */
3345int
3346zpool_reopen(zpool_handle_t *zhp)
3347{
a08ee875 3348 zfs_cmd_t zc = {"\0"};
1bd201e7
CS
3349 char msg[1024];
3350 libzfs_handle_t *hdl = zhp->zpool_hdl;
3351
3352 (void) snprintf(msg, sizeof (msg),
3353 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3354 zhp->zpool_name);
3355
3356 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3357 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3358 return (0);
3359 return (zpool_standard_error(hdl, errno, msg));
3360}
3361
cae5b340
AX
3362/* call into libzfs_core to execute the sync IOCTL per pool */
3363int
3364zpool_sync_one(zpool_handle_t *zhp, void *data)
3365{
3366 int ret;
3367 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3368 const char *pool_name = zpool_get_name(zhp);
3369 boolean_t *force = data;
3370 nvlist_t *innvl = fnvlist_alloc();
3371
3372 fnvlist_add_boolean_value(innvl, "force", *force);
3373 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3374 nvlist_free(innvl);
3375 return (zpool_standard_error_fmt(hdl, ret,
3376 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3377 }
3378 nvlist_free(innvl);
3379
3380 return (0);
3381}
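/*
 * Illustrative sketch, not part of this file: zpool_sync_one() is shaped
 * as a zpool_iter() callback, so syncing every imported pool is one call.
 *
 *	boolean_t force = B_FALSE;
 *	int err = zpool_iter(hdl, zpool_sync_one, &force);
 */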
3382
3383#if defined(__sun__) || defined(__sun)
34dc7c2f
BB
3384/*
3385 * Convert from a devid string to a path.
3386 */
3387static char *
3388devid_to_path(char *devid_str)
3389{
3390 ddi_devid_t devid;
3391 char *minor;
3392 char *path;
3393 devid_nmlist_t *list = NULL;
3394 int ret;
3395
3396 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3397 return (NULL);
3398
3399 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3400
3401 devid_str_free(minor);
3402 devid_free(devid);
3403
3404 if (ret != 0)
3405 return (NULL);
3406
cae5b340
AX
3407 /*
3408 * In case the strdup() fails, we will just return NULL below.
3409 */
3410 path = strdup(list[0].devname);
34dc7c2f
BB
3411
3412 devid_free_nmlist(list);
3413
3414 return (path);
3415}
3416
3417/*
3418 * Convert from a path to a devid string.
3419 */
3420static char *
3421path_to_devid(const char *path)
3422{
3423 int fd;
3424 ddi_devid_t devid;
3425 char *minor, *ret;
3426
3427 if ((fd = open(path, O_RDONLY)) < 0)
3428 return (NULL);
3429
3430 minor = NULL;
3431 ret = NULL;
3432 if (devid_get(fd, &devid) == 0) {
3433 if (devid_get_minor_name(fd, &minor) == 0)
3434 ret = devid_str_encode(devid, minor);
3435 if (minor != NULL)
3436 devid_str_free(minor);
3437 devid_free(devid);
3438 }
3439 (void) close(fd);
3440
3441 return (ret);
3442}
3443
3444/*
3445 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3446 * ignore any failure here, since a common case is for an unprivileged user to
3447 * type 'zpool status', and we'll display the correct information anyway.
3448 */
3449static void
3450set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3451{
a08ee875 3452 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3453
3454 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3455 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3456 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3457 &zc.zc_guid) == 0);
3458
3459 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3460}
cae5b340 3461#endif /* sun */
34dc7c2f 3462
83c62c93
NB
3463/*
3464 * Remove partition suffix from a vdev path. Partition suffixes may take three
3465 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3466 * case only occurs when the suffix is preceded by a digit, e.g. "md0p0". The
3467 * third case only occurs when preceded by a string matching the regular
e10b0808 3468 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
cae5b340
AX
3469 *
3470 * caller must free the returned string
83c62c93 3471 */
cae5b340
AX
3472char *
3473zfs_strip_partition(char *path)
83c62c93 3474{
cae5b340 3475 char *tmp = strdup(path);
83c62c93 3476 char *part = NULL, *d = NULL;
cae5b340
AX
3477 if (!tmp)
3478 return (NULL);
83c62c93
NB
3479
3480 if ((part = strstr(tmp, "-part")) && part != tmp) {
3481 d = part + 5;
3482 } else if ((part = strrchr(tmp, 'p')) &&
3483 part > tmp + 1 && isdigit(*(part-1))) {
3484 d = part + 1;
e10b0808
AX
3485 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3486 tmp[1] == 'd') {
cae5b340 3487 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
e10b0808 3488 } else if (strncmp("xvd", tmp, 3) == 0) {
cae5b340 3489 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
83c62c93
NB
3490 }
3491 if (part && d && *d != '\0') {
cae5b340 3492 for (; isdigit(*d); d++) { }
83c62c93
NB
3493 if (*d == '\0')
3494 *part = '\0';
3495 }
cae5b340 3496
83c62c93
NB
3497 return (tmp);
3498}
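/*
 * Illustrative examples (hypothetical device names) of the three suffix
 * forms handled above; in each case the returned string is a fresh
 * allocation the caller must free:
 *
 *	zfs_strip_partition("sdb-part2")  returns  "sdb"
 *	zfs_strip_partition("md127p1")    returns  "md127"
 *	zfs_strip_partition("sda3")       returns  "sda"
 */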
3499
cae5b340
AX
3500/*
3501 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3502 *
3503 * path: /dev/sda1
3504 * returns: /dev/sda
3505 *
3506 * Returned string must be freed.
3507 */
3508char *
3509zfs_strip_partition_path(char *path)
3510{
3511 char *newpath = strdup(path);
3512 char *sd_offset;
3513 char *new_sd;
3514
3515 if (!newpath)
3516 return (NULL);
3517
3518 /* Point to "sda1" part of "/dev/sda1" */
3519 sd_offset = strrchr(newpath, '/') + 1;
3520
3521 /* Get our new name "sda" */
3522 new_sd = zfs_strip_partition(sd_offset);
3523 if (!new_sd) {
3524 free(newpath);
3525 return (NULL);
3526 }
3527
3528 /* Paste the "sda" where "sda1" was */
3529 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3530
3531 /* Free temporary "sda" */
3532 free(new_sd);
3533
3534 return (newpath);
3535}
3536
858219cc
NB
3537#define PATH_BUF_LEN 64
3538
34dc7c2f
BB
3539/*
3540 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3541 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3542 * We also check if this is a whole disk, in which case we strip off the
3543 * trailing 's0' slice name.
3544 *
3545 * This routine is also responsible for identifying when disks have been
3546 * reconfigured in a new location. The kernel will have opened the device by
3547 * devid, but the path will still refer to the old location. To catch this, we
3548 * first do a path -> devid translation (which is fast for the common case). If
3549 * the devid matches, we're done. If not, we do a reverse devid -> path
3550 * translation and issue the appropriate ioctl() to update the path of the vdev.
3551 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3552 * of these checks.
3553 */
3554char *
428870ff 3555zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
4e820b5a 3556 int name_flags)
34dc7c2f 3557{
cae5b340 3558 char *path, *type, *env;
34dc7c2f 3559 uint64_t value;
858219cc 3560 char buf[PATH_BUF_LEN];
fc24f7c8 3561 char tmpbuf[PATH_BUF_LEN];
34dc7c2f 3562
4e820b5a
AX
3563 env = getenv("ZPOOL_VDEV_NAME_PATH");
3564 if (env && (strtoul(env, NULL, 0) > 0 ||
3565 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3566 name_flags |= VDEV_NAME_PATH;
3567
3568 env = getenv("ZPOOL_VDEV_NAME_GUID");
3569 if (env && (strtoul(env, NULL, 0) > 0 ||
3570 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3571 name_flags |= VDEV_NAME_GUID;
3572
3573 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3574 if (env && (strtoul(env, NULL, 0) > 0 ||
3575 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3576 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3577
3578 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3579 name_flags & VDEV_NAME_GUID) {
cae5b340 3580 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
4e820b5a 3581 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
34dc7c2f
BB
3582 path = buf;
3583 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
cae5b340
AX
3584#if defined(__sun__) || defined(__sun)
3585 /*
3586 * Live VDEV path updates to a kernel VDEV during a
3587 * zpool_vdev_name lookup are not supported on Linux.
3588 */
3589 char *devid;
3590 vdev_stat_t *vs;
3591 uint_t vsc;
3592
34dc7c2f
BB
3593 /*
3594 * If the device is dead (faulted, offline, etc) then don't
3595 * bother opening it. Otherwise we may be forcing the user to
3596 * open a misbehaving device, which can have undesirable
3597 * effects.
3598 */
428870ff 3599 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
34dc7c2f
BB
3600 (uint64_t **)&vs, &vsc) != 0 ||
3601 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3602 zhp != NULL &&
3603 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3604 /*
3605 * Determine if the current path is correct.
3606 */
3607 char *newdevid = path_to_devid(path);
3608
3609 if (newdevid == NULL ||
3610 strcmp(devid, newdevid) != 0) {
3611 char *newpath;
3612
3613 if ((newpath = devid_to_path(devid)) != NULL) {
3614 /*
3615 * Update the path appropriately.
3616 */
3617 set_path(zhp, nv, newpath);
3618 if (nvlist_add_string(nv,
3619 ZPOOL_CONFIG_PATH, newpath) == 0)
3620 verify(nvlist_lookup_string(nv,
3621 ZPOOL_CONFIG_PATH,
3622 &path) == 0);
3623 free(newpath);
3624 }
3625 }
3626
3627 if (newdevid)
3628 devid_str_free(newdevid);
3629 }
cae5b340 3630#endif /* sun */
34dc7c2f 3631
4e820b5a
AX
3632 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3633 char *rp = realpath(path, NULL);
3634 if (rp) {
3635 strlcpy(buf, rp, sizeof (buf));
3636 path = buf;
3637 free(rp);
3638 }
3639 }
3640
d603ed6c
BB
3641 /*
3642 * For a block device only use the name.
3643 */
3644 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
4e820b5a
AX
3645 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3646 !(name_flags & VDEV_NAME_PATH)) {
d603ed6c
BB
3647 path = strrchr(path, '/');
3648 path++;
3649 }
34dc7c2f 3650
d603ed6c 3651 /*
83c62c93 3652 * Remove the partition from the path if this is a whole disk.
d603ed6c 3653 */
4e820b5a
AX
3654 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3655 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
cae5b340 3656 return (zfs_strip_partition(path));
34dc7c2f
BB
3657 }
3658 } else {
3659 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3660
3661 /*
3662 * If it's a raidz device, we need to stick in the parity level.
3663 */
3664 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3665 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3666 &value) == 0);
fc24f7c8 3667 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
34dc7c2f 3668 (u_longlong_t)value);
fc24f7c8 3669 path = buf;
34dc7c2f 3670 }
428870ff
BB
3671
3672 /*
3673 * We identify each top-level vdev by using a <type-id>
3674 * naming convention.
3675 */
4e820b5a 3676 if (name_flags & VDEV_NAME_TYPE_ID) {
428870ff 3677 uint64_t id;
428870ff
BB
3678 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3679 &id) == 0);
fc24f7c8
MM
3680 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3681 path, (u_longlong_t)id);
3682 path = tmpbuf;
428870ff 3683 }
34dc7c2f
BB
3684 }
3685
3686 return (zfs_strdup(hdl, path));
3687}
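/*
 * Illustrative sketch, not part of this file: the same vdev can be named
 * several ways, and the ZPOOL_VDEV_NAME_* environment variables above
 * force the corresponding flags globally.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv, VDEV_NAME_PATH);
 *	char *byguid = zpool_vdev_name(hdl, zhp, nv, VDEV_NAME_GUID);
 *	...
 *	free(name);
 *	free(byguid);
 */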
3688
3689static int
cae5b340 3690zbookmark_mem_compare(const void *a, const void *b)
34dc7c2f 3691{
ea04106b 3692 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
34dc7c2f
BB
3693}
3694
3695/*
3696 * Retrieve the persistent error log, uniquify the members, and return to the
3697 * caller.
3698 */
3699int
3700zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3701{
a08ee875 3702 zfs_cmd_t zc = {"\0"};
cae5b340 3703 libzfs_handle_t *hdl = zhp->zpool_hdl;
34dc7c2f 3704 uint64_t count;
ea04106b 3705 zbookmark_phys_t *zb = NULL;
34dc7c2f
BB
3706 int i;
3707
3708 /*
3709 * Retrieve the raw error list from the kernel. If the number of errors
3710 * has increased, allocate more space and continue until we get the
3711 * entire list.
3712 */
3713 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3714 &count) == 0);
3715 if (count == 0)
3716 return (0);
cae5b340
AX
3717 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3718 count * sizeof (zbookmark_phys_t));
34dc7c2f
BB
3719 zc.zc_nvlist_dst_size = count;
3720 (void) strcpy(zc.zc_name, zhp->zpool_name);
3721 for (;;) {
3722 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3723 &zc) != 0) {
3724 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3725 if (errno == ENOMEM) {
ea04106b
AX
3726 void *dst;
3727
34dc7c2f 3728 count = zc.zc_nvlist_dst_size;
ea04106b
AX
3729 dst = zfs_alloc(zhp->zpool_hdl, count *
3730 sizeof (zbookmark_phys_t));
ea04106b 3731 zc.zc_nvlist_dst = (uintptr_t)dst;
34dc7c2f 3732 } else {
cae5b340
AX
3733 return (zpool_standard_error_fmt(hdl, errno,
3734 dgettext(TEXT_DOMAIN, "errors: List of "
3735 "errors unavailable")));
34dc7c2f
BB
3736 }
3737 } else {
3738 break;
3739 }
3740 }
3741
3742 /*
3743 * Sort the resulting bookmarks. This is a little confusing due to the
3744 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3745 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3746 * _not_ copied as part of the process. So we point the start of our
3747 * array appropriately and decrement the total number of elements.
3748 */
ea04106b 3749 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
34dc7c2f
BB
3750 zc.zc_nvlist_dst_size;
3751 count -= zc.zc_nvlist_dst_size;
3752
cae5b340 3753 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
34dc7c2f
BB
3754
3755 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3756
3757 /*
3758 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3759 */
3760 for (i = 0; i < count; i++) {
3761 nvlist_t *nv;
3762
3763 /* ignoring zb_blkid and zb_level for now */
3764 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3765 zb[i-1].zb_object == zb[i].zb_object)
3766 continue;
3767
3768 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3769 goto nomem;
3770 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3771 zb[i].zb_objset) != 0) {
3772 nvlist_free(nv);
3773 goto nomem;
3774 }
3775 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3776 zb[i].zb_object) != 0) {
3777 nvlist_free(nv);
3778 goto nomem;
3779 }
3780 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3781 nvlist_free(nv);
3782 goto nomem;
3783 }
3784 nvlist_free(nv);
3785 }
3786
3787 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3788 return (0);
3789
3790nomem:
3791 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3792 return (no_memory(zhp->zpool_hdl));
3793}
3794
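/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * one way a caller might walk the uniquified error list built by
 * zpool_get_errlog() above. Error handling is deliberately minimal.
 */
static void
example_walk_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		(void) printf("<0x%llx>:<0x%llx>\n",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
	}
	nvlist_free(nverrlist);
}
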
3795/*
3796 * Upgrade a ZFS pool to the latest on-disk version.
3797 */
3798int
3799zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3800{
a08ee875 3801 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3802 libzfs_handle_t *hdl = zhp->zpool_hdl;
3803
3804 (void) strcpy(zc.zc_name, zhp->zpool_name);
3805 zc.zc_cookie = new_version;
3806
3807 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3808 return (zpool_standard_error_fmt(hdl, errno,
3809 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3810 zhp->zpool_name));
3811 return (0);
3812}
3813
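/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * upgrading a pool to the newest on-disk version supported by this
 * build. Assumes SPA_VERSION is visible through the usual ZFS
 * headers, as it is for the zpool(8) command.
 */
static int
example_upgrade_to_latest(zpool_handle_t *zhp)
{
	return (zpool_upgrade(zhp, SPA_VERSION));
}
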
3814void
a08ee875 3815zfs_save_arguments(int argc, char **argv, char *string, int len)
34dc7c2f
BB
3816{
3817 int i;
3818
a08ee875 3819 (void) strlcpy(string, basename(argv[0]), len);
34dc7c2f 3820 for (i = 1; i < argc; i++) {
a08ee875
LG
3821 (void) strlcat(string, " ", len);
3822 (void) strlcat(string, argv[i], len);
34dc7c2f
BB
3823 }
3824}
3825
34dc7c2f 3826int
a08ee875 3827zpool_log_history(libzfs_handle_t *hdl, const char *message)
34dc7c2f 3828{
a08ee875
LG
3829 zfs_cmd_t zc = {"\0"};
3830 nvlist_t *args;
3831 int err;
3832
3833 args = fnvlist_alloc();
3834 fnvlist_add_string(args, "message", message);
3835 err = zcmd_write_src_nvlist(hdl, &zc, args);
3836 if (err == 0)
3837 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3838 nvlist_free(args);
3839 zcmd_free_nvlists(&zc);
3840 return (err);
34dc7c2f
BB
3841}
3842
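/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * how the command-line tools pair zfs_save_arguments() with
 * zpool_log_history() to record an invocation. The buffer size
 * here is an arbitrary assumption.
 */
static int
example_log_invocation(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history_str[4096];

	zfs_save_arguments(argc, argv, history_str,
	    (int)sizeof (history_str));
	return (zpool_log_history(hdl, history_str));
}
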
3843/*
3844 * Perform ioctl to get some command history of a pool.
3845 *
3846 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3847 * logical offset of the history buffer to start reading from.
3848 *
3849 * Upon return, 'off' is the next logical offset to read from and
3850 * 'len' is the actual number of bytes read into 'buf'.
3851 */
3852static int
3853get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3854{
a08ee875 3855 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3856 libzfs_handle_t *hdl = zhp->zpool_hdl;
3857
3858 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3859
3860 zc.zc_history = (uint64_t)(uintptr_t)buf;
3861 zc.zc_history_len = *len;
3862 zc.zc_history_offset = *off;
3863
3864 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3865 switch (errno) {
3866 case EPERM:
3867 return (zfs_error_fmt(hdl, EZFS_PERM,
3868 dgettext(TEXT_DOMAIN,
3869 "cannot show history for pool '%s'"),
3870 zhp->zpool_name));
3871 case ENOENT:
3872 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3873 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3874 "'%s'"), zhp->zpool_name));
3875 case ENOTSUP:
3876 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3877 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3878 "'%s', pool must be upgraded"), zhp->zpool_name));
3879 default:
3880 return (zpool_standard_error_fmt(hdl, errno,
3881 dgettext(TEXT_DOMAIN,
3882 "cannot get history for '%s'"), zhp->zpool_name));
3883 }
3884 }
3885
3886 *len = zc.zc_history_len;
3887 *off = zc.zc_history_offset;
3888
3889 return (0);
3890}
3891
3892/*
3893 * Process the buffer of nvlists, unpacking and storing each nvlist record
3894 * into 'records'. 'leftover' is set to the number of bytes that weren't
3895 * processed as there wasn't a complete record.
3896 */
428870ff 3897int
34dc7c2f
BB
3898zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3899 nvlist_t ***records, uint_t *numrecords)
3900{
3901 uint64_t reclen;
3902 nvlist_t *nv;
3903 int i;
cae5b340 3904 void *tmp;
34dc7c2f
BB
3905
3906 while (bytes_read > sizeof (reclen)) {
3907
3908 /* get length of packed record (stored as little endian) */
3909 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3910 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3911
3912 if (bytes_read < sizeof (reclen) + reclen)
3913 break;
3914
3915 /* unpack record */
3916 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3917 return (ENOMEM);
3918 bytes_read -= sizeof (reclen) + reclen;
3919 buf += sizeof (reclen) + reclen;
3920
3921 /* add record to nvlist array */
3922 (*numrecords)++;
3923 if (ISP2(*numrecords + 1)) {
cae5b340 3924 tmp = realloc(*records,
34dc7c2f 3925 *numrecords * 2 * sizeof (nvlist_t *));
cae5b340
AX
3926 if (tmp == NULL) {
3927 nvlist_free(nv);
3928 (*numrecords)--;
3929 return (ENOMEM);
3930 }
3931 *records = tmp;
34dc7c2f
BB
3932 }
3933 (*records)[*numrecords - 1] = nv;
3934 }
3935
3936 *leftover = bytes_read;
3937 return (0);
3938}
3939
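/*
 * Sketch of the record framing consumed above (hypothetical helper,
 * not part of libzfs): each record is an 8-byte little-endian length
 * followed by a packed nvlist of that many bytes. Producing one
 * framed record, e.g. for a unit test, might look like this:
 */
static void
example_frame_record(char *dst, const char *packed, uint64_t reclen)
{
	int i;

	/* store the length little endian, byte by byte */
	for (i = 0; i < (int)sizeof (reclen); i++)
		dst[i] = (reclen >> (8 * i)) & 0xff;
	(void) memcpy(dst + sizeof (reclen), packed, reclen);
}
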
34dc7c2f
BB
3940/*
3941 * Retrieve the command history of a pool.
3942 */
3943int
3944zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3945{
ea04106b
AX
3946 char *buf;
3947 int buflen = 128 * 1024;
34dc7c2f
BB
3948 uint64_t off = 0;
3949 nvlist_t **records = NULL;
3950 uint_t numrecords = 0;
3951 int err, i;
3952
ea04106b
AX
3953 buf = malloc(buflen);
3954 if (buf == NULL)
3955 return (ENOMEM);
34dc7c2f 3956 do {
ea04106b 3957 uint64_t bytes_read = buflen;
34dc7c2f
BB
3958 uint64_t leftover;
3959
3960 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3961 break;
3962
3963 /* if nothing else was read in, we're at EOF, just return */
3964 if (!bytes_read)
3965 break;
3966
3967 if ((err = zpool_history_unpack(buf, bytes_read,
3968 &leftover, &records, &numrecords)) != 0)
3969 break;
3970 off -= leftover;
ea04106b
AX
3971 if (leftover == bytes_read) {
3972 /*
3973 * no progress made, because buffer is not big enough
3974 * to hold this record; resize and retry.
3975 */
3976 buflen *= 2;
3977 free(buf);
3978 buf = malloc(buflen);
3979 if (buf == NULL)
3980 return (ENOMEM);
3981 }
34dc7c2f
BB
3982
3983 /* CONSTCOND */
3984 } while (1);
3985
ea04106b
AX
3986 free(buf);
3987
34dc7c2f
BB
3988 if (!err) {
3989 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3990 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3991 records, numrecords) == 0);
3992 }
3993 for (i = 0; i < numrecords; i++)
3994 nvlist_free(records[i]);
3995 free(records);
3996
3997 return (err);
3998}
3999
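/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * printing the command string of each history record returned by
 * zpool_get_history(). Records without a ZPOOL_HIST_CMD entry
 * (internal events) are skipped.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis;
	nvlist_t **records;
	uint_t numrecords, i;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
}
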
26685276 4000/*
ea04106b
AX
4001 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4002 * If there is a new event available 'nvp' will contain a newly allocated
4003 * nvlist and 'dropped' will be set to the number of missed events since
4004 * the last call to this function. When 'nvp' is set to NULL it indicates
4005 * no new events are available. In either case the function returns 0 and
4006 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4007 * function will return a non-zero value. When the function is called in
4008 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4009 * it will not return until a new event is available.
26685276
BB
4010 */
4011int
4012zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
ea04106b 4013 int *dropped, unsigned flags, int zevent_fd)
26685276 4014{
a08ee875 4015 zfs_cmd_t zc = {"\0"};
26685276
BB
4016 int error = 0;
4017
4018 *nvp = NULL;
4019 *dropped = 0;
ea04106b 4020 zc.zc_cleanup_fd = zevent_fd;
26685276 4021
ea04106b 4022 if (flags & ZEVENT_NONBLOCK)
26685276
BB
4023 zc.zc_guid = ZEVENT_NONBLOCK;
4024
4025 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4026 return (-1);
4027
4028retry:
4029 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4030 switch (errno) {
4031 case ESHUTDOWN:
4032 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4033 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4034 goto out;
4035 case ENOENT:
4036 /* Blocking error case should not occur */
ea04106b 4037 if (!(flags & ZEVENT_NONBLOCK))
26685276
BB
4038 error = zpool_standard_error_fmt(hdl, errno,
4039 dgettext(TEXT_DOMAIN, "cannot get event"));
4040
4041 goto out;
4042 case ENOMEM:
4043 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4044 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4045 dgettext(TEXT_DOMAIN, "cannot get event"));
4046 goto out;
4047 } else {
4048 goto retry;
4049 }
4050 default:
4051 error = zpool_standard_error_fmt(hdl, errno,
4052 dgettext(TEXT_DOMAIN, "cannot get event"));
4053 goto out;
4054 }
4055 }
4056
4057 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4058 if (error != 0)
4059 goto out;
4060
4061 *dropped = (int)zc.zc_cookie;
4062out:
4063 zcmd_free_nvlists(&zc);
4064
4065 return (error);
4066}
4067
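/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * draining every currently queued event without blocking. Assumes
 * 'zevent_fd' was obtained by opening ZFS_DEV, as zpool(8) does.
 */
static void
example_drain_events(libzfs_handle_t *hdl, int zevent_fd)
{
	nvlist_t *nvp;
	int dropped;

	for (;;) {
		if (zpool_events_next(hdl, &nvp, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0)
			break;
		if (nvp == NULL)	/* no more events queued */
			break;
		if (dropped > 0)
			(void) printf("%d events dropped\n", dropped);
		nvlist_free(nvp);
	}
}
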
4068/*
4069 * Clear all events.
4070 */
4071int
4072zpool_events_clear(libzfs_handle_t *hdl, int *count)
4073{
a08ee875 4074 zfs_cmd_t zc = {"\0"};
26685276
BB
4075 char msg[1024];
4076
4077 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4078 "cannot clear events"));
4079
4080 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4081 return (zpool_standard_error_fmt(hdl, errno, msg));
4082
4083 if (count != NULL)
4084 *count = (int)zc.zc_cookie; /* # of events cleared */
4085
4086 return (0);
4087}
4088
ea04106b
AX
4089/*
4090 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4091 * the passed zevent_fd file handle. On success zero is returned,
4092 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4093 */
4094int
4095zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4096{
4097 zfs_cmd_t zc = {"\0"};
4098 int error = 0;
4099
4100 zc.zc_guid = eid;
4101 zc.zc_cleanup_fd = zevent_fd;
4102
4103 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4104 switch (errno) {
4105 case ENOENT:
4106 error = zfs_error_fmt(hdl, EZFS_NOENT,
4107 dgettext(TEXT_DOMAIN, "cannot get event"));
4108 break;
4109
4110 case ENOMEM:
4111 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4112 dgettext(TEXT_DOMAIN, "cannot get event"));
4113 break;
4114
4115 default:
4116 error = zpool_standard_error_fmt(hdl, errno,
4117 dgettext(TEXT_DOMAIN, "cannot get event"));
4118 break;
4119 }
4120 }
4121
4122 return (error);
4123}
4124
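/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * rewinding an event stream so it can be re-read from the start.
 */
static int
example_rewind_events(libzfs_handle_t *hdl, int zevent_fd)
{
	return (zpool_events_seek(hdl, ZEVENT_SEEK_START, zevent_fd));
}
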
34dc7c2f
BB
4125void
4126zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4127 char *pathname, size_t len)
4128{
a08ee875 4129 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
4130 boolean_t mounted = B_FALSE;
4131 char *mntpnt = NULL;
cae5b340 4132 char dsname[ZFS_MAX_DATASET_NAME_LEN];
34dc7c2f
BB
4133
4134 if (dsobj == 0) {
4135 /* special case for the MOS */
a08ee875
LG
4136 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4137 (longlong_t)obj);
34dc7c2f
BB
4138 return;
4139 }
4140
4141 /* get the dataset's name */
4142 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4143 zc.zc_obj = dsobj;
4144 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4145 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4146 /* just write out a path of two object numbers */
4147 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
b8864a23 4148 (longlong_t)dsobj, (longlong_t)obj);
34dc7c2f
BB
4149 return;
4150 }
4151 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4152
4153 /* find out if the dataset is mounted */
4154 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4155
4156 /* get the corrupted object's path */
4157 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4158 zc.zc_obj = obj;
4159 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4160 &zc) == 0) {
4161 if (mounted) {
4162 (void) snprintf(pathname, len, "%s%s", mntpnt,
4163 zc.zc_value);
4164 } else {
4165 (void) snprintf(pathname, len, "%s:%s",
4166 dsname, zc.zc_value);
4167 }
4168 } else {
a08ee875
LG
4169 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4170 (longlong_t)obj);
34dc7c2f
BB
4171 }
4172 free(mntpnt);
4173}
4174
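/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * resolving the dataset and object numbers from an error-log entry
 * (see zpool_get_errlog() above) into a printable path.
 */
static void
example_print_error_path(zpool_handle_t *zhp, uint64_t dsobj,
    uint64_t obj)
{
	char pathname[MAXPATHLEN * 2];

	zpool_obj_to_path(zhp, dsobj, obj, pathname,
	    sizeof (pathname));
	(void) printf("%s\n", pathname);
}
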
b128c09f
BB
4175/*
4176 * Read the EFI label from the config, if a label does not exist then
4177 * pass back the error to the caller. If the caller has passed a non-NULL
4178 * diskaddr argument then we set it to the starting address of the EFI
4179 * partition.
4180 */
4181static int
4182read_efi_label(nvlist_t *config, diskaddr_t *sb)
4183{
4184 char *path;
4185 int fd;
4186 char diskname[MAXPATHLEN];
4187 int err = -1;
4188
4189 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4190 return (err);
4191
eac47204 4192 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
b128c09f 4193 strrchr(path, '/'));
cae5b340 4194 if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
b128c09f
BB
4195 struct dk_gpt *vtoc;
4196
4197 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4198 if (sb != NULL)
4199 *sb = vtoc->efi_parts[0].p_start;
4200 efi_free(vtoc);
4201 }
4202 (void) close(fd);
4203 }
4204 return (err);
4205}
4206
34dc7c2f
BB
4207/*
4208 * determine where a partition starts on a disk in the current
4209 * configuration
4210 */
4211static diskaddr_t
4212find_start_block(nvlist_t *config)
4213{
4214 nvlist_t **child;
4215 uint_t c, children;
34dc7c2f 4216 diskaddr_t sb = MAXOFFSET_T;
34dc7c2f
BB
4217 uint64_t wholedisk;
4218
4219 if (nvlist_lookup_nvlist_array(config,
4220 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4221 if (nvlist_lookup_uint64(config,
4222 ZPOOL_CONFIG_WHOLE_DISK,
4223 &wholedisk) != 0 || !wholedisk) {
4224 return (MAXOFFSET_T);
4225 }
b128c09f
BB
4226 if (read_efi_label(config, &sb) < 0)
4227 sb = MAXOFFSET_T;
34dc7c2f
BB
4228 return (sb);
4229 }
4230
4231 for (c = 0; c < children; c++) {
4232 sb = find_start_block(child[c]);
4233 if (sb != MAXOFFSET_T) {
4234 return (sb);
4235 }
4236 }
4237 return (MAXOFFSET_T);
4238}
4239
cae5b340 4240static int
d603ed6c
BB
4241zpool_label_disk_check(char *path)
4242{
4243 struct dk_gpt *vtoc;
4244 int fd, err;
4245
cae5b340 4246 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
a08ee875 4247 return (errno);
d603ed6c
BB
4248
4249 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4250 (void) close(fd);
a08ee875 4251 return (err);
d603ed6c
BB
4252 }
4253
4254 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4255 efi_free(vtoc);
4256 (void) close(fd);
a08ee875 4257 return (EIDRM);
d603ed6c
BB
4258 }
4259
4260 efi_free(vtoc);
4261 (void) close(fd);
a08ee875 4262 return (0);
d603ed6c
BB
4263}
4264
5eacc075
AX
4265/*
4266 * Generate a unique partition name for the ZFS member. Partitions must
4267 * have unique names to ensure udev will be able to create symlinks under
4268 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4269 * of the form zfs-<unique-id>.
4270 */
4271static void
4272zpool_label_name(char *label_name, int label_size)
4273{
4274 uint64_t id = 0;
4275 int fd;
4276
4277 fd = open("/dev/urandom", O_RDONLY);
cae5b340 4278 if (fd >= 0) {
5eacc075
AX
4279 if (read(fd, &id, sizeof (id)) != sizeof (id))
4280 id = 0;
4281
4282 close(fd);
4283 }
4284
4285 if (id == 0)
4286 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4287
cae5b340 4288 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
5eacc075
AX
4289}
4290
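/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * the names generated above are "zfs-" followed by 16 hex digits,
 * e.g. "zfs-1a2b3c4d5e6f7081".
 */
static void
example_label_name(void)
{
	char name[EFI_PART_NAME_LEN];

	zpool_label_name(name, (int)sizeof (name));
	(void) printf("%s\n", name);
}
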
34dc7c2f
BB
4291/*
4292 * Label an individual disk. The name provided is the short name,
4293 * stripped of any leading /dev path.
4294 */
4295int
4296zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4297{
4298 char path[MAXPATHLEN];
4299 struct dk_gpt *vtoc;
d603ed6c 4300 int rval, fd;
34dc7c2f
BB
4301 size_t resv = EFI_MIN_RESV_SIZE;
4302 uint64_t slice_size;
4303 diskaddr_t start_block;
4304 char errbuf[1024];
4305
4306 /* prepare an error message just in case */
4307 (void) snprintf(errbuf, sizeof (errbuf),
4308 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4309
4310 if (zhp) {
4311 nvlist_t *nvroot;
4312
4313 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4314 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4315
4316 if (zhp->zpool_start_block == 0)
4317 start_block = find_start_block(nvroot);
4318 else
4319 start_block = zhp->zpool_start_block;
4320 zhp->zpool_start_block = start_block;
4321 } else {
4322 /* new pool */
4323 start_block = NEW_START_BLOCK;
4324 }
4325
eac47204 4326 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
34dc7c2f 4327
cae5b340 4328 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
34dc7c2f
BB
4329 /*
4330 * This shouldn't happen. We've long since verified that this
4331 * is a valid device.
4332 */
109491a8
RL
4333 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4334 "label '%s': unable to open device: %d"), path, errno);
34dc7c2f
BB
4335 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4336 }
4337
4338 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4339 /*
4340 * The only way this can fail is if we run out of memory, or we
4341 * were unable to read the disk's capacity
4342 */
4343 if (errno == ENOMEM)
4344 (void) no_memory(hdl);
4345
4346 (void) close(fd);
109491a8
RL
4347 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4348 "label '%s': unable to read disk capacity"), path);
34dc7c2f
BB
4349
4350 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4351 }
4352
4353 slice_size = vtoc->efi_last_u_lba + 1;
4354 slice_size -= EFI_MIN_RESV_SIZE;
4355 if (start_block == MAXOFFSET_T)
4356 start_block = NEW_START_BLOCK;
4357 slice_size -= start_block;
613d88ed 4358 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
34dc7c2f
BB
4359
4360 vtoc->efi_parts[0].p_start = start_block;
4361 vtoc->efi_parts[0].p_size = slice_size;
4362
4363 /*
4364 * Why we use V_USR: V_BACKUP confuses users, and is considered
4365 * disposable by some EFI utilities (since EFI doesn't have a backup
4366 * slice). V_UNASSIGNED is supposed to be used only for zero size
4367 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4368 * etc. were all pretty specific. V_USR is as close to reality as we
4369 * can get, in the absence of V_OTHER.
4370 */
4371 vtoc->efi_parts[0].p_tag = V_USR;
5eacc075 4372 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
34dc7c2f
BB
4373
4374 vtoc->efi_parts[8].p_start = slice_size + start_block;
4375 vtoc->efi_parts[8].p_size = resv;
4376 vtoc->efi_parts[8].p_tag = V_RESERVED;
4377
cae5b340
AX
4378 rval = efi_write(fd, vtoc);
4379
4380 /* Flush the buffers to disk and invalidate the page cache. */
4381 (void) fsync(fd);
4382 (void) ioctl(fd, BLKFLSBUF);
4383
4384 if (rval == 0)
4385 rval = efi_rescan(fd);
4386
4387 /*
4388 * Some block drivers (like pcata) may not support EFI GPT labels.
4389 * Print out a helpful error message directing the user to manually
4390 * label the disk and give a specific slice.
4391 */
4392 if (rval != 0) {
34dc7c2f
BB
4393 (void) close(fd);
4394 efi_free(vtoc);
4395
d603ed6c
BB
4396 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4397 "parted(8) and then provide a specific slice: %d"), rval);
34dc7c2f
BB
4398 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4399 }
4400
4401 (void) close(fd);
4402 efi_free(vtoc);
34dc7c2f 4403
eac47204
BB
4404 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4405 (void) zfs_append_partition(path, MAXPATHLEN);
4406
5eacc075
AX
4407 /* Wait for udev to signal the device has settled. */
4408 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
d603ed6c
BB
4409 if (rval) {
4410 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4411 "detect device partitions on '%s': %d"), path, rval);
4412 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f
BB
4413 }
4414
d603ed6c
BB
4415 /* We can't be too paranoid. Read the label back and verify it. */
4416 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4417 rval = zpool_label_disk_check(path);
4418 if (rval) {
4419 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4420 "EFI label on '%s' is damaged. Ensure\nthis device "
4421 "is not in in use, and is functioning properly: %d"),
4422 path, rval);
4423 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f 4424 }
34dc7c2f 4425
a08ee875 4426 return (0);
34dc7c2f 4427}
cae5b340
AX
4428
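/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * labeling a fresh disk before use. "sdb" is a placeholder; the
 * function expects the short name with any /dev prefix stripped.
 */
static int
example_label_new_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	char name[] = "sdb";

	return (zpool_label_disk(hdl, zhp, name));
}
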
4429/*
4430 * Allocate and return the underlying device name for a device mapper device.
4431 * If a device mapper device maps to multiple devices, return the first device.
4432 *
4433 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4434 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
4435 *
4436 * Returns device name, or NULL on error or no match. If dm_name is not a DM
4437 * device then return NULL.
4438 *
4439 * NOTE: The returned name string must be *freed*.
4440 */
4441char *
4442dm_get_underlying_path(char *dm_name)
4443{
4444 DIR *dp = NULL;
4445 struct dirent *ep;
4446 char *realp;
4447 char *tmp = NULL;
4448 char *path = NULL;
4449 char *dev_str;
4450 int size;
4451
4452 if (dm_name == NULL)
4453 return (NULL);
4454
4455 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4456 realp = realpath(dm_name, NULL);
4457 if (realp == NULL)
4458 return (NULL);
4459
4460 /*
4461 * If they preface 'dev' with a path (like "/dev") then strip it off.
4462 * We just want the 'dm-N' part.
4463 */
4464 tmp = strrchr(realp, '/');
4465 if (tmp != NULL)
4466 dev_str = tmp + 1; /* +1 since we want the chr after '/' */
4467 else
4468 dev_str = realp; /* no '/' in the name; use the whole string */
4469
4470 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
	if (size == -1 || !tmp) {
		tmp = NULL;	/* undefined after a failed asprintf(); keeps free(tmp) safe */
		goto end;
	}
4473
4474 dp = opendir(tmp);
4475 if (dp == NULL)
4476 goto end;
4477
4478 /* Return first sd* entry in /sys/block/dm-N/slaves/ */
4479 while ((ep = readdir(dp))) {
4480 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4481 size = asprintf(&path, "/dev/%s", ep->d_name);
4482 break;
4483 }
4484 }
4485
4486end:
4487 if (dp != NULL)
4488 closedir(dp);
4489 free(tmp);
4490 free(realp);
4491 return (path);
4492}
4493
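/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * resolving a device-mapper node to its first underlying device.
 * The node name is a placeholder.
 */
static void
example_resolve_dm(void)
{
	char dm_name[] = "/dev/dm-0";
	char *dev = dm_get_underlying_path(dm_name);

	if (dev != NULL) {
		(void) printf("underlying device: %s\n", dev);
		free(dev);	/* the returned string is malloc'd */
	}
}
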
4494/*
4495 * Return 1 if device is a device mapper or multipath device.
4496 * Return 0 if not.
4497 */
4498int
4499zfs_dev_is_dm(char *dev_name)
4500{
4501
4502 char *tmp;
4503 tmp = dm_get_underlying_path(dev_name);
4504 if (tmp == NULL)
4505 return (0);
4506
4507 free(tmp);
4508 return (1);
4509}
4510
4511/*
4512 * By "whole disk" we mean an entire physical disk (something we can
4513 * label, toggle the write cache on, etc.) as opposed to the full
4514 * capacity of a pseudo-device such as lofi or did. We act as if we
4515 * are labeling the disk, which should be a pretty good test of whether
4516 * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
4517 * it isn't.
4518 */
4519int
4520zfs_dev_is_whole_disk(char *dev_name)
4521{
4522 struct dk_gpt *label;
4523 int fd;
4524
4525 if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
4526 return (0);
4527
4528 if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
4529 (void) close(fd);
4530 return (0);
4531 }
4532
4533 efi_free(label);
4534 (void) close(fd);
4535
4536 return (1);
4537}
4538
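/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * combining the two checks above to decide whether a name refers
 * to a plain, whole physical disk (not device-mapper, and healthy
 * enough to take an EFI label).
 */
static int
example_is_plain_whole_disk(char *dev_name)
{
	return (!zfs_dev_is_dm(dev_name) &&
	    zfs_dev_is_whole_disk(dev_name));
}
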
4539/*
4540 * Lookup the underlying device for a device name
4541 *
4542 * Often you'll have a symlink to a device, a partition device,
4543 * or a multipath device, and want to look up the underlying device.
4544 * This function returns the underlying device name. If the device
4545 * name is already the underlying device, then just return the same
4546 * name. If the device is a DM device with multiple underlying devices
4547 * then return the first one.
4548 *
4549 * For example:
4550 *
4551 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
4552 * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
4553 * returns: /dev/sda
4554 *
4555 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
4556 * dev_name: /dev/mapper/mpatha
4557 * returns: /dev/sda (first device)
4558 *
4559 * 3. /dev/sda (already the underlying device)
4560 * dev_name: /dev/sda
4561 * returns: /dev/sda
4562 *
4563 * 4. /dev/dm-3 (mapped to /dev/sda)
4564 * dev_name: /dev/dm-3
4565 * returns: /dev/sda
4566 *
4567 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
4568 * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
4569 * returns: /dev/sdb
4570 *
4571 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
4572 * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
4573 * returns: /dev/sda
4574 *
4575 * Returns underlying device name, or NULL on error or no match.
4576 *
4577 * NOTE: The returned name string must be *freed*.
4578 */
4579char *
4580zfs_get_underlying_path(char *dev_name)
4581{
4582 char *name = NULL;
4583 char *tmp;
4584
4585 if (dev_name == NULL)
4586 return (NULL);
4587
4588 tmp = dm_get_underlying_path(dev_name);
4589
4590 /* dev_name not a DM device, so just un-symlinkize it */
4591 if (tmp == NULL)
4592 tmp = realpath(dev_name, NULL);
4593
4594 if (tmp != NULL) {
4595 name = zfs_strip_partition_path(tmp);
4596 free(tmp);
4597 }
4598
4599 return (name);
4600}
4601
4602/*
4603 * Given a dev name like "sda", return the full enclosure sysfs path to
4604 * the disk. You can also pass in the name with "/dev" prepended
4605 * to it (like /dev/sda).
4606 *
4607 * For example, disk "sda" in enclosure slot 1:
4608 * dev: "sda"
4609 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4610 *
4611 * 'dev' must be a non-devicemapper device.
4612 *
4613 * Returned string must be freed.
4614 */
4615char *
4616zfs_get_enclosure_sysfs_path(char *dev_name)
4617{
4618 DIR *dp = NULL;
4619 struct dirent *ep;
4620 char buf[MAXPATHLEN];
4621 char *tmp1 = NULL;
4622 char *tmp2 = NULL;
4623 char *tmp3 = NULL;
4624 char *path = NULL;
4625 size_t size;
4626 int tmpsize;
4627
4628 if (dev_name == NULL)
4629 return (NULL);
4630
4631 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4632 tmp1 = strrchr(dev_name, '/');
4633 if (tmp1 != NULL)
4634 dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
4635
4636 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4637 if (tmpsize == -1 || tmp1 == NULL) {
4638 tmp1 = NULL;
4639 goto end;
4640 }
4641
4642 dp = opendir(tmp1);
4643 if (dp == NULL)
4644 goto end; /* free(tmp1) at 'end' releases the buffer */
4647
4648 /*
4649 * Look through all sysfs entries in /sys/block/<dev>/device for
4650 * the enclosure symlink.
4651 */
4652 while ((ep = readdir(dp))) {
4653 /* Ignore everything that's not our enclosure_device link */
4654 if (strstr(ep->d_name, "enclosure_device") == NULL)
4655 continue;
4656
4657 if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
4658 tmp2 == NULL)
4659 break;
4660
4661 size = readlink(tmp2, buf, sizeof (buf));
4662
4663 /* Did readlink fail or crop the link name? */
4664 if (size == -1 || size >= sizeof (buf)) {
4665 free(tmp2);
4666 tmp2 = NULL; /* To make free() at the end a NOP */
4667 break;
4668 }
4669
4670 /*
4671 * We got a valid link. readlink() doesn't terminate strings
4672 * so we have to do it.
4673 */
4674 buf[size] = '\0';
4675
4676 /*
4677 * Our link will look like:
4678 *
4679 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4680 *
4681 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4682 */
4683 tmp3 = strstr(buf, "enclosure");
4684 if (tmp3 == NULL)
4685 break;
4686
4687 if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
4688 /* If asprintf() fails, 'path' is undefined */
4689 path = NULL;
4690 break;
4691 }
4692
4695 }
4696
4697end:
4698 free(tmp2);
4699 free(tmp1);
4700
4701 if (dp != NULL)
4702 closedir(dp);
4703
4704 return (path);
4705}
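/*
 * Illustrative sketch (hypothetical helper, not part of libzfs):
 * looking up the enclosure slot of a disk. "sda" is a placeholder
 * device name.
 */
static void
example_print_enclosure_slot(void)
{
	char dev[] = "sda";
	char *slot = zfs_get_enclosure_sysfs_path(dev);

	if (slot != NULL) {
		(void) printf("%s -> %s\n", dev, slot);
		free(slot);	/* the returned string is malloc'd */
	}
}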