]> git.proxmox.com Git - mirror_zfs.git/blame - lib/libzfs/libzfs_pool.c
Run zfs load-key if needed in dracut
[mirror_zfs.git] / lib / libzfs / libzfs_pool.c
CommitLineData
34dc7c2f
BB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
0fdd8d64 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
428870ff 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
a05dfd00 25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
23d70cde 26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
bec1067d 27 * Copyright (c) 2017 Datto Inc.
d3f2cd7e 28 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
34dc7c2f
BB
29 */
30
34dc7c2f
BB
31#include <ctype.h>
32#include <errno.h>
33#include <devid.h>
34dc7c2f
BB
34#include <fcntl.h>
35#include <libintl.h>
36#include <stdio.h>
37#include <stdlib.h>
38#include <strings.h>
39#include <unistd.h>
6f1ffb06 40#include <libgen.h>
d603ed6c
BB
41#include <zone.h>
42#include <sys/stat.h>
34dc7c2f 43#include <sys/efi_partition.h>
46364cb2 44#include <sys/systeminfo.h>
34dc7c2f
BB
45#include <sys/vtoc.h>
46#include <sys/zfs_ioctl.h>
9babb374 47#include <dlfcn.h>
34dc7c2f
BB
48
49#include "zfs_namecheck.h"
50#include "zfs_prop.h"
51#include "libzfs_impl.h"
428870ff 52#include "zfs_comutil.h"
9ae529ec 53#include "zfeature_common.h"
34dc7c2f 54
b128c09f
BB
55static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
56
572e2857
BB
/*
 * Context flags passed to zpool_valid_proplist(): some pool properties are
 * only settable at creation time, only at import time, or neither.
 */
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
61
34dc7c2f
BB
62/*
63 * ====================================================================
64 * zpool property functions
65 * ====================================================================
66 */
67
68static int
69zpool_get_all_props(zpool_handle_t *zhp)
70{
13fe0198 71 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
72 libzfs_handle_t *hdl = zhp->zpool_hdl;
73
74 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
75
76 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
77 return (-1);
78
79 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
80 if (errno == ENOMEM) {
81 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
82 zcmd_free_nvlists(&zc);
83 return (-1);
84 }
85 } else {
86 zcmd_free_nvlists(&zc);
87 return (-1);
88 }
89 }
90
91 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
92 zcmd_free_nvlists(&zc);
93 return (-1);
94 }
95
96 zcmd_free_nvlists(&zc);
97
98 return (0);
99}
100
101static int
102zpool_props_refresh(zpool_handle_t *zhp)
103{
104 nvlist_t *old_props;
105
106 old_props = zhp->zpool_props;
107
108 if (zpool_get_all_props(zhp) != 0)
109 return (-1);
110
111 nvlist_free(old_props);
112 return (0);
113}
114
115static char *
116zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
117 zprop_source_t *src)
118{
119 nvlist_t *nv, *nvl;
120 uint64_t ival;
121 char *value;
122 zprop_source_t source;
123
124 nvl = zhp->zpool_props;
125 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
126 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
127 source = ival;
128 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
129 } else {
130 source = ZPROP_SRC_DEFAULT;
131 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
132 value = "-";
133 }
134
135 if (src)
136 *src = source;
137
138 return (value);
139}
140
/*
 * Return the numeric value of the named pool property, reporting its source
 * (default vs. explicitly set) via 'src' when non-NULL.  Falls back to the
 * property's default if the property nvlist cannot be loaded.
 */
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		/*
		 * 'value' is deliberately reused: it first holds the source
		 * tag, then is overwritten with the actual property value.
		 */
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
179
180/*
181 * Map VDEV STATE to printed strings.
182 */
183char *
184zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
185{
186 switch (state) {
187 case VDEV_STATE_CLOSED:
188 case VDEV_STATE_OFFLINE:
189 return (gettext("OFFLINE"));
190 case VDEV_STATE_REMOVED:
191 return (gettext("REMOVED"));
192 case VDEV_STATE_CANT_OPEN:
b128c09f 193 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
34dc7c2f 194 return (gettext("FAULTED"));
428870ff
BB
195 else if (aux == VDEV_AUX_SPLIT_POOL)
196 return (gettext("SPLIT"));
34dc7c2f
BB
197 else
198 return (gettext("UNAVAIL"));
199 case VDEV_STATE_FAULTED:
200 return (gettext("FAULTED"));
201 case VDEV_STATE_DEGRADED:
202 return (gettext("DEGRADED"));
203 case VDEV_STATE_HEALTHY:
204 return (gettext("ONLINE"));
23d70cde
GM
205
206 default:
207 break;
34dc7c2f
BB
208 }
209
210 return (gettext("UNKNOWN"));
211}
212
131cc95c
DK
213/*
214 * Map POOL STATE to printed strings.
215 */
216const char *
217zpool_pool_state_to_name(pool_state_t state)
218{
219 switch (state) {
220 default:
221 break;
222 case POOL_STATE_ACTIVE:
223 return (gettext("ACTIVE"));
224 case POOL_STATE_EXPORTED:
225 return (gettext("EXPORTED"));
226 case POOL_STATE_DESTROYED:
227 return (gettext("DESTROYED"));
228 case POOL_STATE_SPARE:
229 return (gettext("SPARE"));
230 case POOL_STATE_L2CACHE:
231 return (gettext("L2CACHE"));
232 case POOL_STATE_UNINITIALIZED:
233 return (gettext("UNINITIALIZED"));
234 case POOL_STATE_UNAVAIL:
235 return (gettext("UNAVAIL"));
236 case POOL_STATE_POTENTIALLY_ACTIVE:
237 return (gettext("POTENTIALLY_ACTIVE"));
238 }
239
240 return (gettext("UNKNOWN"));
241}
242
8b921f66
RE
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 *
 * 'srctype', when non-NULL, receives the value's source (default, local,
 * none).  When 'literal' is set, numeric values are rendered as raw decimal
 * rather than human-readable (nicenum/percentage/ratio) form.
 * Returns 0 on success, -1 if the value could not be retrieved.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	/*
	 * An unavailable pool cannot answer most property queries; serve
	 * the few that can be derived from cached state and "-" for the
	 * rest, still reporting success.
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			/* These may still be readable from the cached props. */
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			/* 0 means no expansion is possible; print "-". */
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			/* UINT64_MAX means fragmentation is unknown. */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			/* Stored as a percentage x100; print as N.NN[x]. */
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			/* Health is derived from the root vdev's state. */
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			/* Feature-flag pools have no single version number. */
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
403
404/*
405 * Check if the bootfs name has the same pool name as it is set to.
406 * Assuming bootfs is a valid dataset name.
407 */
408static boolean_t
409bootfs_name_valid(const char *pool, char *bootfs)
410{
411 int len = strlen(pool);
412
b128c09f 413 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
34dc7c2f
BB
414 return (B_FALSE);
415
416 if (strncmp(pool, bootfs, len) == 0 &&
417 (bootfs[len] == '/' || bootfs[len] == '\0'))
418 return (B_TRUE);
419
420 return (B_FALSE);
421}
422
1bd201e7
CS
423boolean_t
424zpool_is_bootable(zpool_handle_t *zhp)
b128c09f 425{
eca7b760 426 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
b128c09f
BB
427
428 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
2a8b84b7 429 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
b128c09f
BB
430 sizeof (bootfs)) != 0);
431}
432
433
34dc7c2f
BB
434/*
435 * Given an nvlist of zpool properties to be set, validate that they are
436 * correct, and parse any numeric properties (index, boolean, etc) if they are
437 * specified as strings.
438 */
439static nvlist_t *
b128c09f 440zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
572e2857 441 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
34dc7c2f
BB
442{
443 nvpair_t *elem;
444 nvlist_t *retprops;
445 zpool_prop_t prop;
446 char *strval;
447 uint64_t intval;
d96eb2b1 448 char *slash, *check;
34dc7c2f 449 struct stat64 statbuf;
b128c09f 450 zpool_handle_t *zhp;
34dc7c2f
BB
451
452 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
453 (void) no_memory(hdl);
454 return (NULL);
455 }
456
457 elem = NULL;
458 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
459 const char *propname = nvpair_name(elem);
460
9ae529ec
CS
461 prop = zpool_name_to_prop(propname);
462 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
463 int err;
9ae529ec
CS
464 char *fname = strchr(propname, '@') + 1;
465
fa86b5db 466 err = zfeature_lookup_name(fname, NULL);
9ae529ec
CS
467 if (err != 0) {
468 ASSERT3U(err, ==, ENOENT);
469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
470 "invalid feature '%s'"), fname);
471 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
472 goto error;
473 }
474
475 if (nvpair_type(elem) != DATA_TYPE_STRING) {
476 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
477 "'%s' must be a string"), propname);
478 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
479 goto error;
480 }
481
482 (void) nvpair_value_string(elem, &strval);
e4010f27 483 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
484 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
9ae529ec
CS
485 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
486 "property '%s' can only be set to "
e4010f27 487 "'enabled' or 'disabled'"), propname);
9ae529ec
CS
488 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
489 goto error;
490 }
491
492 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
493 (void) no_memory(hdl);
494 goto error;
495 }
496 continue;
497 }
498
34dc7c2f
BB
499 /*
500 * Make sure this property is valid and applies to this type.
501 */
9ae529ec 502 if (prop == ZPROP_INVAL) {
34dc7c2f
BB
503 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
504 "invalid property '%s'"), propname);
505 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
506 goto error;
507 }
508
509 if (zpool_prop_readonly(prop)) {
510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
511 "is readonly"), propname);
512 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
513 goto error;
514 }
515
516 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
517 &strval, &intval, errbuf) != 0)
518 goto error;
519
520 /*
521 * Perform additional checking for specific properties.
522 */
523 switch (prop) {
524 case ZPOOL_PROP_VERSION:
9ae529ec
CS
525 if (intval < version ||
526 !SPA_VERSION_IS_SUPPORTED(intval)) {
34dc7c2f
BB
527 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
528 "property '%s' number %d is invalid."),
529 propname, intval);
530 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
531 goto error;
532 }
533 break;
534
df30f566 535 case ZPOOL_PROP_ASHIFT:
ff61d1a4 536 if (intval != 0 &&
537 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
df30f566 538 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
ff61d1a4 539 "invalid '%s=%d' property: only values "
540 "between %" PRId32 " and %" PRId32 " "
541 "are allowed.\n"),
542 propname, intval, ASHIFT_MIN, ASHIFT_MAX);
df30f566
CK
543 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
544 goto error;
545 }
546 break;
547
34dc7c2f 548 case ZPOOL_PROP_BOOTFS:
572e2857 549 if (flags.create || flags.import) {
34dc7c2f
BB
550 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
551 "property '%s' cannot be set at creation "
552 "or import time"), propname);
553 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
554 goto error;
555 }
556
557 if (version < SPA_VERSION_BOOTFS) {
558 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 "pool must be upgraded to support "
560 "'%s' property"), propname);
561 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
562 goto error;
563 }
564
565 /*
566 * bootfs property value has to be a dataset name and
567 * the dataset has to be in the same pool as it sets to.
568 */
569 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
570 strval)) {
571 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
572 "is an invalid name"), strval);
573 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
574 goto error;
575 }
b128c09f
BB
576
577 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
578 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
579 "could not open pool '%s'"), poolname);
580 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
581 goto error;
582 }
b128c09f 583 zpool_close(zhp);
34dc7c2f
BB
584 break;
585
586 case ZPOOL_PROP_ALTROOT:
572e2857 587 if (!flags.create && !flags.import) {
34dc7c2f
BB
588 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
589 "property '%s' can only be set during pool "
590 "creation or import"), propname);
591 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
592 goto error;
593 }
594
595 if (strval[0] != '/') {
596 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
597 "bad alternate root '%s'"), strval);
598 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
599 goto error;
600 }
601 break;
602
603 case ZPOOL_PROP_CACHEFILE:
604 if (strval[0] == '\0')
605 break;
606
607 if (strcmp(strval, "none") == 0)
608 break;
609
610 if (strval[0] != '/') {
611 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
612 "property '%s' must be empty, an "
613 "absolute path, or 'none'"), propname);
614 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
615 goto error;
616 }
617
618 slash = strrchr(strval, '/');
619
620 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
621 strcmp(slash, "/..") == 0) {
622 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
623 "'%s' is not a valid file"), strval);
624 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
625 goto error;
626 }
627
628 *slash = '\0';
629
630 if (strval[0] != '\0' &&
631 (stat64(strval, &statbuf) != 0 ||
632 !S_ISDIR(statbuf.st_mode))) {
633 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
634 "'%s' is not a valid directory"),
635 strval);
636 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
637 goto error;
638 }
639
640 *slash = '/';
641 break;
572e2857 642
d96eb2b1
DM
643 case ZPOOL_PROP_COMMENT:
644 for (check = strval; *check != '\0'; check++) {
645 if (!isprint(*check)) {
646 zfs_error_aux(hdl,
647 dgettext(TEXT_DOMAIN,
648 "comment may only have printable "
649 "characters"));
650 (void) zfs_error(hdl, EZFS_BADPROP,
651 errbuf);
652 goto error;
653 }
654 }
655 if (strlen(strval) > ZPROP_MAX_COMMENT) {
656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
657 "comment must not exceed %d characters"),
658 ZPROP_MAX_COMMENT);
659 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
660 goto error;
661 }
662 break;
572e2857
BB
663 case ZPOOL_PROP_READONLY:
664 if (!flags.import) {
665 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
666 "property '%s' can only be set at "
667 "import time"), propname);
668 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
669 goto error;
670 }
671 break;
83e9986f
RY
672 case ZPOOL_PROP_TNAME:
673 if (!flags.create) {
674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
675 "property '%s' can only be set at "
676 "creation time"), propname);
677 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
678 goto error;
679 }
97dde921 680 break;
379ca9cf
OF
681 case ZPOOL_PROP_MULTIHOST:
682 if (get_system_hostid() == 0) {
683 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
684 "requires a non-zero system hostid"));
685 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
686 goto error;
687 }
688 break;
23d70cde
GM
689 default:
690 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
691 "property '%s'(%d) not defined"), propname, prop);
83e9986f 692 break;
34dc7c2f
BB
693 }
694 }
695
696 return (retprops);
697error:
698 nvlist_free(retprops);
699 return (NULL);
700}
701
702/*
703 * Set zpool property : propname=propval.
704 */
705int
706zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
707{
13fe0198 708 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
709 int ret = -1;
710 char errbuf[1024];
711 nvlist_t *nvl = NULL;
712 nvlist_t *realprops;
713 uint64_t version;
572e2857 714 prop_flags_t flags = { 0 };
34dc7c2f
BB
715
716 (void) snprintf(errbuf, sizeof (errbuf),
717 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
718 zhp->zpool_name);
719
34dc7c2f
BB
720 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
721 return (no_memory(zhp->zpool_hdl));
722
723 if (nvlist_add_string(nvl, propname, propval) != 0) {
724 nvlist_free(nvl);
725 return (no_memory(zhp->zpool_hdl));
726 }
727
728 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
b128c09f 729 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
572e2857 730 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
34dc7c2f
BB
731 nvlist_free(nvl);
732 return (-1);
733 }
734
735 nvlist_free(nvl);
736 nvl = realprops;
737
738 /*
739 * Execute the corresponding ioctl() to set this property.
740 */
741 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
742
743 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
744 nvlist_free(nvl);
745 return (-1);
746 }
747
748 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
749
750 zcmd_free_nvlists(&zc);
751 nvlist_free(nvl);
752
753 if (ret)
754 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
755 else
756 (void) zpool_props_refresh(zhp);
757
758 return (ret);
759}
760
/*
 * Expand *plp to cover every displayable pool property, including
 * feature@... entries and any unsupported@... features reported by this
 * pool, then widen each entry's print width to fit this pool's values.
 * Returns 0 on success, -1 on failure.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	/* Walk to the tail so new entries are appended in order. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	/* Add feature@... entries only once, on the first expansion. */
	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;	/* list takes ownership */
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen columns to fit this pool's actual property values. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
855
9ae529ec
CS
856/*
857 * Get the state for the given feature on the given ZFS pool.
858 */
859int
860zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
861 size_t len)
862{
863 uint64_t refcount;
864 boolean_t found = B_FALSE;
865 nvlist_t *features = zpool_get_features(zhp);
866 boolean_t supported;
867 const char *feature = strchr(propname, '@') + 1;
868
869 supported = zpool_prop_feature(propname);
870 ASSERT(supported || zpool_prop_unsupported(propname));
871
872 /*
873 * Convert from feature name to feature guid. This conversion is
4e33ba4c 874 * unnecessary for unsupported@... properties because they already
9ae529ec
CS
875 * use guids.
876 */
877 if (supported) {
878 int ret;
fa86b5db 879 spa_feature_t fid;
9ae529ec 880
fa86b5db 881 ret = zfeature_lookup_name(feature, &fid);
9ae529ec
CS
882 if (ret != 0) {
883 (void) strlcpy(buf, "-", len);
884 return (ENOTSUP);
885 }
fa86b5db 886 feature = spa_feature_table[fid].fi_guid;
9ae529ec
CS
887 }
888
889 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
890 found = B_TRUE;
891
892 if (supported) {
893 if (!found) {
894 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
895 } else {
896 if (refcount == 0)
897 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
898 else
899 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
900 }
901 } else {
902 if (found) {
903 if (refcount == 0) {
904 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
905 } else {
906 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
907 }
908 } else {
909 (void) strlcpy(buf, "-", len);
910 return (ENOTSUP);
911 }
912 }
913
914 return (0);
915}
34dc7c2f 916
9babb374
BB
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
9babb374 927
34dc7c2f
BB
/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 *
 * 'isopen' indicates the name refers to an existing pool being opened (as
 * opposed to one being created/imported), which relaxes the reserved-name
 * check.  On failure, a human-readable reason is recorded against 'hdl'
 * (when non-NULL) and B_FALSE is returned.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			/* Translate the namecheck error into a message. */
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
1025
1026/*
1027 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1028 * state.
1029 */
1030zpool_handle_t *
1031zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1032{
1033 zpool_handle_t *zhp;
1034 boolean_t missing;
1035
1036 /*
1037 * Make sure the pool name is valid.
1038 */
1039 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1040 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1041 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1042 pool);
1043 return (NULL);
1044 }
1045
1046 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1047 return (NULL);
1048
1049 zhp->zpool_hdl = hdl;
1050 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1051
1052 if (zpool_refresh_stats(zhp, &missing) != 0) {
1053 zpool_close(zhp);
1054 return (NULL);
1055 }
1056
1057 if (missing) {
1058 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1059 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1060 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1061 zpool_close(zhp);
1062 return (NULL);
1063 }
1064
1065 return (zhp);
1066}
1067
1068/*
1069 * Like the above, but silent on error. Used when iterating over pools (because
1070 * the configuration cache may be out of date).
1071 */
1072int
1073zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1074{
1075 zpool_handle_t *zhp;
1076 boolean_t missing;
1077
1078 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1079 return (-1);
1080
1081 zhp->zpool_hdl = hdl;
1082 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1083
1084 if (zpool_refresh_stats(zhp, &missing) != 0) {
1085 zpool_close(zhp);
1086 return (-1);
1087 }
1088
1089 if (missing) {
1090 zpool_close(zhp);
1091 *ret = NULL;
1092 return (0);
1093 }
1094
1095 *ret = zhp;
1096 return (0);
1097}
1098
1099/*
1100 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1101 * state.
1102 */
1103zpool_handle_t *
1104zpool_open(libzfs_handle_t *hdl, const char *pool)
1105{
1106 zpool_handle_t *zhp;
1107
1108 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1109 return (NULL);
1110
1111 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1112 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1113 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1114 zpool_close(zhp);
1115 return (NULL);
1116 }
1117
1118 return (zhp);
1119}
1120
/*
 * Close the handle. Simply frees the memory associated with the handle.
 * (nvlist_free() is a no-op on NULL members, so no guards are needed.)
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
1132
/*
 * Return the name of the pool.  The string is owned by the handle and
 * remains valid until zpool_close().
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
1141
1142
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE), as cached by the
 * last zpool_refresh_stats().
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
1151
1152/*
1153 * Create the named pool, using the provided vdev list. It is assumed
1154 * that the consumer has already validated the contents of the nvlist, so we
1155 * don't have to worry about error semantics.
1156 */
1157int
1158zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
b128c09f 1159 nvlist_t *props, nvlist_t *fsprops)
34dc7c2f 1160{
13fe0198 1161 zfs_cmd_t zc = {"\0"};
b128c09f
BB
1162 nvlist_t *zc_fsprops = NULL;
1163 nvlist_t *zc_props = NULL;
b5256303
TC
1164 nvlist_t *hidden_args = NULL;
1165 uint8_t *wkeydata = NULL;
1166 uint_t wkeylen = 0;
34dc7c2f 1167 char msg[1024];
b128c09f 1168 int ret = -1;
34dc7c2f
BB
1169
1170 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1171 "cannot create '%s'"), pool);
1172
1173 if (!zpool_name_valid(hdl, B_FALSE, pool))
1174 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1175
1176 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1177 return (-1);
1178
b128c09f 1179 if (props) {
572e2857
BB
1180 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1181
b128c09f 1182 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
572e2857 1183 SPA_VERSION_1, flags, msg)) == NULL) {
b128c09f
BB
1184 goto create_failed;
1185 }
1186 }
34dc7c2f 1187
b128c09f
BB
1188 if (fsprops) {
1189 uint64_t zoned;
1190 char *zonestr;
1191
1192 zoned = ((nvlist_lookup_string(fsprops,
1193 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1194 strcmp(zonestr, "on") == 0);
1195
82f6f6e6 1196 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
b5256303 1197 fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
b128c09f
BB
1198 goto create_failed;
1199 }
1200 if (!zc_props &&
1201 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1202 goto create_failed;
1203 }
b5256303
TC
1204 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props,
1205 &wkeydata, &wkeylen) != 0) {
1206 zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
1207 goto create_failed;
1208 }
b128c09f
BB
1209 if (nvlist_add_nvlist(zc_props,
1210 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1211 goto create_failed;
1212 }
b5256303
TC
1213 if (wkeydata != NULL) {
1214 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1215 goto create_failed;
1216
1217 if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1218 wkeydata, wkeylen) != 0)
1219 goto create_failed;
1220
1221 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1222 hidden_args) != 0)
1223 goto create_failed;
1224 }
34dc7c2f
BB
1225 }
1226
b128c09f
BB
1227 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1228 goto create_failed;
1229
34dc7c2f
BB
1230 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1231
b128c09f 1232 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
34dc7c2f
BB
1233
1234 zcmd_free_nvlists(&zc);
b128c09f
BB
1235 nvlist_free(zc_props);
1236 nvlist_free(zc_fsprops);
b5256303
TC
1237 nvlist_free(hidden_args);
1238 if (wkeydata != NULL)
1239 free(wkeydata);
34dc7c2f
BB
1240
1241 switch (errno) {
1242 case EBUSY:
1243 /*
1244 * This can happen if the user has specified the same
1245 * device multiple times. We can't reliably detect this
1246 * until we try to add it and see we already have a
d603ed6c
BB
1247 * label. This can also happen under if the device is
1248 * part of an active md or lvm device.
34dc7c2f
BB
1249 */
1250 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
d1d7e268
MK
1251 "one or more vdevs refer to the same device, or "
1252 "one of\nthe devices is part of an active md or "
1253 "lvm device"));
34dc7c2f
BB
1254 return (zfs_error(hdl, EZFS_BADDEV, msg));
1255
82f6f6e6
JS
1256 case ERANGE:
1257 /*
1258 * This happens if the record size is smaller or larger
1259 * than the allowed size range, or not a power of 2.
1260 *
1261 * NOTE: although zfs_valid_proplist is called earlier,
1262 * this case may have slipped through since the
1263 * pool does not exist yet and it is therefore
1264 * impossible to read properties e.g. max blocksize
1265 * from the pool.
1266 */
1267 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1268 "record size invalid"));
1269 return (zfs_error(hdl, EZFS_BADPROP, msg));
1270
34dc7c2f
BB
1271 case EOVERFLOW:
1272 /*
1273 * This occurs when one of the devices is below
1274 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1275 * device was the problem device since there's no
1276 * reliable way to determine device size from userland.
1277 */
1278 {
1279 char buf[64];
1280
e7fbeb60 1281 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1282 sizeof (buf));
34dc7c2f
BB
1283
1284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1285 "one or more devices is less than the "
1286 "minimum size (%s)"), buf);
1287 }
1288 return (zfs_error(hdl, EZFS_BADDEV, msg));
1289
1290 case ENOSPC:
1291 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1292 "one or more devices is out of space"));
1293 return (zfs_error(hdl, EZFS_BADDEV, msg));
1294
1295 case ENOTBLK:
1296 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1297 "cache device must be a disk or disk slice"));
1298 return (zfs_error(hdl, EZFS_BADDEV, msg));
1299
1300 default:
1301 return (zpool_standard_error(hdl, errno, msg));
1302 }
1303 }
1304
b128c09f 1305create_failed:
34dc7c2f 1306 zcmd_free_nvlists(&zc);
b128c09f
BB
1307 nvlist_free(zc_props);
1308 nvlist_free(zc_fsprops);
b5256303
TC
1309 nvlist_free(hidden_args);
1310 if (wkeydata != NULL)
1311 free(wkeydata);
b128c09f 1312 return (ret);
34dc7c2f
BB
1313}
1314
1315/*
1316 * Destroy the given pool. It is up to the caller to ensure that there are no
1317 * datasets left in the pool.
1318 */
1319int
6f1ffb06 1320zpool_destroy(zpool_handle_t *zhp, const char *log_str)
34dc7c2f 1321{
13fe0198 1322 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
1323 zfs_handle_t *zfp = NULL;
1324 libzfs_handle_t *hdl = zhp->zpool_hdl;
1325 char msg[1024];
1326
1327 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
572e2857 1328 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
34dc7c2f
BB
1329 return (-1);
1330
34dc7c2f 1331 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
6f1ffb06 1332 zc.zc_history = (uint64_t)(uintptr_t)log_str;
34dc7c2f 1333
572e2857 1334 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
34dc7c2f
BB
1335 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1336 "cannot destroy '%s'"), zhp->zpool_name);
1337
1338 if (errno == EROFS) {
1339 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1340 "one or more devices is read only"));
1341 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1342 } else {
1343 (void) zpool_standard_error(hdl, errno, msg);
1344 }
1345
1346 if (zfp)
1347 zfs_close(zfp);
1348 return (-1);
1349 }
1350
1351 if (zfp) {
1352 remove_mountpoint(zfp);
1353 zfs_close(zfp);
1354 }
1355
1356 return (0);
1357}
1358
1359/*
1360 * Add the given vdevs to the pool. The caller must have already performed the
1361 * necessary verification to ensure that the vdev specification is well-formed.
1362 */
1363int
1364zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1365{
13fe0198 1366 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
1367 int ret;
1368 libzfs_handle_t *hdl = zhp->zpool_hdl;
1369 char msg[1024];
1370 nvlist_t **spares, **l2cache;
1371 uint_t nspares, nl2cache;
1372
1373 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1374 "cannot add to '%s'"), zhp->zpool_name);
1375
1376 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1377 SPA_VERSION_SPARES &&
1378 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1379 &spares, &nspares) == 0) {
1380 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1381 "upgraded to add hot spares"));
1382 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1383 }
1384
1385 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1386 SPA_VERSION_L2CACHE &&
1387 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1388 &l2cache, &nl2cache) == 0) {
1389 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1390 "upgraded to add cache devices"));
1391 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1392 }
1393
1394 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1395 return (-1);
1396 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1397
572e2857 1398 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
34dc7c2f
BB
1399 switch (errno) {
1400 case EBUSY:
1401 /*
1402 * This can happen if the user has specified the same
1403 * device multiple times. We can't reliably detect this
1404 * until we try to add it and see we already have a
1405 * label.
1406 */
1407 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1408 "one or more vdevs refer to the same device"));
1409 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1410 break;
1411
1412 case EOVERFLOW:
1413 /*
1414 * This occurrs when one of the devices is below
1415 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1416 * device was the problem device since there's no
1417 * reliable way to determine device size from userland.
1418 */
1419 {
1420 char buf[64];
1421
e7fbeb60 1422 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1423 sizeof (buf));
34dc7c2f
BB
1424
1425 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1426 "device is less than the minimum "
1427 "size (%s)"), buf);
1428 }
1429 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1430 break;
1431
1432 case ENOTSUP:
1433 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1434 "pool must be upgraded to add these vdevs"));
1435 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1436 break;
1437
34dc7c2f
BB
1438 case ENOTBLK:
1439 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1440 "cache device must be a disk or disk slice"));
1441 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1442 break;
1443
1444 default:
1445 (void) zpool_standard_error(hdl, errno, msg);
1446 }
1447
1448 ret = -1;
1449 } else {
1450 ret = 0;
1451 }
1452
1453 zcmd_free_nvlists(&zc);
1454
1455 return (ret);
1456}
1457
1458/*
1459 * Exports the pool from the system. The caller must ensure that there are no
1460 * mounted datasets in the pool.
1461 */
6f1ffb06
MA
1462static int
1463zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1464 const char *log_str)
34dc7c2f 1465{
13fe0198 1466 zfs_cmd_t zc = {"\0"};
b128c09f 1467 char msg[1024];
34dc7c2f 1468
b128c09f
BB
1469 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1470 "cannot export '%s'"), zhp->zpool_name);
1471
34dc7c2f 1472 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 1473 zc.zc_cookie = force;
fb5f0bc8 1474 zc.zc_guid = hardforce;
6f1ffb06 1475 zc.zc_history = (uint64_t)(uintptr_t)log_str;
b128c09f
BB
1476
1477 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1478 switch (errno) {
1479 case EXDEV:
1480 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1481 "use '-f' to override the following errors:\n"
1482 "'%s' has an active shared spare which could be"
1483 " used by other pools once '%s' is exported."),
1484 zhp->zpool_name, zhp->zpool_name);
1485 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1486 msg));
1487 default:
1488 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1489 msg));
1490 }
1491 }
34dc7c2f 1492
34dc7c2f
BB
1493 return (0);
1494}
1495
fb5f0bc8 1496int
6f1ffb06 1497zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
fb5f0bc8 1498{
6f1ffb06 1499 return (zpool_export_common(zhp, force, B_FALSE, log_str));
fb5f0bc8
BB
1500}
1501
1502int
6f1ffb06 1503zpool_export_force(zpool_handle_t *zhp, const char *log_str)
fb5f0bc8 1504{
6f1ffb06 1505 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
fb5f0bc8
BB
1506}
1507
428870ff
BB
1508static void
1509zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
572e2857 1510 nvlist_t *config)
428870ff 1511{
572e2857 1512 nvlist_t *nv = NULL;
428870ff
BB
1513 uint64_t rewindto;
1514 int64_t loss = -1;
1515 struct tm t;
1516 char timestr[128];
1517
572e2857
BB
1518 if (!hdl->libzfs_printerr || config == NULL)
1519 return;
1520
9ae529ec
CS
1521 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1522 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
428870ff 1523 return;
9ae529ec 1524 }
428870ff 1525
572e2857 1526 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
428870ff 1527 return;
572e2857 1528 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
428870ff
BB
1529
1530 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
b8864a23 1531 strftime(timestr, 128, "%c", &t) != 0) {
428870ff
BB
1532 if (dryrun) {
1533 (void) printf(dgettext(TEXT_DOMAIN,
1534 "Would be able to return %s "
1535 "to its state as of %s.\n"),
1536 name, timestr);
1537 } else {
1538 (void) printf(dgettext(TEXT_DOMAIN,
1539 "Pool %s returned to its state as of %s.\n"),
1540 name, timestr);
1541 }
1542 if (loss > 120) {
1543 (void) printf(dgettext(TEXT_DOMAIN,
1544 "%s approximately %lld "),
1545 dryrun ? "Would discard" : "Discarded",
b8864a23 1546 ((longlong_t)loss + 30) / 60);
428870ff
BB
1547 (void) printf(dgettext(TEXT_DOMAIN,
1548 "minutes of transactions.\n"));
1549 } else if (loss > 0) {
1550 (void) printf(dgettext(TEXT_DOMAIN,
1551 "%s approximately %lld "),
b8864a23
BB
1552 dryrun ? "Would discard" : "Discarded",
1553 (longlong_t)loss);
428870ff
BB
1554 (void) printf(dgettext(TEXT_DOMAIN,
1555 "seconds of transactions.\n"));
1556 }
1557 }
1558}
1559
1560void
1561zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1562 nvlist_t *config)
1563{
572e2857 1564 nvlist_t *nv = NULL;
428870ff
BB
1565 int64_t loss = -1;
1566 uint64_t edata = UINT64_MAX;
1567 uint64_t rewindto;
1568 struct tm t;
1569 char timestr[128];
1570
1571 if (!hdl->libzfs_printerr)
1572 return;
1573
1574 if (reason >= 0)
1575 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1576 else
1577 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1578
1579 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
572e2857 1580 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
9ae529ec 1581 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
572e2857 1582 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
428870ff
BB
1583 goto no_info;
1584
572e2857
BB
1585 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1586 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
428870ff
BB
1587 &edata);
1588
1589 (void) printf(dgettext(TEXT_DOMAIN,
1590 "Recovery is possible, but will result in some data loss.\n"));
1591
1592 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
b8864a23 1593 strftime(timestr, 128, "%c", &t) != 0) {
428870ff
BB
1594 (void) printf(dgettext(TEXT_DOMAIN,
1595 "\tReturning the pool to its state as of %s\n"
1596 "\tshould correct the problem. "),
1597 timestr);
1598 } else {
1599 (void) printf(dgettext(TEXT_DOMAIN,
1600 "\tReverting the pool to an earlier state "
1601 "should correct the problem.\n\t"));
1602 }
1603
1604 if (loss > 120) {
1605 (void) printf(dgettext(TEXT_DOMAIN,
1606 "Approximately %lld minutes of data\n"
b8864a23
BB
1607 "\tmust be discarded, irreversibly. "),
1608 ((longlong_t)loss + 30) / 60);
428870ff
BB
1609 } else if (loss > 0) {
1610 (void) printf(dgettext(TEXT_DOMAIN,
1611 "Approximately %lld seconds of data\n"
b8864a23
BB
1612 "\tmust be discarded, irreversibly. "),
1613 (longlong_t)loss);
428870ff
BB
1614 }
1615 if (edata != 0 && edata != UINT64_MAX) {
1616 if (edata == 1) {
1617 (void) printf(dgettext(TEXT_DOMAIN,
1618 "After rewind, at least\n"
1619 "\tone persistent user-data error will remain. "));
1620 } else {
1621 (void) printf(dgettext(TEXT_DOMAIN,
1622 "After rewind, several\n"
1623 "\tpersistent user-data errors will remain. "));
1624 }
1625 }
1626 (void) printf(dgettext(TEXT_DOMAIN,
1627 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1628 reason >= 0 ? "clear" : "import", name);
1629
1630 (void) printf(dgettext(TEXT_DOMAIN,
1631 "A scrub of the pool\n"
1632 "\tis strongly recommended after recovery.\n"));
1633 return;
1634
1635no_info:
1636 (void) printf(dgettext(TEXT_DOMAIN,
1637 "Destroy and re-create the pool from\n\ta backup source.\n"));
1638}
1639
34dc7c2f
BB
1640/*
1641 * zpool_import() is a contracted interface. Should be kept the same
1642 * if possible.
1643 *
1644 * Applications should use zpool_import_props() to import a pool with
1645 * new properties value to be set.
1646 */
1647int
1648zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1649 char *altroot)
1650{
1651 nvlist_t *props = NULL;
1652 int ret;
1653
1654 if (altroot != NULL) {
1655 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1656 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1657 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1658 newname));
1659 }
1660
1661 if (nvlist_add_string(props,
fb5f0bc8
BB
1662 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1663 nvlist_add_string(props,
1664 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
34dc7c2f
BB
1665 nvlist_free(props);
1666 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1667 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1668 newname));
1669 }
1670 }
1671
572e2857
BB
1672 ret = zpool_import_props(hdl, config, newname, props,
1673 ZFS_IMPORT_NORMAL);
8a5fc748 1674 nvlist_free(props);
34dc7c2f
BB
1675 return (ret);
1676}
1677
572e2857
BB
1678static void
1679print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1680 int indent)
1681{
1682 nvlist_t **child;
1683 uint_t c, children;
1684 char *vname;
1685 uint64_t is_log = 0;
1686
1687 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1688 &is_log);
1689
1690 if (name != NULL)
1691 (void) printf("\t%*s%s%s\n", indent, "", name,
1692 is_log ? " [log]" : "");
1693
1694 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1695 &child, &children) != 0)
1696 return;
1697
1698 for (c = 0; c < children; c++) {
d2f3e292 1699 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
572e2857
BB
1700 print_vdev_tree(hdl, vname, child[c], indent + 2);
1701 free(vname);
1702 }
1703}
1704
9ae529ec
CS
1705void
1706zpool_print_unsup_feat(nvlist_t *config)
1707{
1708 nvlist_t *nvinfo, *unsup_feat;
1709 nvpair_t *nvp;
1710
1711 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1712 0);
1713 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1714 &unsup_feat) == 0);
1715
1716 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1717 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1718 char *desc;
1719
1720 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1721 verify(nvpair_value_string(nvp, &desc) == 0);
1722
1723 if (strlen(desc) > 0)
1724 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1725 else
1726 (void) printf("\t%s\n", nvpair_name(nvp));
1727 }
1728}
1729
34dc7c2f
BB
1730/*
1731 * Import the given pool using the known configuration and a list of
1732 * properties to be set. The configuration should have come from
1733 * zpool_find_import(). The 'newname' parameters control whether the pool
1734 * is imported with a different name.
1735 */
1736int
1737zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
572e2857 1738 nvlist_t *props, int flags)
34dc7c2f 1739{
13fe0198 1740 zfs_cmd_t zc = {"\0"};
428870ff 1741 zpool_rewind_policy_t policy;
572e2857
BB
1742 nvlist_t *nv = NULL;
1743 nvlist_t *nvinfo = NULL;
1744 nvlist_t *missing = NULL;
34dc7c2f
BB
1745 char *thename;
1746 char *origname;
1747 int ret;
572e2857 1748 int error = 0;
34dc7c2f
BB
1749 char errbuf[1024];
1750
1751 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1752 &origname) == 0);
1753
1754 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1755 "cannot import pool '%s'"), origname);
1756
1757 if (newname != NULL) {
1758 if (!zpool_name_valid(hdl, B_FALSE, newname))
1759 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1760 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1761 newname));
1762 thename = (char *)newname;
1763 } else {
1764 thename = origname;
1765 }
1766
0fdd8d64 1767 if (props != NULL) {
34dc7c2f 1768 uint64_t version;
572e2857 1769 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
34dc7c2f
BB
1770
1771 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1772 &version) == 0);
1773
b128c09f 1774 if ((props = zpool_valid_proplist(hdl, origname,
0fdd8d64 1775 props, version, flags, errbuf)) == NULL)
34dc7c2f 1776 return (-1);
0fdd8d64 1777 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
34dc7c2f
BB
1778 nvlist_free(props);
1779 return (-1);
1780 }
0fdd8d64 1781 nvlist_free(props);
34dc7c2f
BB
1782 }
1783
1784 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1785
1786 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1787 &zc.zc_guid) == 0);
1788
1789 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
0fdd8d64 1790 zcmd_free_nvlists(&zc);
34dc7c2f
BB
1791 return (-1);
1792 }
572e2857 1793 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
0fdd8d64 1794 zcmd_free_nvlists(&zc);
428870ff
BB
1795 return (-1);
1796 }
34dc7c2f 1797
572e2857
BB
1798 zc.zc_cookie = flags;
1799 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1800 errno == ENOMEM) {
1801 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1802 zcmd_free_nvlists(&zc);
1803 return (-1);
1804 }
1805 }
1806 if (ret != 0)
1807 error = errno;
1808
1809 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
0fdd8d64
MT
1810
1811 zcmd_free_nvlists(&zc);
1812
572e2857
BB
1813 zpool_get_rewind_policy(config, &policy);
1814
1815 if (error) {
34dc7c2f 1816 char desc[1024];
379ca9cf 1817 char aux[256];
428870ff 1818
428870ff
BB
1819 /*
1820 * Dry-run failed, but we print out what success
1821 * looks like if we found a best txg
1822 */
572e2857 1823 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
428870ff 1824 zpool_rewind_exclaim(hdl, newname ? origname : thename,
572e2857
BB
1825 B_TRUE, nv);
1826 nvlist_free(nv);
428870ff
BB
1827 return (-1);
1828 }
1829
34dc7c2f
BB
1830 if (newname == NULL)
1831 (void) snprintf(desc, sizeof (desc),
1832 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1833 thename);
1834 else
1835 (void) snprintf(desc, sizeof (desc),
1836 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1837 origname, thename);
1838
572e2857 1839 switch (error) {
34dc7c2f 1840 case ENOTSUP:
9ae529ec
CS
1841 if (nv != NULL && nvlist_lookup_nvlist(nv,
1842 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1843 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1844 (void) printf(dgettext(TEXT_DOMAIN, "This "
1845 "pool uses the following feature(s) not "
1846 "supported by this system:\n"));
1847 zpool_print_unsup_feat(nv);
1848 if (nvlist_exists(nvinfo,
1849 ZPOOL_CONFIG_CAN_RDONLY)) {
1850 (void) printf(dgettext(TEXT_DOMAIN,
1851 "All unsupported features are only "
1852 "required for writing to the pool."
1853 "\nThe pool can be imported using "
1854 "'-o readonly=on'.\n"));
1855 }
1856 }
34dc7c2f
BB
1857 /*
1858 * Unsupported version.
1859 */
1860 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1861 break;
1862
379ca9cf
OF
1863 case EREMOTEIO:
1864 if (nv != NULL && nvlist_lookup_nvlist(nv,
1865 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
1866 char *hostname = "<unknown>";
1867 uint64_t hostid = 0;
1868 mmp_state_t mmp_state;
1869
1870 mmp_state = fnvlist_lookup_uint64(nvinfo,
1871 ZPOOL_CONFIG_MMP_STATE);
1872
1873 if (nvlist_exists(nvinfo,
1874 ZPOOL_CONFIG_MMP_HOSTNAME))
1875 hostname = fnvlist_lookup_string(nvinfo,
1876 ZPOOL_CONFIG_MMP_HOSTNAME);
1877
1878 if (nvlist_exists(nvinfo,
1879 ZPOOL_CONFIG_MMP_HOSTID))
1880 hostid = fnvlist_lookup_uint64(nvinfo,
1881 ZPOOL_CONFIG_MMP_HOSTID);
1882
1883 if (mmp_state == MMP_STATE_ACTIVE) {
1884 (void) snprintf(aux, sizeof (aux),
1885 dgettext(TEXT_DOMAIN, "pool is imp"
1886 "orted on host '%s' (hostid=%lx).\n"
1887 "Export the pool on the other "
1888 "system, then run 'zpool import'."),
1889 hostname, (unsigned long) hostid);
1890 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
1891 (void) snprintf(aux, sizeof (aux),
1892 dgettext(TEXT_DOMAIN, "pool has "
1893 "the multihost property on and "
1894 "the\nsystem's hostid is not set. "
1895 "Set a unique system hostid with "
b9373170 1896 "the zgenhostid(8) command.\n"));
379ca9cf
OF
1897 }
1898
1899 (void) zfs_error_aux(hdl, aux);
1900 }
1901 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
1902 break;
1903
34dc7c2f
BB
1904 case EINVAL:
1905 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1906 break;
1907
428870ff
BB
1908 case EROFS:
1909 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1910 "one or more devices is read only"));
1911 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1912 break;
1913
572e2857
BB
1914 case ENXIO:
1915 if (nv && nvlist_lookup_nvlist(nv,
1916 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1917 nvlist_lookup_nvlist(nvinfo,
1918 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1919 (void) printf(dgettext(TEXT_DOMAIN,
1920 "The devices below are missing, use "
1921 "'-m' to import the pool anyway:\n"));
1922 print_vdev_tree(hdl, NULL, missing, 2);
1923 (void) printf("\n");
1924 }
1925 (void) zpool_standard_error(hdl, error, desc);
1926 break;
1927
1928 case EEXIST:
1929 (void) zpool_standard_error(hdl, error, desc);
1930 break;
1931
abe5b8fb
BB
1932 case EBUSY:
1933 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1934 "one or more devices are already in use\n"));
1935 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1936 break;
d1d19c78
PD
1937 case ENAMETOOLONG:
1938 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1939 "new name of at least one dataset is longer than "
1940 "the maximum allowable length"));
1941 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1942 break;
34dc7c2f 1943 default:
572e2857 1944 (void) zpool_standard_error(hdl, error, desc);
428870ff 1945 zpool_explain_recover(hdl,
572e2857 1946 newname ? origname : thename, -error, nv);
428870ff 1947 break;
34dc7c2f
BB
1948 }
1949
572e2857 1950 nvlist_free(nv);
34dc7c2f
BB
1951 ret = -1;
1952 } else {
1953 zpool_handle_t *zhp;
1954
1955 /*
1956 * This should never fail, but play it safe anyway.
1957 */
428870ff 1958 if (zpool_open_silent(hdl, thename, &zhp) != 0)
34dc7c2f 1959 ret = -1;
428870ff 1960 else if (zhp != NULL)
34dc7c2f 1961 zpool_close(zhp);
428870ff
BB
1962 if (policy.zrp_request &
1963 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1964 zpool_rewind_exclaim(hdl, newname ? origname : thename,
572e2857 1965 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
34dc7c2f 1966 }
572e2857 1967 nvlist_free(nv);
428870ff 1968 return (0);
34dc7c2f
BB
1969 }
1970
34dc7c2f
BB
1971 return (ret);
1972}
1973
1974/*
428870ff 1975 * Scan the pool.
34dc7c2f
BB
1976 */
1977int
0ea05c64 1978zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
34dc7c2f 1979{
13fe0198 1980 zfs_cmd_t zc = {"\0"};
34dc7c2f 1981 char msg[1024];
0ea05c64 1982 int err;
34dc7c2f
BB
1983 libzfs_handle_t *hdl = zhp->zpool_hdl;
1984
1985 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
428870ff 1986 zc.zc_cookie = func;
0ea05c64 1987 zc.zc_flags = cmd;
34dc7c2f 1988
0ea05c64
AP
1989 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
1990 return (0);
1991
1992 err = errno;
1993
1994 /* ECANCELED on a scrub means we resumed a paused scrub */
1995 if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
1996 cmd == POOL_SCRUB_NORMAL)
1997 return (0);
1998
1999 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
34dc7c2f
BB
2000 return (0);
2001
428870ff 2002 if (func == POOL_SCAN_SCRUB) {
0ea05c64
AP
2003 if (cmd == POOL_SCRUB_PAUSE) {
2004 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2005 "cannot pause scrubbing %s"), zc.zc_name);
2006 } else {
2007 assert(cmd == POOL_SCRUB_NORMAL);
2008 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2009 "cannot scrub %s"), zc.zc_name);
2010 }
428870ff
BB
2011 } else if (func == POOL_SCAN_NONE) {
2012 (void) snprintf(msg, sizeof (msg),
2013 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
2014 zc.zc_name);
2015 } else {
2016 assert(!"unexpected result");
2017 }
34dc7c2f 2018
0ea05c64 2019 if (err == EBUSY) {
428870ff
BB
2020 nvlist_t *nvroot;
2021 pool_scan_stat_t *ps = NULL;
2022 uint_t psc;
2023
2024 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2025 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2026 (void) nvlist_lookup_uint64_array(nvroot,
2027 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
0ea05c64
AP
2028 if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
2029 if (cmd == POOL_SCRUB_PAUSE)
2030 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
2031 else
2032 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
2033 } else {
428870ff 2034 return (zfs_error(hdl, EZFS_RESILVERING, msg));
0ea05c64
AP
2035 }
2036 } else if (err == ENOENT) {
428870ff
BB
2037 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
2038 } else {
0ea05c64 2039 return (zpool_standard_error(hdl, err, msg));
428870ff
BB
2040 }
2041}
2042
34dc7c2f 2043/*
9babb374
BB
2044 * Find a vdev that matches the search criteria specified. We use the
2045 * the nvpair name to determine how we should look for the device.
34dc7c2f
BB
2046 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2047 * spare; but FALSE if its an INUSE spare.
2048 */
2049static nvlist_t *
9babb374
BB
2050vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2051 boolean_t *l2cache, boolean_t *log)
34dc7c2f
BB
2052{
2053 uint_t c, children;
2054 nvlist_t **child;
34dc7c2f 2055 nvlist_t *ret;
b128c09f 2056 uint64_t is_log;
9babb374
BB
2057 char *srchkey;
2058 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2059
2060 /* Nothing to look for */
2061 if (search == NULL || pair == NULL)
2062 return (NULL);
2063
2064 /* Obtain the key we will use to search */
2065 srchkey = nvpair_name(pair);
2066
2067 switch (nvpair_type(pair)) {
572e2857 2068 case DATA_TYPE_UINT64:
9babb374 2069 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
572e2857
BB
2070 uint64_t srchval, theguid;
2071
2072 verify(nvpair_value_uint64(pair, &srchval) == 0);
2073 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2074 &theguid) == 0);
2075 if (theguid == srchval)
2076 return (nv);
9babb374
BB
2077 }
2078 break;
9babb374
BB
2079
2080 case DATA_TYPE_STRING: {
2081 char *srchval, *val;
2082
2083 verify(nvpair_value_string(pair, &srchval) == 0);
2084 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2085 break;
34dc7c2f 2086
9babb374 2087 /*
428870ff
BB
2088 * Search for the requested value. Special cases:
2089 *
eac47204
BB
2090 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2091 * "-part1", or "p1". The suffix is hidden from the user,
2092 * but included in the string, so this matches around it.
2093 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
2094 * is used to check all possible expanded paths.
428870ff
BB
2095 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2096 *
2097 * Otherwise, all other searches are simple string compares.
9babb374 2098 */
a2c6816c 2099 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
9babb374
BB
2100 uint64_t wholedisk = 0;
2101
2102 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2103 &wholedisk);
eac47204
BB
2104 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2105 return (nv);
428870ff 2106
428870ff
BB
2107 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2108 char *type, *idx, *end, *p;
2109 uint64_t id, vdev_id;
2110
2111 /*
2112 * Determine our vdev type, keeping in mind
2113 * that the srchval is composed of a type and
2114 * vdev id pair (i.e. mirror-4).
2115 */
2116 if ((type = strdup(srchval)) == NULL)
2117 return (NULL);
2118
2119 if ((p = strrchr(type, '-')) == NULL) {
2120 free(type);
2121 break;
2122 }
2123 idx = p + 1;
2124 *p = '\0';
2125
2126 /*
2127 * If the types don't match then keep looking.
2128 */
2129 if (strncmp(val, type, strlen(val)) != 0) {
2130 free(type);
2131 break;
2132 }
2133
2134 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2135 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2136 strncmp(type, VDEV_TYPE_MIRROR,
2137 strlen(VDEV_TYPE_MIRROR)) == 0);
2138 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2139 &id) == 0);
2140
2141 errno = 0;
2142 vdev_id = strtoull(idx, &end, 10);
2143
2144 free(type);
2145 if (errno != 0)
2146 return (NULL);
2147
2148 /*
2149 * Now verify that we have the correct vdev id.
2150 */
2151 if (vdev_id == id)
2152 return (nv);
9babb374 2153 }
34dc7c2f 2154
34dc7c2f 2155 /*
9babb374 2156 * Common case
34dc7c2f 2157 */
9babb374 2158 if (strcmp(srchval, val) == 0)
34dc7c2f 2159 return (nv);
9babb374
BB
2160 break;
2161 }
2162
2163 default:
2164 break;
34dc7c2f
BB
2165 }
2166
2167 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2168 &child, &children) != 0)
2169 return (NULL);
2170
b128c09f 2171 for (c = 0; c < children; c++) {
9babb374 2172 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f
BB
2173 avail_spare, l2cache, NULL)) != NULL) {
2174 /*
2175 * The 'is_log' value is only set for the toplevel
2176 * vdev, not the leaf vdevs. So we always lookup the
2177 * log device from the root of the vdev tree (where
2178 * 'log' is non-NULL).
2179 */
2180 if (log != NULL &&
2181 nvlist_lookup_uint64(child[c],
2182 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2183 is_log) {
2184 *log = B_TRUE;
2185 }
34dc7c2f 2186 return (ret);
b128c09f
BB
2187 }
2188 }
34dc7c2f
BB
2189
2190 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2191 &child, &children) == 0) {
2192 for (c = 0; c < children; c++) {
9babb374 2193 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 2194 avail_spare, l2cache, NULL)) != NULL) {
34dc7c2f
BB
2195 *avail_spare = B_TRUE;
2196 return (ret);
2197 }
2198 }
2199 }
2200
2201 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2202 &child, &children) == 0) {
2203 for (c = 0; c < children; c++) {
9babb374 2204 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 2205 avail_spare, l2cache, NULL)) != NULL) {
34dc7c2f
BB
2206 *l2cache = B_TRUE;
2207 return (ret);
2208 }
2209 }
2210 }
2211
2212 return (NULL);
2213}
2214
9babb374
BB
2215/*
2216 * Given a physical path (minus the "/devices" prefix), find the
2217 * associated vdev.
2218 */
2219nvlist_t *
2220zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2221 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2222{
2223 nvlist_t *search, *nvroot, *ret;
2224
2225 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2226 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2227
2228 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2229 &nvroot) == 0);
2230
2231 *avail_spare = B_FALSE;
572e2857
BB
2232 *l2cache = B_FALSE;
2233 if (log != NULL)
2234 *log = B_FALSE;
9babb374
BB
2235 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2236 nvlist_free(search);
2237
2238 return (ret);
2239}
2240
428870ff
BB
2241/*
2242 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2243 */
2244boolean_t
2245zpool_vdev_is_interior(const char *name)
2246{
2247 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2248 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2249 return (B_TRUE);
2250 return (B_FALSE);
2251}
2252
34dc7c2f
BB
2253nvlist_t *
2254zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
b128c09f 2255 boolean_t *l2cache, boolean_t *log)
34dc7c2f 2256{
34dc7c2f 2257 char *end;
9babb374 2258 nvlist_t *nvroot, *search, *ret;
34dc7c2f
BB
2259 uint64_t guid;
2260
9babb374
BB
2261 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2262
1a5c611a 2263 guid = strtoull(path, &end, 0);
34dc7c2f 2264 if (guid != 0 && *end == '\0') {
9babb374 2265 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
428870ff
BB
2266 } else if (zpool_vdev_is_interior(path)) {
2267 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
34dc7c2f 2268 } else {
9babb374 2269 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
34dc7c2f
BB
2270 }
2271
2272 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2273 &nvroot) == 0);
2274
2275 *avail_spare = B_FALSE;
2276 *l2cache = B_FALSE;
b128c09f
BB
2277 if (log != NULL)
2278 *log = B_FALSE;
9babb374
BB
2279 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2280 nvlist_free(search);
2281
2282 return (ret);
b128c09f
BB
2283}
2284
2285static int
379ca9cf 2286vdev_is_online(nvlist_t *nv)
b128c09f
BB
2287{
2288 uint64_t ival;
2289
2290 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2291 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2292 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2293 return (0);
2294
2295 return (1);
2296}
2297
2298/*
9babb374 2299 * Helper function for zpool_get_physpaths().
b128c09f 2300 */
9babb374
BB
2301static int
2302vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2303 size_t *bytes_written)
2304{
2305 size_t bytes_left, pos, rsz;
2306 char *tmppath;
2307 const char *format;
2308
2309 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2310 &tmppath) != 0)
2311 return (EZFS_NODEVICE);
2312
2313 pos = *bytes_written;
2314 bytes_left = physpath_size - pos;
2315 format = (pos == 0) ? "%s" : " %s";
2316
2317 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2318 *bytes_written += rsz;
2319
2320 if (rsz >= bytes_left) {
2321 /* if physpath was not copied properly, clear it */
2322 if (bytes_left != 0) {
2323 physpath[pos] = 0;
2324 }
2325 return (EZFS_NOSPC);
2326 }
2327 return (0);
2328}
2329
/*
 * Recursively collect the physical paths of all online disk vdevs under
 * 'nv' into 'physpath', tracking the bytes consumed in *rsz.
 *
 * NOTE(review): this function falls through to EZFS_POOL_INVALARG even
 * after successfully appending paths; the caller appears to rely on
 * *rsz rather than the return value to detect success — confirm before
 * changing the tail.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		/* Only online disks contribute a bootable physical path. */
		if (vdev_is_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		/*
		 * Deliberate assignment above: descending into a spare
		 * group sets is_spare for the child recursion so only
		 * the active spare is picked up.
		 */
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			/* Buffer exhaustion is the only fatal child error. */
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
2381
2382/*
2383 * Get phys_path for a root pool config.
2384 * Return 0 on success; non-zero on failure.
2385 */
2386static int
2387zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
b128c09f 2388{
9babb374 2389 size_t rsz;
b128c09f
BB
2390 nvlist_t *vdev_root;
2391 nvlist_t **child;
2392 uint_t count;
9babb374 2393 char *type;
b128c09f 2394
9babb374 2395 rsz = 0;
b128c09f 2396
9babb374
BB
2397 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2398 &vdev_root) != 0)
2399 return (EZFS_INVALCONFIG);
b128c09f 2400
9babb374
BB
2401 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2402 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
b128c09f 2403 &child, &count) != 0)
9babb374 2404 return (EZFS_INVALCONFIG);
b128c09f 2405
9babb374 2406 /*
986dd8aa 2407 * root pool can only have a single top-level vdev.
9babb374 2408 */
986dd8aa 2409 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
9babb374 2410 return (EZFS_POOL_INVALARG);
b128c09f 2411
9babb374
BB
2412 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2413 B_FALSE);
2414
2415 /* No online devices */
2416 if (rsz == 0)
2417 return (EZFS_NODEVICE);
b128c09f
BB
2418
2419 return (0);
34dc7c2f
BB
2420}
2421
9babb374
BB
2422/*
2423 * Get phys_path for a root pool
2424 * Return 0 on success; non-zero on failure.
2425 */
2426int
2427zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2428{
2429 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2430 phypath_size));
2431}
2432
9babb374
BB
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 *
 * 'msg' is the operation description used to prefix any reported error.
 * Returns 0 on success or a libzfs error via zfs_error() on failure.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
	int fd, error;

	/* O_DIRECT so the label update is not served from the page cache. */
	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, msg));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 *
	 * Also, we don't call efi_rescan() - that would just return EBUSY.
	 * The module will do it for us in vdev_disk_open().
	 */
	error = efi_use_whole_disk(fd);

	/* Flush the buffers to disk and invalidate the page cache. */
	(void) fsync(fd);
	(void) ioctl(fd, BLKFLSBUF);

	(void) close(fd);
	/* VT_ENOSPC just means there was no free space to claim; not fatal. */
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, msg));
	}

	return (0);
}
2471
4a283c7f
TH
2472/*
2473 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2474 *
2475 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
2476 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
2477 * ignore them.
2478 */
2479static uint64_t
2480zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2481 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2482{
2483 uint64_t guid;
2484 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2485 nvlist_t *tgt;
2486
2487 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2488 &log)) == NULL)
2489 return (0);
2490
2491 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2492 if (is_spare != NULL)
2493 *is_spare = spare;
2494 if (is_l2cache != NULL)
2495 *is_l2cache = l2cache;
2496 if (is_log != NULL)
2497 *is_log = log;
2498
2499 return (guid);
2500}
2501
2502/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2503uint64_t
2504zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2505{
2506 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2507}
2508
34dc7c2f
BB
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.  On success the resulting vdev state is stored in
 * *newstate.  Returns 0 on success, or a libzfs error code on failure.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	char *pathname;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	/* Error prefix depends on whether this is an online or an expand. */
	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	/*
	 * Whole-disk vdevs may need relabeling before an expand, either
	 * when explicitly requested or when autoexpand is on.
	 */
	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			/* Expand a short name into an absolute device path. */
			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		/* EINVAL here means the device now belongs to a split pool. */
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* The kernel returns the resulting vdev state in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}
2596
2597/*
2598 * Take the specified vdev offline
2599 */
2600int
2601zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2602{
13fe0198 2603 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2604 char msg[1024];
2605 nvlist_t *tgt;
2606 boolean_t avail_spare, l2cache;
2607 libzfs_handle_t *hdl = zhp->zpool_hdl;
2608
2609 (void) snprintf(msg, sizeof (msg),
2610 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2611
2612 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2613 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2614 NULL)) == NULL)
34dc7c2f
BB
2615 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2616
2617 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2618
428870ff 2619 if (avail_spare)
34dc7c2f
BB
2620 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2621
34dc7c2f
BB
2622 zc.zc_cookie = VDEV_STATE_OFFLINE;
2623 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2624
572e2857 2625 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2626 return (0);
2627
2628 switch (errno) {
2629 case EBUSY:
2630
2631 /*
2632 * There are no other replicas of this device.
2633 */
2634 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2635
9babb374
BB
2636 case EEXIST:
2637 /*
2638 * The log device has unplayed logs
2639 */
2640 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2641
34dc7c2f
BB
2642 default:
2643 return (zpool_standard_error(hdl, errno, msg));
2644 }
2645}
2646
2647/*
2648 * Mark the given vdev faulted.
2649 */
2650int
428870ff 2651zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2652{
13fe0198 2653 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2654 char msg[1024];
2655 libzfs_handle_t *hdl = zhp->zpool_hdl;
2656
2657 (void) snprintf(msg, sizeof (msg),
d1d7e268 2658 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2659
2660 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2661 zc.zc_guid = guid;
2662 zc.zc_cookie = VDEV_STATE_FAULTED;
428870ff 2663 zc.zc_obj = aux;
34dc7c2f 2664
572e2857 2665 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2666 return (0);
2667
2668 switch (errno) {
2669 case EBUSY:
2670
2671 /*
2672 * There are no other replicas of this device.
2673 */
2674 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2675
2676 default:
2677 return (zpool_standard_error(hdl, errno, msg));
2678 }
2679
2680}
2681
2682/*
2683 * Mark the given vdev degraded.
2684 */
2685int
428870ff 2686zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2687{
13fe0198 2688 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
2689 char msg[1024];
2690 libzfs_handle_t *hdl = zhp->zpool_hdl;
2691
2692 (void) snprintf(msg, sizeof (msg),
d1d7e268 2693 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2694
2695 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2696 zc.zc_guid = guid;
2697 zc.zc_cookie = VDEV_STATE_DEGRADED;
428870ff 2698 zc.zc_obj = aux;
34dc7c2f 2699
572e2857 2700 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2701 return (0);
2702
2703 return (zpool_standard_error(hdl, errno, msg));
2704}
2705
2706/*
2707 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2708 * a hot spare.
2709 */
2710static boolean_t
2711is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2712{
2713 nvlist_t **child;
2714 uint_t c, children;
2715 char *type;
2716
2717 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2718 &children) == 0) {
2719 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2720 &type) == 0);
2721
2722 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2723 children == 2 && child[which] == tgt)
2724 return (B_TRUE);
2725
2726 for (c = 0; c < children; c++)
2727 if (is_replacing_spare(child[c], tgt, which))
2728 return (B_TRUE);
2729 }
2730
2731 return (B_FALSE);
2732}
2733
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 *
 * Returns 0 on success, -1 on failure (with a libzfs error reported
 * through 'hdl').
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/* The attach target must be an existing plain vdev, not aux storage. */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	/* The ioctl failed; translate errno into a meaningful diagnostic. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
2900
/*
 * Detach the specified device.
 *
 * Only valid for mirror and replacing vdev members; spares and cache
 * devices are rejected up front.  Returns 0 on success, -1 on failure.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Spares and cache devices are removed, not detached. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
2956
428870ff
BB
2957/*
2958 * Find a mirror vdev in the source nvlist.
2959 *
2960 * The mchild array contains a list of disks in one of the top-level mirrors
2961 * of the source pool. The schild array contains a list of disks that the
2962 * user specified on the command line. We loop over the mchild array to
2963 * see if any entry in the schild array matches.
2964 *
2965 * If a disk in the mchild array is found in the schild array, we return
2966 * the index of that entry. Otherwise we return -1.
2967 */
2968static int
2969find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2970 nvlist_t **schild, uint_t schildren)
2971{
2972 uint_t mc;
2973
2974 for (mc = 0; mc < mchildren; mc++) {
2975 uint_t sc;
2976 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
d2f3e292 2977 mchild[mc], 0);
428870ff
BB
2978
2979 for (sc = 0; sc < schildren; sc++) {
2980 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
d2f3e292 2981 schild[sc], 0);
428870ff
BB
2982 boolean_t result = (strcmp(mpath, spath) == 0);
2983
2984 free(spath);
2985 if (result) {
2986 free(mpath);
2987 return (mc);
2988 }
2989 }
2990
2991 free(mpath);
2992 }
2993
2994 return (-1);
2995}
2996
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 *
 * One disk from each top-level mirror becomes the new pool 'newname';
 * disks not listed by the user default to the last child of each mirror.
 * 'flags.dryrun' validates and builds the config without issuing the
 * ioctl; 'flags.import' leaves the new pool imported afterwards.
 * Returns 0 on success, or -1 / a libzfs error code on failure.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	/* varray collects the child vdevs that will form the new pool. */
	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			/* Remember where trailing log holes begin. */
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		/* We own *newroot until it is successfully handed off. */
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	/* Single cleanup path: release everything still owned here. */
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	/* memory_err still set means we bailed out of an allocation. */
	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
3205
34dc7c2f 3206/*
d1502e9e
RL
3207 * Remove the given device. Currently, this is supported only for hot spares,
3208 * cache, and log devices.
34dc7c2f
BB
3209 */
3210int
3211zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3212{
13fe0198 3213 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3214 char msg[1024];
3215 nvlist_t *tgt;
428870ff 3216 boolean_t avail_spare, l2cache, islog;
34dc7c2f 3217 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 3218 uint64_t version;
34dc7c2f
BB
3219
3220 (void) snprintf(msg, sizeof (msg),
3221 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3222
3223 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 3224 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
428870ff 3225 &islog)) == 0)
34dc7c2f 3226 return (zfs_error(hdl, EZFS_NODEVICE, msg));
428870ff
BB
3227 /*
3228 * XXX - this should just go away.
3229 */
3230 if (!avail_spare && !l2cache && !islog) {
34dc7c2f 3231 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
d1502e9e 3232 "only inactive hot spares, cache, "
428870ff 3233 "or log devices can be removed"));
34dc7c2f
BB
3234 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3235 }
3236
428870ff
BB
3237 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3238 if (islog && version < SPA_VERSION_HOLES) {
3239 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3240 "pool must be upgrade to support log removal"));
3241 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3242 }
3243
34dc7c2f
BB
3244 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3245
3246 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3247 return (0);
3248
3249 return (zpool_standard_error(hdl, errno, msg));
3250}
3251
/*
 * Clear the errors for the pool, or the particular device if specified.
 *
 * 'path' may be NULL to clear errors pool-wide.  'rewindnvl' carries the
 * caller's rewind policy (see zpool_get_rewind_policy()); when a rewind is
 * requested the kernel's rewind report is read back and explained to the
 * user via zpool_rewind_exclaim().
 *
 * Returns 0 on success, -1 on internal nvlist failures, or a libzfs error.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	/* dst buffer receives the kernel's rewind report nvlist */
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	/* retry with a larger dst buffer for as long as we get ENOMEM */
	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	/*
	 * Success, or a TRY_REWIND probe that failed for a reason other
	 * than permissions: in both cases report any rewind outcome to
	 * the user and return 0.  Note errno here is from the last ioctl.
	 */
	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
3327
3328/*
3329 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3330 */
3331int
3332zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3333{
13fe0198 3334 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3335 char msg[1024];
3336 libzfs_handle_t *hdl = zhp->zpool_hdl;
3337
3338 (void) snprintf(msg, sizeof (msg),
3339 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
d1d7e268 3340 (u_longlong_t)guid);
34dc7c2f
BB
3341
3342 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3343 zc.zc_guid = guid;
428870ff 3344 zc.zc_cookie = ZPOOL_NO_REWIND;
34dc7c2f
BB
3345
3346 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3347 return (0);
3348
3349 return (zpool_standard_error(hdl, errno, msg));
3350}
3351
3541dc6d
GA
3352/*
3353 * Change the GUID for a pool.
3354 */
3355int
3356zpool_reguid(zpool_handle_t *zhp)
3357{
3358 char msg[1024];
3359 libzfs_handle_t *hdl = zhp->zpool_hdl;
13fe0198 3360 zfs_cmd_t zc = {"\0"};
3541dc6d
GA
3361
3362 (void) snprintf(msg, sizeof (msg),
3363 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3364
3365 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3366 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3367 return (0);
3368
3369 return (zpool_standard_error(hdl, errno, msg));
3370}
3371
1bd201e7
CS
3372/*
3373 * Reopen the pool.
3374 */
3375int
d3f2cd7e 3376zpool_reopen_one(zpool_handle_t *zhp, void *data)
1bd201e7 3377{
d3f2cd7e
AB
3378 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3379 const char *pool_name = zpool_get_name(zhp);
3380 boolean_t *scrub_restart = data;
3381 int error;
1bd201e7 3382
d3f2cd7e
AB
3383 error = lzc_reopen(pool_name, *scrub_restart);
3384 if (error) {
3385 return (zpool_standard_error_fmt(hdl, error,
3386 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
3387 }
1bd201e7 3388
d3f2cd7e 3389 return (0);
1bd201e7
CS
3390}
3391
bec1067d
AP
3392/* call into libzfs_core to execute the sync IOCTL per pool */
3393int
3394zpool_sync_one(zpool_handle_t *zhp, void *data)
3395{
3396 int ret;
3397 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3398 const char *pool_name = zpool_get_name(zhp);
3399 boolean_t *force = data;
3400 nvlist_t *innvl = fnvlist_alloc();
3401
3402 fnvlist_add_boolean_value(innvl, "force", *force);
3403 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3404 nvlist_free(innvl);
3405 return (zpool_standard_error_fmt(hdl, ret,
3406 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3407 }
3408 nvlist_free(innvl);
3409
3410 return (0);
3411}
3412
39fc0cb5 3413#if defined(__sun__) || defined(__sun)
34dc7c2f
BB
/*
 * Convert from a devid string to a path.
 *
 * Solaris-only (compiled under the surrounding #if defined(__sun__) guard).
 * Returns a malloc'd copy of the first matching /dev name, or NULL on any
 * decode/lookup/allocation failure.  Caller must free() the result.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	/* decode results are no longer needed regardless of outcome */
	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In a case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}
3446
/*
 * Convert from a path to a devid string.
 *
 * Solaris-only.  Opens the device read-only and encodes its devid plus
 * minor name.  Returns NULL if the device cannot be opened or has no
 * devid.  Caller frees the result (with devid_str_free(), per the
 * devid_str_encode() contract).
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		/* minor name is optional; encode works with minor == NULL */
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
3473
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev. We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 *
 * Solaris-only.  'nv' must contain ZPOOL_CONFIG_GUID identifying the vdev.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0"};

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	/* best-effort: result deliberately ignored (see comment above) */
	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
39fc0cb5 3491#endif /* sun */
34dc7c2f 3492
83c62c93
NB
3493/*
3494 * Remove partition suffix from a vdev path. Partition suffixes may take three
3495 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3496 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0" The
3497 * third case only occurs when preceded by a string matching the regular
541da993 3498 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
d02ca379
DB
3499 *
3500 * caller must free the returned string
83c62c93 3501 */
d02ca379 3502char *
6078881a 3503zfs_strip_partition(char *path)
83c62c93 3504{
6078881a 3505 char *tmp = strdup(path);
83c62c93 3506 char *part = NULL, *d = NULL;
6078881a
TH
3507 if (!tmp)
3508 return (NULL);
83c62c93
NB
3509
3510 if ((part = strstr(tmp, "-part")) && part != tmp) {
3511 d = part + 5;
3512 } else if ((part = strrchr(tmp, 'p')) &&
3513 part > tmp + 1 && isdigit(*(part-1))) {
3514 d = part + 1;
541da993
RY
3515 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3516 tmp[1] == 'd') {
02730c33 3517 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
541da993 3518 } else if (strncmp("xvd", tmp, 3) == 0) {
02730c33 3519 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
83c62c93
NB
3520 }
3521 if (part && d && *d != '\0') {
02730c33 3522 for (; isdigit(*d); d++) { }
83c62c93
NB
3523 if (*d == '\0')
3524 *part = '\0';
3525 }
6078881a 3526
83c62c93
NB
3527 return (tmp);
3528}
3529
8720e9e7
TH
/*
 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
 *
 * path: /dev/sda1
 * returns: /dev/sda
 *
 * Returned string must be freed.
 *
 * Fix: the original dereferenced strrchr() + 1 unconditionally; a path
 * with no '/' made that undefined behavior.  Such a path now fails
 * cleanly with NULL.
 */
char *
zfs_strip_partition_path(char *path)
{
	char *newpath = strdup(path);
	char *sd_offset;
	char *new_sd;

	if (!newpath)
		return (NULL);

	/* Point to "sda1" part of "/dev/sda1"; bail if there is no '/' */
	sd_offset = strrchr(newpath, '/');
	if (sd_offset == NULL) {
		free(newpath);
		return (NULL);
	}
	sd_offset++;

	/* Get our new name "sda" */
	new_sd = zfs_strip_partition(sd_offset);
	if (!new_sd) {
		free(newpath);
		return (NULL);
	}

	/* Paste the "sda" where "sda1" was */
	strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);

	/* Free temporary "sda" */
	free(new_sd);

	return (newpath);
}
3566
858219cc
NB
3567#define PATH_BUF_LEN 64
3568
34dc7c2f
BB
/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 *
 * Returns a zfs_strdup()'d string; caller frees.  'name_flags' is a mask of
 * VDEV_NAME_* bits and may be augmented by the ZPOOL_VDEV_NAME_* environment
 * variables below.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	/*
	 * Environment variables can force the PATH/GUID/FOLLOW_LINKS flags
	 * on; any positive number, "YES..." or "ON..." prefix enables them.
	 */
	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	/* missing vdevs (and explicit GUID requests) display as the guid */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
		/*
		 * Live VDEV path updates to a kernel VDEV during a
		 * zpool_vdev_name lookup are not supported on Linux.
		 */
		char *devid;
		vdev_stat_t *vs;
		uint_t vsc;

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}
#endif /* sun */

		/* optionally resolve symlinks (e.g. /dev/disk/by-id names) */
		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		/* no path: fall back to the vdev type as the display name */
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
3725
/*
 * qsort() comparator used by zpool_get_errlog(): orders two
 * zbookmark_phys_t entries by a raw byte-wise comparison.
 */
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
3731
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 *
 * On success *nverrlistp is a freshly allocated nvlist whose "ejk" entries
 * each hold a ZPOOL_ERR_DATASET/ZPOOL_ERR_OBJECT pair; caller frees it.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t));
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			/* buffer is re-allocated below on ENOMEM */
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				/* kernel reported the size it needs */
				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
3831
3832/*
3833 * Upgrade a ZFS pool to the latest on-disk version.
3834 */
3835int
3836zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3837{
13fe0198 3838 zfs_cmd_t zc = {"\0"};
34dc7c2f
BB
3839 libzfs_handle_t *hdl = zhp->zpool_hdl;
3840
3841 (void) strcpy(zc.zc_name, zhp->zpool_name);
3842 zc.zc_cookie = new_version;
3843
3844 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3845 return (zpool_standard_error_fmt(hdl, errno,
3846 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3847 zhp->zpool_name));
3848 return (0);
3849}
3850
3851void
6f1ffb06 3852zfs_save_arguments(int argc, char **argv, char *string, int len)
34dc7c2f
BB
3853{
3854 int i;
3855
6f1ffb06 3856 (void) strlcpy(string, basename(argv[0]), len);
34dc7c2f 3857 for (i = 1; i < argc; i++) {
6f1ffb06
MA
3858 (void) strlcat(string, " ", len);
3859 (void) strlcat(string, argv[i], len);
34dc7c2f
BB
3860 }
3861}
3862
34dc7c2f 3863int
6f1ffb06
MA
3864zpool_log_history(libzfs_handle_t *hdl, const char *message)
3865{
13fe0198 3866 zfs_cmd_t zc = {"\0"};
6f1ffb06
MA
3867 nvlist_t *args;
3868 int err;
3869
3870 args = fnvlist_alloc();
3871 fnvlist_add_string(args, "message", message);
3872 err = zcmd_write_src_nvlist(hdl, &zc, args);
3873 if (err == 0)
3874 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3875 nvlist_free(args);
3876 zcmd_free_nvlists(&zc);
3877 return (err);
34dc7c2f
BB
3878}
3879
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	/* map the interesting errno values to specific libzfs errors */
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	/* report back how much was read and where the next read starts */
	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
3928
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 *
 * Each record is a little-endian 64-bit length followed by a packed nvlist.
 * Returns 0 on success or ENOMEM on unpack/allocation failure.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;
	void *tmp;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		/* partial record: stop; the caller refills and retries */
		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		/*
		 * Double the array capacity whenever the element count
		 * crosses a power of two, so realloc cost stays amortized.
		 */
		if (ISP2(*numrecords + 1)) {
			tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
3976
34dc7c2f
BB
3977/*
3978 * Retrieve the command history of a pool.
3979 */
3980int
3981zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3982{
1f6f97f3
MA
3983 char *buf;
3984 int buflen = 128 * 1024;
34dc7c2f
BB
3985 uint64_t off = 0;
3986 nvlist_t **records = NULL;
3987 uint_t numrecords = 0;
3988 int err, i;
3989
1f6f97f3
MA
3990 buf = malloc(buflen);
3991 if (buf == NULL)
3992 return (ENOMEM);
34dc7c2f 3993 do {
1f6f97f3 3994 uint64_t bytes_read = buflen;
34dc7c2f
BB
3995 uint64_t leftover;
3996
3997 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3998 break;
3999
4000 /* if nothing else was read in, we're at EOF, just return */
4001 if (!bytes_read)
4002 break;
4003
4004 if ((err = zpool_history_unpack(buf, bytes_read,
4005 &leftover, &records, &numrecords)) != 0)
4006 break;
4007 off -= leftover;
1f6f97f3
MA
4008 if (leftover == bytes_read) {
4009 /*
4010 * no progress made, because buffer is not big enough
4011 * to hold this record; resize and retry.
4012 */
4013 buflen *= 2;
4014 free(buf);
4015 buf = malloc(buflen);
4016 if (buf == NULL)
4017 return (ENOMEM);
4018 }
34dc7c2f
BB
4019
4020 /* CONSTCOND */
4021 } while (1);
4022
1f6f97f3
MA
4023 free(buf);
4024
34dc7c2f
BB
4025 if (!err) {
4026 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4027 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4028 records, numrecords) == 0);
4029 }
4030 for (i = 0; i < numrecords; i++)
4031 nvlist_free(records[i]);
4032 free(records);
4033
4034 return (err);
4035}
4036
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			/* grow the dst buffer and reissue the ioctl */
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	/* the kernel reports the dropped-event count in zc_cookie */
	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
4104
4105/*
4106 * Clear all events.
4107 */
4108int
4109zpool_events_clear(libzfs_handle_t *hdl, int *count)
4110{
13fe0198 4111 zfs_cmd_t zc = {"\0"};
26685276
BB
4112 char msg[1024];
4113
4114 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4115 "cannot clear events"));
4116
4117 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4118 return (zpool_standard_error_fmt(hdl, errno, msg));
4119
4120 if (count != NULL)
4121 *count = (int)zc.zc_cookie; /* # of events cleared */
4122
4123 return (0);
4124}
4125
75e3ff58
BB
4126/*
4127 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4128 * the passed zevent_fd file handle. On success zero is returned,
4129 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4130 */
4131int
4132zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4133{
4134 zfs_cmd_t zc = {"\0"};
4135 int error = 0;
4136
4137 zc.zc_guid = eid;
4138 zc.zc_cleanup_fd = zevent_fd;
4139
4140 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4141 switch (errno) {
4142 case ENOENT:
4143 error = zfs_error_fmt(hdl, EZFS_NOENT,
4144 dgettext(TEXT_DOMAIN, "cannot get event"));
4145 break;
4146
4147 case ENOMEM:
4148 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4149 dgettext(TEXT_DOMAIN, "cannot get event"));
4150 break;
4151
4152 default:
4153 error = zpool_standard_error_fmt(hdl, errno,
4154 dgettext(TEXT_DOMAIN, "cannot get event"));
4155 break;
4156 }
4157 }
4158
4159 return (error);
4160}
4161
34dc7c2f
BB
/*
 * Translate a (dataset objset id, object id) pair from the error log into
 * a human-readable path, written into 'pathname' (at most 'len' bytes).
 * Falls back to "<dataset>:<0xobj>" style strings whenever a lookup fails,
 * so this never reports an error to the caller.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			/* absolute path under the dataset's mountpoint */
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			/* unmounted: qualify with the dataset name */
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
4211
b128c09f
BB
4212/*
4213 * Read the EFI label from the config, if a label does not exist then
4214 * pass back the error to the caller. If the caller has passed a non-NULL
4215 * diskaddr argument then we set it to the starting address of the EFI
4216 * partition.
4217 */
4218static int
4219read_efi_label(nvlist_t *config, diskaddr_t *sb)
4220{
4221 char *path;
4222 int fd;
4223 char diskname[MAXPATHLEN];
4224 int err = -1;
4225
4226 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4227 return (err);
4228
eac47204 4229 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
b128c09f 4230 strrchr(path, '/'));
dbb38f66 4231 if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
b128c09f
BB
4232 struct dk_gpt *vtoc;
4233
4234 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4235 if (sb != NULL)
4236 *sb = vtoc->efi_parts[0].p_start;
4237 efi_free(vtoc);
4238 }
4239 (void) close(fd);
4240 }
4241 return (err);
4242}
4243
34dc7c2f
BB
4244/*
4245 * determine where a partition starts on a disk in the current
4246 * configuration
4247 */
4248static diskaddr_t
4249find_start_block(nvlist_t *config)
4250{
4251 nvlist_t **child;
4252 uint_t c, children;
34dc7c2f 4253 diskaddr_t sb = MAXOFFSET_T;
34dc7c2f
BB
4254 uint64_t wholedisk;
4255
4256 if (nvlist_lookup_nvlist_array(config,
4257 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4258 if (nvlist_lookup_uint64(config,
4259 ZPOOL_CONFIG_WHOLE_DISK,
4260 &wholedisk) != 0 || !wholedisk) {
4261 return (MAXOFFSET_T);
4262 }
b128c09f
BB
4263 if (read_efi_label(config, &sb) < 0)
4264 sb = MAXOFFSET_T;
34dc7c2f
BB
4265 return (sb);
4266 }
4267
4268 for (c = 0; c < children; c++) {
4269 sb = find_start_block(child[c]);
4270 if (sb != MAXOFFSET_T) {
4271 return (sb);
4272 }
4273 }
4274 return (MAXOFFSET_T);
4275}
4276
2d82ea8b 4277static int
d603ed6c
BB
4278zpool_label_disk_check(char *path)
4279{
4280 struct dk_gpt *vtoc;
4281 int fd, err;
4282
dbb38f66 4283 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
d1d7e268 4284 return (errno);
d603ed6c
BB
4285
4286 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4287 (void) close(fd);
d1d7e268 4288 return (err);
d603ed6c
BB
4289 }
4290
4291 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4292 efi_free(vtoc);
4293 (void) close(fd);
d1d7e268 4294 return (EIDRM);
d603ed6c
BB
4295 }
4296
4297 efi_free(vtoc);
4298 (void) close(fd);
d1d7e268 4299 return (0);
d603ed6c
BB
4300}
4301
5b4136bd
BB
/*
 * Generate a unique partition name for the ZFS member. Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members. The partition names are
 * of the form <pool>-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd = open("/dev/urandom", O_RDONLY);

	/* Prefer kernel entropy for the unique suffix. */
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;
		close(fd);
	}

	/* Fall back to rand(3) if /dev/urandom was unavailable. */
	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx",
	    (unsigned long long)id);
}
4327
34dc7c2f
BB
4328/*
4329 * Label an individual disk. The name provided is the short name,
4330 * stripped of any leading /dev path.
4331 */
4332int
4333zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4334{
4335 char path[MAXPATHLEN];
4336 struct dk_gpt *vtoc;
d603ed6c 4337 int rval, fd;
34dc7c2f
BB
4338 size_t resv = EFI_MIN_RESV_SIZE;
4339 uint64_t slice_size;
4340 diskaddr_t start_block;
4341 char errbuf[1024];
4342
4343 /* prepare an error message just in case */
4344 (void) snprintf(errbuf, sizeof (errbuf),
4345 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4346
4347 if (zhp) {
4348 nvlist_t *nvroot;
4349
4350 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4351 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4352
4353 if (zhp->zpool_start_block == 0)
4354 start_block = find_start_block(nvroot);
4355 else
4356 start_block = zhp->zpool_start_block;
4357 zhp->zpool_start_block = start_block;
4358 } else {
4359 /* new pool */
4360 start_block = NEW_START_BLOCK;
4361 }
4362
eac47204 4363 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
34dc7c2f 4364
d02ca379 4365 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
34dc7c2f
BB
4366 /*
4367 * This shouldn't happen. We've long since verified that this
4368 * is a valid device.
4369 */
109491a8
RL
4370 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4371 "label '%s': unable to open device: %d"), path, errno);
34dc7c2f
BB
4372 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4373 }
4374
4375 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4376 /*
4377 * The only way this can fail is if we run out of memory, or we
4378 * were unable to read the disk's capacity
4379 */
4380 if (errno == ENOMEM)
4381 (void) no_memory(hdl);
4382
4383 (void) close(fd);
109491a8
RL
4384 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4385 "label '%s': unable to read disk capacity"), path);
34dc7c2f
BB
4386
4387 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4388 }
4389
4390 slice_size = vtoc->efi_last_u_lba + 1;
4391 slice_size -= EFI_MIN_RESV_SIZE;
4392 if (start_block == MAXOFFSET_T)
4393 start_block = NEW_START_BLOCK;
4394 slice_size -= start_block;
613d88ed 4395 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
34dc7c2f
BB
4396
4397 vtoc->efi_parts[0].p_start = start_block;
4398 vtoc->efi_parts[0].p_size = slice_size;
4399
4400 /*
4401 * Why we use V_USR: V_BACKUP confuses users, and is considered
4402 * disposable by some EFI utilities (since EFI doesn't have a backup
4403 * slice). V_UNASSIGNED is supposed to be used only for zero size
4404 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4405 * etc. were all pretty specific. V_USR is as close to reality as we
4406 * can get, in the absence of V_OTHER.
4407 */
4408 vtoc->efi_parts[0].p_tag = V_USR;
5b4136bd 4409 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
34dc7c2f
BB
4410
4411 vtoc->efi_parts[8].p_start = slice_size + start_block;
4412 vtoc->efi_parts[8].p_size = resv;
4413 vtoc->efi_parts[8].p_tag = V_RESERVED;
4414
dbb38f66
YP
4415 rval = efi_write(fd, vtoc);
4416
4417 /* Flush the buffers to disk and invalidate the page cache. */
4418 (void) fsync(fd);
4419 (void) ioctl(fd, BLKFLSBUF);
4420
4421 if (rval == 0)
4422 rval = efi_rescan(fd);
4423
4424 /*
4425 * Some block drivers (like pcata) may not support EFI GPT labels.
4426 * Print out a helpful error message directing the user to manually
4427 * label the disk and give a specific slice.
4428 */
4429 if (rval != 0) {
34dc7c2f
BB
4430 (void) close(fd);
4431 efi_free(vtoc);
4432
d603ed6c
BB
4433 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4434 "parted(8) and then provide a specific slice: %d"), rval);
34dc7c2f
BB
4435 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4436 }
4437
4438 (void) close(fd);
4439 efi_free(vtoc);
34dc7c2f 4440
eac47204
BB
4441 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4442 (void) zfs_append_partition(path, MAXPATHLEN);
4443
2d82ea8b
BB
4444 /* Wait to udev to signal use the device has settled. */
4445 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
d603ed6c
BB
4446 if (rval) {
4447 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4448 "detect device partitions on '%s': %d"), path, rval);
4449 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f
BB
4450 }
4451
d603ed6c
BB
4452 /* We can't be to paranoid. Read the label back and verify it. */
4453 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4454 rval = zpool_label_disk_check(path);
4455 if (rval) {
4456 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4457 "EFI label on '%s' is damaged. Ensure\nthis device "
4458 "is not in in use, and is functioning properly: %d"),
4459 path, rval);
4460 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f 4461 }
34dc7c2f 4462
d1d7e268 4463 return (0);
34dc7c2f 4464}
6078881a 4465
6078881a
TH
4466/*
4467 * Allocate and return the underlying device name for a device mapper device.
4468 * If a device mapper device maps to multiple devices, return the first device.
4469 *
8720e9e7
TH
4470 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4471 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
6078881a
TH
4472 *
4473 * Returns device name, or NULL on error or no match. If dm_name is not a DM
4474 * device then return NULL.
4475 *
4476 * NOTE: The returned name string must be *freed*.
4477 */
8720e9e7
TH
4478char *
4479dm_get_underlying_path(char *dm_name)
6078881a 4480{
8720e9e7
TH
4481 DIR *dp = NULL;
4482 struct dirent *ep;
4483 char *realp;
4484 char *tmp = NULL;
4485 char *path = NULL;
4486 char *dev_str;
4487 int size;
6078881a 4488
8720e9e7
TH
4489 if (dm_name == NULL)
4490 return (NULL);
4491
4492 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4493 realp = realpath(dm_name, NULL);
4494 if (realp == NULL)
4495 return (NULL);
6078881a
TH
4496
4497 /*
8720e9e7
TH
4498 * If they preface 'dev' with a path (like "/dev") then strip it off.
4499 * We just want the 'dm-N' part.
6078881a 4500 */
8720e9e7
TH
4501 tmp = strrchr(realp, '/');
4502 if (tmp != NULL)
4503 dev_str = tmp + 1; /* +1 since we want the chr after '/' */
4504 else
4505 dev_str = tmp;
6078881a 4506
8720e9e7
TH
4507 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
4508 if (size == -1 || !tmp)
6078881a
TH
4509 goto end;
4510
8720e9e7
TH
4511 dp = opendir(tmp);
4512 if (dp == NULL)
6078881a
TH
4513 goto end;
4514
8720e9e7
TH
4515 /* Return first sd* entry in /sys/block/dm-N/slaves/ */
4516 while ((ep = readdir(dp))) {
4517 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4518 size = asprintf(&path, "/dev/%s", ep->d_name);
4519 break;
4520 }
4521 }
6078881a
TH
4522
4523end:
8720e9e7
TH
4524 if (dp != NULL)
4525 closedir(dp);
4526 free(tmp);
4527 free(realp);
4528 return (path);
6078881a
TH
4529}
4530
/*
 * Return 1 if device is a device mapper or multipath device.
 * Return 0 if not.
 */
int
zfs_dev_is_dm(char *dev_name)
{
	char *underlying = dm_get_underlying_path(dev_name);

	/* A non-NULL result means the device has DM slaves. */
	if (underlying == NULL)
		return (0);

	free(underlying);
	return (1);
}
4547
dbb38f66
YP
4548/*
4549 * By "whole disk" we mean an entire physical disk (something we can
4550 * label, toggle the write cache on, etc.) as opposed to the full
4551 * capacity of a pseudo-device such as lofi or did. We act as if we
4552 * are labeling the disk, which should be a pretty good test of whether
4553 * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
4554 * it isn't.
4555 */
4556int
4557zfs_dev_is_whole_disk(char *dev_name)
4558{
4559 struct dk_gpt *label;
4560 int fd;
4561
4562 if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
4563 return (0);
4564
4565 if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
4566 (void) close(fd);
4567 return (0);
4568 }
4569
4570 efi_free(label);
4571 (void) close(fd);
4572
4573 return (1);
4574}
4575
6078881a
TH
/*
 * Lookup the underlying device for a device name
 *
 * Often you'll have a symlink to a device, a partition device,
 * or a multipath device, and want to look up the underlying device.
 * This function returns the underlying device name. If the device
 * name is already the underlying device, then just return the same
 * name. If the device is a DM device with multiple underlying devices
 * then return the first one.
 *
 * For example:
 *
 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
 *    dev_name:	/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
 *    returns:	/dev/sda
 *
 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
 *    dev_name:	/dev/mapper/mpatha
 *    returns:	/dev/sda (first device)
 *
 * 3. /dev/sda (already the underlying device)
 *    dev_name:	/dev/sda
 *    returns:	/dev/sda
 *
 * 4. /dev/dm-3 (mapped to /dev/sda)
 *    dev_name:	/dev/dm-3
 *    returns:	/dev/sda
 *
 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
 *    dev_name:	/dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
 *    returns:	/dev/sdb
 *
 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
 *    dev_name:	/dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
 *    returns:	/dev/sda
 *
 * Returns underlying device name, or NULL on error or no match.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
zfs_get_underlying_path(char *dev_name)
{
	char *resolved;
	char *stripped = NULL;

	if (dev_name == NULL)
		return (NULL);

	/* If this is a DM device, start from its first slave device. */
	resolved = dm_get_underlying_path(dev_name);

	/* Not a DM device: just resolve any symlinks in the name. */
	if (resolved == NULL)
		resolved = realpath(dev_name, NULL);

	if (resolved != NULL) {
		/* Strip any trailing partition suffix (e.g. "sdb9" -> "sdb") */
		stripped = zfs_strip_partition_path(resolved);
		free(resolved);
	}

	return (stripped);
}
1bbd8770
TH
4638
4639/*
4640 * Given a dev name like "sda", return the full enclosure sysfs path to
4641 * the disk. You can also pass in the name with "/dev" prepended
4642 * to it (like /dev/sda).
4643 *
4644 * For example, disk "sda" in enclosure slot 1:
4645 * dev: "sda"
4646 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4647 *
4648 * 'dev' must be a non-devicemapper device.
4649 *
4650 * Returned string must be freed.
4651 */
4652char *
4653zfs_get_enclosure_sysfs_path(char *dev_name)
4654{
4655 DIR *dp = NULL;
4656 struct dirent *ep;
4657 char buf[MAXPATHLEN];
4658 char *tmp1 = NULL;
4659 char *tmp2 = NULL;
4660 char *tmp3 = NULL;
4661 char *path = NULL;
4662 size_t size;
4663 int tmpsize;
4664
8720e9e7 4665 if (dev_name == NULL)
1bbd8770
TH
4666 return (NULL);
4667
4668 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4669 tmp1 = strrchr(dev_name, '/');
8720e9e7 4670 if (tmp1 != NULL)
1bbd8770
TH
4671 dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
4672
4673 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4674 if (tmpsize == -1 || tmp1 == NULL) {
4675 tmp1 = NULL;
4676 goto end;
4677 }
4678
4679 dp = opendir(tmp1);
4680 if (dp == NULL) {
4681 tmp1 = NULL; /* To make free() at the end a NOP */
4682 goto end;
4683 }
4684
4685 /*
4686 * Look though all sysfs entries in /sys/block/<dev>/device for
4687 * the enclosure symlink.
4688 */
4689 while ((ep = readdir(dp))) {
4690 /* Ignore everything that's not our enclosure_device link */
8720e9e7 4691 if (strstr(ep->d_name, "enclosure_device") == NULL)
1bbd8770
TH
4692 continue;
4693
4694 if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
4695 tmp2 == NULL)
4696 break;
4697
4698 size = readlink(tmp2, buf, sizeof (buf));
4699
4700 /* Did readlink fail or crop the link name? */
4701 if (size == -1 || size >= sizeof (buf)) {
4702 free(tmp2);
4703 tmp2 = NULL; /* To make free() at the end a NOP */
4704 break;
4705 }
4706
4707 /*
4708 * We got a valid link. readlink() doesn't terminate strings
4709 * so we have to do it.
4710 */
4711 buf[size] = '\0';
4712
4713 /*
4714 * Our link will look like:
4715 *
4716 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4717 *
4718 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4719 */
4720 tmp3 = strstr(buf, "enclosure");
4721 if (tmp3 == NULL)
4722 break;
4723
4724 if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
4725 /* If asprintf() fails, 'path' is undefined */
4726 path = NULL;
4727 break;
4728 }
4729
4730 if (path == NULL)
4731 break;
4732 }
4733
4734end:
4735 free(tmp2);
4736 free(tmp1);
4737
8720e9e7 4738 if (dp != NULL)
1bbd8770
TH
4739 closedir(dp);
4740
4741 return (path);
4742}