/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 */

#include <errno.h>
#include <devid.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_disk.h>
#include <dlfcn.h>
#include <libzutil.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

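/*
 * Fetch the pool's properties from the kernel via the
 * ZFS_IOC_POOL_GET_PROPS ioctl, growing the destination nvlist buffer
 * until the result fits, and cache the result in zhp->zpool_props.
 */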
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

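/*
 * Re-read the property list, freeing the previously cached copy only
 * after the new one has been fetched successfully.
 */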
static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

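/*
 * Return the string value (and optionally the source) of a property from
 * the cached property list, falling back to the property's default value,
 * or "-" if no default exists.
 */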
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

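/*
 * Return the numeric value (and optionally the source) of a property from
 * the cached property list, falling back to the property's default value.
 */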
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		    == 0);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

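/*
 * Return true if the pool has a non-empty 'bootfs' property set.
 */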
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid, only "
				    "values between %" PRId32 " and "
				    "%" PRId32 " are allowed."),
				    propname, intval, ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			if (intval < ZIO_DEDUPDITTO_MIN && intval != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' value %d is invalid; only "
				    "values of 0 or >= %" PRId32 " are allowed "
				    "for this property."),
				    propname, intval, ZIO_DEDUPDITTO_MIN);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

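/*
 * Illustrative use (not from this file): a caller such as the 'zpool set'
 * command would invoke zpool_set_prop(zhp, "comment", "rack A12"); the
 * name/value pair is validated by zpool_valid_proplist() before the
 * ZFS_IOC_POOL_SET_PROPS ioctl is issued.
 */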
/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

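/*
 * Expand the given generic property list with per-pool feature@ and
 * unsupported@ entries, and widen each entry's column to fit its value.
 */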
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			char *bias;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}

		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, msg);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

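/*
 * Thin wrappers around zpool_export_common(): zpool_export() honors the
 * caller's 'force' flag, while zpool_export_force() additionally sets the
 * hard-force flag (zc_guid).
 */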
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

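/*
 * Tell the user about the outcome of a rewind (or dry-run rewind): the
 * time the pool was (or would be) returned to, and approximately how many
 * seconds or minutes of transactions were (or would be) discarded.
 */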
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

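/*
 * Explain how a damaged pool might be recovered by rewinding: how far
 * back the pool would be taken, how much data would be lost, and the
 * 'zpool clear -F' or 'zpool import -F' command that would attempt it.
 */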
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

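/*
 * Recursively print the vdev tree with two extra columns of indentation
 * per level, tagging log devices with " [log]".
 */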
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

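/*
 * Print each unsupported feature recorded in the config's
 * ZPOOL_CONFIG_UNSUP_FEAT list, with its description when one is present.
 */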
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

34dc7c2f
BB
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
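
/*
 * Usage sketch (editorial note, not part of the original source): given
 * a 'config' nvlist from one of the zpool_find_import() variants, a
 * caller might import the pool under a new name; ZFS_IMPORT_NORMAL is
 * the plain-import flag from libzfs.h, and error reporting has already
 * happened through 'hdl' by the time this returns.
 *
 *	if (zpool_import_props(hdl, config, "tank_copy", NULL,
 *	    ZFS_IMPORT_NORMAL) != 0)
 *		return (-1);
 */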

/*
 * Translate vdev names to guids. If a vdev_path is determined to be
 * unsuitable then a vd_errlist is allocated and the vdev path and errno
 * are added to it.
 */
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
    nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
	nvlist_t *errlist = NULL;
	int error = 0;

	for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vds, elem)) {
		boolean_t spare, cache;

		char *vd_path = nvpair_name(elem);
		nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
		    NULL);

		if ((tgt == NULL) || cache || spare) {
			if (errlist == NULL) {
				errlist = fnvlist_alloc();
				error = EINVAL;
			}

			uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
			    (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
			fnvlist_add_int64(errlist, vd_path, err);
			continue;
		}

		uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
		fnvlist_add_uint64(vdev_guids, vd_path, guid);

		char msg[MAXNAMELEN];
		(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
		fnvlist_add_string(guids_to_paths, msg, vd_path);
	}

	if (error != 0) {
		verify(errlist != NULL);
		if (vd_errlist != NULL)
			*vd_errlist = errlist;
		else
			fnvlist_free(errlist);
	}

	return (error);
}

static int
xlate_init_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_INITIALIZING);
	case ESRCH:
		return (EZFS_NO_INITIALIZE);
	}
	return (err);
}

/*
 * Begin, suspend, or cancel the initialization (initializing of all free
 * blocks) for the given vdevs in the given pool.
 */
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	char msg[1024];
	int err;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;
	nvlist_t *errlist;
	nvpair_t *elem;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);

	if (err == 0) {
		err = lzc_initialize(zhp->zpool_name, cmd_type,
		    vdev_guids, &errlist);
		if (err == 0) {
			fnvlist_free(vdev_guids);
			fnvlist_free(guids_to_paths);
			return (0);
		}

		if (errlist != NULL) {
			vd_errlist = fnvlist_lookup_nvlist(errlist,
			    ZPOOL_INITIALIZE_VDEVS);
		}

		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "operation failed"));
	} else {
		verify(vd_errlist != NULL);
	}

	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
		char *path;

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot initialize '%s'", path);
	}

	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL) {
		fnvlist_free(vd_errlist);
		return (-1);
	}

	return (zpool_standard_error(zhp->zpool_hdl, err, msg));
}
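
/*
 * Usage sketch (editorial note, not part of the original source): the
 * 'vds' nvlist is keyed by vdev name, the way zpool(8) builds it. The
 * device path is illustrative, and the start-command constant is named
 * POOL_INITIALIZE_START in current headers (older releases spelled it
 * POOL_INITIALIZE_DO).
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/sda");
 *	err = zpool_initialize(zhp, POOL_INITIALIZE_START, vds);
 *	fnvlist_free(vds);
 */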

static int
xlate_trim_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_TRIMMING);
	case ESRCH:
		return (EZFS_NO_TRIM);
	case EOPNOTSUPP:
		return (EZFS_TRIM_NOTSUP);
	}
	return (err);
}

/*
 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
 * the given vdevs in the given pool.
 */
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
    trimflags_t *trim_flags)
{
	char msg[1024];
	int err;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;
	nvlist_t *errlist;
	nvpair_t *elem;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);
	if (err == 0) {
		err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
		    trim_flags->secure, vdev_guids, &errlist);
		if (err == 0) {
			fnvlist_free(vdev_guids);
			fnvlist_free(guids_to_paths);
			return (0);
		}

		if (errlist != NULL) {
			vd_errlist = fnvlist_lookup_nvlist(errlist,
			    ZPOOL_TRIM_VDEVS);
		}

		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "operation failed"));
	} else {
		verify(vd_errlist != NULL);
	}

	for (elem = nvlist_next_nvpair(vd_errlist, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
		char *path;

		/*
		 * If only the pool was specified, and it was not a secure
		 * trim then suppress warnings for individual vdevs which
		 * do not support trimming.
		 */
		if (vd_error == EZFS_TRIM_NOTSUP &&
		    trim_flags->fullpool &&
		    !trim_flags->secure) {
			continue;
		}

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot trim '%s'", path);
	}

	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL) {
		fnvlist_free(vd_errlist);
		return (-1);
	}

	return (zpool_standard_error(zhp->zpool_hdl, err, msg));
}
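
/*
 * Usage sketch (editorial note, not part of the original source): start
 * a full-rate, non-secure TRIM of one device. The field names follow
 * trimflags_t as declared in libzfs.h of this era; the device path is
 * illustrative.
 *
 *	trimflags_t flags = { .fullpool = B_FALSE, .secure = B_FALSE,
 *	    .rate = 0 };
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/sda");
 *	err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
 *	fnvlist_free(vds);
 */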

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;
	zc.zc_flags = cmd;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
		return (0);

	err = errno;

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
		}
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
			else
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		} else {
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
	} else {
		return (zpool_standard_error(hdl, err, msg));
	}
}
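
/*
 * Usage sketch (editorial note, not part of the original source):
 * zpool(8) drives scrub state changes through this one entry point:
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 *
 * i.e. start (or resume), pause, and cancel respectively.
 */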

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path or guid, find the associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;
	uint64_t guid;
	char *end;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(ppath, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
		    ppath) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
static boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
	    strncmp(name,
	    VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
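
/*
 * Usage sketch (editorial note, not part of the original source): the
 * lookup accepts a device path, a stringified GUID, or an interior
 * top-level name such as "mirror-0":
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "mirror-0", &spare,
 *	    &l2cache, &log);
 *	if (tgt != NULL)
 *		guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
 */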

static int
vdev_is_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}

static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_is_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can only have a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
	int fd, error;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, msg));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 *
	 * Also, we don't call efi_rescan() - that would just return EBUSY.
	 * The module will do it for us in vdev_disk_open().
	 */
	error = efi_use_whole_disk(fd);

	/* Flush the buffers to disk and invalidate the page cache. */
	(void) fsync(fd);
	(void) ioctl(fd, BLKFLSBUF);

	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, msg));
	}

	return (0);
}

/*
 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
 *
 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
 * ignore them.
 */
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
    boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
	uint64_t guid;
	boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
	    &log)) == NULL)
		return (0);

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (is_spare != NULL)
		*is_spare = spare;
	if (is_l2cache != NULL)
		*is_l2cache = l2cache;
	if (is_log != NULL)
		*is_log = log;

	return (guid);
}

/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
	return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
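
/*
 * Usage sketch (editorial note, not part of the original source):
 * resolve a path to the GUID expected by the guid-based entry points
 * below; the device path is illustrative.
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda");
 *	if (guid != 0)
 *		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_NONE);
 */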

/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	char *pathname;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
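
/*
 * Usage sketch (editorial note, not part of the original source):
 * online a device and ask it to expand into any newly available
 * capacity; the device path is illustrative.
 *
 *	vdev_state_t newstate;
 *	if (zpool_vdev_online(zhp, "/dev/sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		... the device is back in service ...
 */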

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}

}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
		    "or device removal is in progress"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
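
/*
 * Usage sketch (editorial note, not part of the original source): for
 * 'zpool replace tank sda sdb', zpool(8) first builds 'nvroot' (via its
 * make_root_vdev() helper) describing the single new disk, then calls:
 *
 *	ret = zpool_vdev_attach(zhp, "sda", "sdb", nvroot, B_TRUE);
 *
 * A plain 'zpool attach' passes B_FALSE so the old disk is kept.
 */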

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool. The schild array contains a list of disks that the
 * user specified on the command line. We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry. Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool. If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
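
/*
 * Usage sketch (editorial note, not part of the original source): a
 * dry-run split of 'tank' into 'tank2', letting the library choose the
 * last disk of each mirror; the field names follow splitflags_t.
 *
 *	splitflags_t flags = { .dryrun = 1, .import = 0 };
 *	nvlist_t *newroot = NULL;
 *	if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */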

/*
 * Remove the given device.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "root pool can not have removed devices, "
		    "because GRUB does not understand them"));
		return (zfs_error(hdl, EINVAL, msg));
	}

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	switch (errno) {

	case EINVAL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "invalid config; all top-level vdevs must "
		    "have the same sector size and not be raidz."));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Pool busy; removal may already be in progress"));
		}
		(void) zfs_error(hdl, EZFS_BUSY, msg);
		break;

	case EACCES:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
			(void) zfs_error(hdl, EZFS_BUSY, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}
	return (-1);
}

int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
	zfs_cmd_t zc;
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot cancel removal"));

	bzero(&zc, sizeof (zc));
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = 1;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
    uint64_t *sizep)
{
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
	    path);

	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare || l2cache || islog) {
		*sizep = 0;
		return (0);
	}

	if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "indirect size not available"));
		return (zfs_error(hdl, EINVAL, msg));
	}
	return (0);
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_load_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_load_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zlp_rewind;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
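
/*
 * Usage sketch (editorial note, not part of the original source): a
 * plain 'zpool clear tank' amounts to passing a no-rewind policy and no
 * device path:
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *	fnvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
 *	    ZPOOL_NO_REWIND);
 *	ret = zpool_clear(zhp, NULL, policy);
 *	fnvlist_free(policy);
 */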

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
3800
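/*
 * Example (hypothetical fmd-style caller): clear a single vdev by GUID,
 * e.g. one taken from a fault event payload. The 'fault' nvlist and its
 * "vdev_guid" member are assumptions for illustration only.
 *
 *	uint64_t vdev_guid = fnvlist_lookup_uint64(fault, "vdev_guid");
 *	(void) zpool_vdev_clear(zhp, vdev_guid);
 */
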
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *scrub_restart = data;
	int error;

	error = lzc_reopen(pool_name, *scrub_restart);
	if (error) {
		return (zpool_standard_error_fmt(hdl, error,
		    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
	}

	return (0);
}

/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
	int ret;
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *force = data;
	nvlist_t *innvl = fnvlist_alloc();

	fnvlist_add_boolean_value(innvl, "force", *force);
	if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
		nvlist_free(innvl);
		return (zpool_standard_error_fmt(hdl, ret,
		    dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
	}
	nvlist_free(innvl);

	return (0);
}

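/*
 * Both zpool_reopen_one() and zpool_sync_one() are shaped as zpool_iter()
 * callbacks so a caller can apply them to every imported pool. A minimal
 * sketch (assumes an initialized libzfs_handle_t *hdl):
 *
 *	boolean_t force = B_FALSE;
 *	(void) zpool_iter(hdl, zpool_sync_one, &force);
 */
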
#if defined(__sun__) || defined(__sun)
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev. We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0"};

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
#endif /* sun */

#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
		/*
		 * Live VDEV path updates to a kernel VDEV during a
		 * zpool_vdev_name lookup are not supported on Linux.
		 */
		char *devid;
		vdev_stat_t *vs;
		uint_t vsc;

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it. Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}
#endif /* sun */

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}

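/*
 * Illustrative examples of the names produced above. 'leaf_nv' and
 * 'top_nv' are assumed config nvlists for a leaf and a top-level vdev;
 * the exact strings depend on the pool layout and any ZPOOL_VDEV_NAME_*
 * environment overrides:
 *
 *	zpool_vdev_name(hdl, zhp, leaf_nv, 0)                -> "sda"
 *	zpool_vdev_name(hdl, zhp, leaf_nv, VDEV_NAME_PATH)   -> "/dev/sda1"
 *	zpool_vdev_name(hdl, zhp, top_nv, VDEV_NAME_TYPE_ID) -> "raidz2-0"
 *	zpool_vdev_name(hdl, zhp, leaf_nv, VDEV_NAME_GUID)   ->
 *	    "17870154786224913892"
 */
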
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t));
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

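/*
 * Example (caller-side sketch): walk the uniquified list built above. Each
 * element is an nvlist carrying ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT,
 * which zpool_obj_to_path() below can resolve to a pathname.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			char path[MAXPATHLEN];
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			zpool_obj_to_path(zhp,
 *			    fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET),
 *			    fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT),
 *			    path, sizeof (path));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */
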
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

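/*
 * For example, argv = { "/sbin/zpool", "set", "cachefile=none", "tank" }
 * is flattened into the single string "zpool set cachefile=none tank",
 * which zpool_log_history() below records in the pool history.
 */
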
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}

/*
 * Perform an ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char *buf;
	int buflen = 128 * 1024;
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	do {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}

		/* CONSTCOND */
	} while (1);

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

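/*
 * Example (illustrative sketch): fetch the history and walk the records.
 * Each record is an nvlist; command records carry ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD, while internal events use ZPOOL_HIST_INT_NAME.
 * dump_record() is a hypothetical consumer.
 *
 *	nvlist_t *nvhis;
 *	nvlist_t **records;
 *	uint_t numrecords, i;
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++)
 *			dump_record(records[i]);
 *		nvlist_free(nvhis);
 *	}
 */
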
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function. When 'nvp' is set to NULL it indicates
 * no new events are available. In either case the function returns 0 and
 * it is up to the caller to free 'nvp'. In the case of a fatal error the
 * function will return a non-zero value. When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}

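/*
 * Example (caller-side sketch, similar to what 'zpool events' does): drain
 * pending events without blocking. 'zevent_fd' is assumed to be an open
 * descriptor on ZFS_DEV, and 'hdl' an initialized libzfs handle.
 *
 *	nvlist_t *nv;
 *	int dropped;
 *	while (zpool_events_next(hdl, &nv, &dropped, ZEVENT_NONBLOCK,
 *	    zevent_fd) == 0 && nv != NULL) {
 *		if (dropped > 0)
 *			(void) printf("dropped %d events\n", dropped);
 *		nvlist_free(nv);
 *	}
 */
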
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle. On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config; if a label does not exist, pass the
 * error back to the caller. If the caller has passed a non-NULL diskaddr
 * argument then we set it to the starting address of the EFI partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Generate a unique partition name for the ZFS member. Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members. The partition names are
 * of the form zfs-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}

/*
 * Label an individual disk. The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
		/*
		 * This shouldn't happen. We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice). V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
	 * etc. were all pretty specific. V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	rval = efi_write(fd, vtoc);

	/* Flush the buffers to disk and invalidate the page cache. */
	(void) fsync(fd);
	(void) ioctl(fd, BLKFLSBUF);

	if (rval == 0)
		rval = efi_rescan(fd);

	/*
	 * Some block drivers (like pcata) may not support EFI GPT labels.
	 * Print out a helpful error message directing the user to manually
	 * label the disk and give a specific slice.
	 */
	if (rval != 0) {
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal that the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid. Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged. Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}
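
/*
 * Example (illustrative): label a disk before handing it to the pool, the
 * way whole-disk vdevs are prepared for 'zpool create'/'zpool add'. Only
 * the short device name is passed; zpool_label_disk() prepends DISK_ROOT
 * itself. "sdb" is an assumed device name.
 *
 *	if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *		return (-1);
 */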