/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
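
/*
 * Illustrative note (not part of the original source): callers build these
 * flags with designated initializers, for example at pool creation time:
 *
 *	prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 *
 * as zpool_create() does further down in this file.
 */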

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_ASHIFT:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
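
/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants the pool health as a display string could use zpool_get_prop()
 * like this:
 *
 *	char health[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), NULL) == 0)
 *		(void) printf("%s\n", health);	prints e.g. "ONLINE"
 */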
322
323/*
324 * Check if the bootfs name has the same pool name as it is set to.
325 * Assuming bootfs is a valid dataset name.
326 */
327static boolean_t
328bootfs_name_valid(const char *pool, char *bootfs)
329{
330 int len = strlen(pool);
331
b128c09f 332 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
34dc7c2f
BB
333 return (B_FALSE);
334
335 if (strncmp(pool, bootfs, len) == 0 &&
336 (bootfs[len] == '/' || bootfs[len] == '\0'))
337 return (B_TRUE);
338
339 return (B_FALSE);
340}
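
/*
 * For example (illustrative): with pool "tank", the names "tank" and
 * "tank/ROOT/fs" pass the checks above, while "tankX" fails the
 * '/'-or-NUL test after the prefix and "other/fs" fails the prefix
 * compare; both return B_FALSE.
 */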

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name
			 * and the dataset has to be in the same pool as the
			 * one the property is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
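
/*
 * Illustrative sketch (not part of the original source): disabling the
 * cache file for a pool, a value the cachefile validation above
 * explicitly permits:
 *
 *	if (zpool_set_prop(zhp, "cachefile", "none") != 0)
 *		... the error has been recorded on zhp->zpool_hdl ...
 */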

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices use a stripe width of 128k, while other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
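
/*
 * The arithmetic behind the values above: 2048 sectors * 512 bytes per
 * sector = 1,048,576 bytes = 1m, so starting and ending partitions on
 * 2048-sector boundaries yields the 1m alignment described above.
 */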

/*
 * Validate the given pool name; if 'hdl' is non-NULL, an extended error
 * message is recorded on it when validation fails.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}
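
/*
 * Illustrative sketch (not part of the original source): the usual
 * open/use/close pattern for pool handles, assuming 'g_zfs' is a
 * libzfs_handle_t obtained from libzfs_init():
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) != NULL) {
 *		... query or modify the pool ...
 *		zpool_close(zhp);
 *	}
 */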

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
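
/*
 * Illustrative sketch (not part of the original source, error handling
 * elided): creating a pool with an alternate root, which exercises the
 * mountpoint logic above. Building 'nvroot' (the vdev tree nvlist) is
 * assumed to have been done elsewhere, e.g. by the zpool command:
 *
 *	nvlist_t *props = NULL;
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/mnt") == 0);
 *	ret = zpool_create(hdl, "tank", nvroot, props, NULL);
 *	nvlist_free(props);
 */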

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
		return;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
1383
34dc7c2f
BB
1384/*
1385 * zpool_import() is a contracted interface. Should be kept the same
1386 * if possible.
1387 *
1388 * Applications should use zpool_import_props() to import a pool with
1389 * new properties value to be set.
1390 */
1391int
1392zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1393 char *altroot)
1394{
1395 nvlist_t *props = NULL;
1396 int ret;
1397
1398 if (altroot != NULL) {
1399 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1400 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1401 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1402 newname));
1403 }
1404
1405 if (nvlist_add_string(props,
fb5f0bc8
BB
1406 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1407 nvlist_add_string(props,
1408 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
34dc7c2f
BB
1409 nvlist_free(props);
1410 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1411 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1412 newname));
1413 }
1414 }
1415
572e2857
BB
1416 ret = zpool_import_props(hdl, config, newname, props,
1417 ZFS_IMPORT_NORMAL);
34dc7c2f
BB
1418 if (props)
1419 nvlist_free(props);
1420 return (ret);
1421}
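
/*
 * Illustrative sketch (not part of the original source): importing a
 * discovered pool under an alternate root, where 'config' should have
 * come from zpool_find_import(), as the zpool_import_props() comment
 * below notes:
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		... the error has been recorded on 'hdl' ...
 */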

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
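
/*
 * Illustrative sketch (not part of the original source): starting a
 * scrub, and cancelling one, via the function above:
 *
 *	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *		... scrub could not be started, e.g. one is running ...
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);	cancels an active scrub
 */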

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to B_TRUE if the provided guid refers to an AVAIL
 * spare, and B_FALSE if it is an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end with a
		 *   partition suffix "1", "-part1", or "p1". The suffix is
		 *   hidden from the user, but included in the string, so this
		 *   matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				char buf[MAXPATHLEN];

				zfs_append_partition(srchval, buf, sizeof (buf));
				if (strcmp(val, buf) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		if (zfs_resolve_shortname(path, buf, sizeof (buf)) < 0) {
			nvlist_free(search);
			return (NULL);
		}
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
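
/*
 * Illustrative sketch (not part of the original source): locating a leaf
 * vdev by the short device name a user typed on the command line:
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "sda", &spare,
 *	    &l2cache, &log)) == NULL)
 *		... no such device in this pool ...
 *
 * Per the branches above, the same call also accepts a vdev guid in
 * decimal or an interior vdev name such as "mirror-0".
 */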

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
1976
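/*
 * The function above relies on snprintf() returning the number of bytes it
 * *would* have written, so a return value >= the space left signals
 * truncation.  A minimal self-contained restatement of that pattern
 * (hypothetical helper, compiled out):
 */
#if 0
static int
example_append_token(char *buf, size_t size, size_t *written, const char *tok)
{
	size_t pos = *written;
	size_t left = size - pos;
	size_t rsz;

	rsz = snprintf(buf + pos, left, (pos == 0) ? "%s" : " %s", tok);
	*written += rsz;

	if (rsz >= left) {
		/* erase the truncated token, as vdev_get_one_physpath() does */
		if (left != 0)
			buf[pos] = '\0';
		return (-1);
	}
	return (0);
}
#endif
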
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

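/*
 * Illustrative usage sketch (compiled out): retrieving the space-separated
 * phys_path list for a root pool.  The pool name "rpool" is hypothetical.
 */
#if 0
static void
example_print_physpath(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	char physpath[MAXPATHLEN];

	if ((zhp = zpool_open(hdl, "rpool")) == NULL)
		return;
	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("boot device(s): %s\n", physpath);
	zpool_close(zhp);
}
#endif
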
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path)
{
	char errbuf[1024];
	int fd, error;

	/* prepare the error message before errbuf is passed to zfs_error() */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), path);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 */
	error = efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}
	return (0);
}

/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}

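/*
 * Illustrative usage sketch (compiled out): onlining a device and asking for
 * an expansion into any newly grown space, as 'zpool online -e' does.  The
 * pool "tank" and device "sda" are hypothetical.
 */
#if 0
static void
example_online_expand(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	vdev_state_t newstate;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;
	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate) == 0)
		(void) printf("vdev state now %llu\n", (u_longlong_t)newstate);
	else
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
	zpool_close(zhp);
}
#endif
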
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

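/*
 * Illustrative usage sketch (compiled out): taking a device offline
 * temporarily, as 'zpool offline -t' does.  Names are hypothetical.
 */
#if 0
static void
example_offline_temporary(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	/* B_TRUE requests a temporary offline that won't survive a reboot */
	if (zpool_vdev_offline(zhp, "sda", B_TRUE) != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
	zpool_close(zhp);
}
#endif
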
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

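/*
 * Illustrative usage sketch (compiled out): both functions above are keyed
 * by vdev GUID rather than path, so a caller typically resolves the GUID
 * with zpool_find_vdev() first.  Names are hypothetical; VDEV_AUX_NONE
 * supplies no additional fault reason.
 */
#if 0
static void
example_fault_by_path(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *tgt;
	boolean_t spare, cache;
	uint64_t guid;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;
	if ((tgt = zpool_find_vdev(zhp, "sda", &spare, &cache,
	    NULL)) != NULL) {
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &guid) == 0);
		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_NONE);
	}
	zpool_close(zhp);
}
#endif
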
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

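/*
 * Illustrative usage sketch (compiled out): replacing a disk.  The real
 * zpool(8) command builds 'nvroot' with make_root_vdev(), which also labels
 * the new device; this hand-built single-disk nvlist shows only the shape
 * of the config that zpool_vdev_attach() expects.  Names are hypothetical.
 */
#if 0
static int
example_replace_disk(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *nvroot, *disk;
	int ret;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb") == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	/* non-zero 'replacing' detaches /dev/sda once the resilver is done */
	ret = zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot, 1);

	nvlist_free(disk);
	nvlist_free(nvroot);
	zpool_close(zhp);
	return (ret);
}
#endif
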
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to NULL, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		if (zc_props)
			nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	if (zc_props)
		nvlist_free(zc_props);
	if (newconfig)
		nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}

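/*
 * Illustrative usage sketch (compiled out): validating a split without
 * modifying the pool, as 'zpool split -n' does.  Pool names "tank" and
 * "tank2" are hypothetical.
 */
#if 0
static void
example_split_dryrun(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *newroot = NULL;
	splitflags_t flags = { 0 };

	flags.dryrun = 1;	/* validate only; don't touch the pool */

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;
	if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0) {
		(void) printf("split of 'tank' would succeed\n");
		nvlist_free(newroot);
	}
	zpool_close(zhp);
}
#endif
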
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

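/*
 * Illustrative usage sketch (compiled out): clearing all error counts for a
 * pool with an explicit no-rewind policy, roughly what 'zpool clear' builds.
 * Assumes the ZPOOL_REWIND_REQUEST nvlist key and ZPOOL_NO_REWIND constant
 * from sys/fs/zfs.h; the pool name "tank" is hypothetical.
 */
#if 0
static void
example_clear_pool(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *policy = NULL;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;
	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
	    ZPOOL_NO_REWIND) == 0);

	/* a NULL path clears error counts for every vdev in the pool */
	(void) zpool_clear(zhp, NULL, policy);

	nvlist_free(policy);
	zpool_close(zhp);
}
#endif
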
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
 */
static char *
strip_partition(libzfs_handle_t *hdl, char *path)
{
	char *tmp = zfs_strdup(hdl, path);
	char *part = NULL, *d = NULL;

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d);
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++);
		if (*d == '\0')
			*part = '\0';
	}
	return (tmp);
}

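/*
 * A few illustrative inputs for strip_partition(), inferred by tracing the
 * three cases above (examples only, not a test suite):
 *
 *	"sda1"       -> "sda"	(scsi/ide disk, bare digit suffix)
 *	"md0p1"      -> "md0"	("pX" suffix preceded by a digit)
 *	"dm-0-part1" -> "dm-0"	("-partX" suffix)
 *	"sda"        -> "sda"	(no partition suffix; returned unchanged)
 */
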
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid, *type;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			return (strip_partition(hdl, path));
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			/*
			 * tmpbuf is declared at function scope so that 'path'
			 * can safely point at it after this block ends.
			 */
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%llu", path,
			    (u_longlong_t)value);
			path = tmpbuf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

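/*
 * Illustrative usage sketch (compiled out): printing the display name of
 * every top-level vdev in a pool's config.  The caller owns the returned
 * string and must free() it.
 */
#if 0
static void
example_print_top_vdevs(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	for (c = 0; c < children; c++) {
		/* B_TRUE appends the "-<id>" top-level vdev suffix */
		char *name = zpool_vdev_name(hdl, zhp, child[c], B_TRUE);

		(void) printf("%s\n", name);
		free(name);
	}
}
#endif
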
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

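/*
 * The record-length decode above reads eight bytes as a little-endian
 * integer one byte at a time, so it works regardless of host byte order.
 * A standalone restatement of just that step (hypothetical helper,
 * compiled out):
 */
#if 0
static uint64_t
example_decode_le64(const uchar_t *buf)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < sizeof (v); i++)
		v += (uint64_t)buf[i] << (8 * i);
	return (v);
}
#endif
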
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

/*
 * Retrieve the next event.  If there is a new event available 'nvp' will
 * contain a newly allocated nvlist and 'dropped' will be set to the number
 * of missed events since the last call to this function.  When 'nvp' is
 * set to NULL it indicates no new events are available.  In either case
 * the function returns 0 and it is up to the caller to free 'nvp'.  In
 * the case of a fatal error the function will return a non-zero value.
 * When the function is called in blocking mode it will not return until
 * a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, int block, int cleanup_fd)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = cleanup_fd;

	if (!block)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (block)
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}

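/*
 * Illustrative usage sketch (compiled out): draining pending events in
 * non-blocking mode.  Assumes the ZFS_DEV device node path macro; the
 * cleanup_fd lets the kernel drop per-consumer state when the fd closes.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl)
{
	nvlist_t *nvl;
	int dropped, cleanup_fd;

	if ((cleanup_fd = open(ZFS_DEV, O_RDWR)) < 0)
		return;

	/* block=0: loop until the kernel reports no more events */
	for (;;) {
		if (zpool_events_next(hdl, &nvl, &dropped, 0,
		    cleanup_fd) != 0)
			break;
		if (nvl == NULL)
			break;
		if (dropped > 0)
			(void) printf("dropped %d events\n", dropped);
		nvlist_print(stdout, nvl);
		nvlist_free(nvl);
	}
	(void) close(cleanup_fd);
}
#endif
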
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

int
zpool_label_disk_wait(char *path, int timeout)
{
	struct stat64 statbuf;
	int i;

	/*
	 * Wait timeout milliseconds for a newly created device to be
	 * available from the given path.  There is a small window when a
	 * /dev/ device will exist and the udev link will not, so we must
	 * wait for the symlink.  Depending on the udev rules this may take
	 * a few seconds.
	 */
	for (i = 0; i < timeout; i++) {
		usleep(1000);

		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0))
			return (0);
	}

	return (ENOENT);
}

int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

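/*
 * Illustrative usage sketch (compiled out): waiting for udev to create a
 * freshly labeled disk's first partition and then verifying the GPT, the
 * same two steps zpool_label_disk() performs below.  The disk name "sdb"
 * is hypothetical.
 */
#if 0
static int
example_wait_and_check(void)
{
	char path[MAXPATHLEN];

	/* wait up to three seconds for the first partition to appear */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, "sdb1");
	if (zpool_label_disk_wait(path, 3000) != 0)
		return (ENOENT);

	/* then verify that the GPT on the whole disk is intact */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, "sdb");
	return (zpool_label_disk_check(path));
}
#endif
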
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	/* Wait for the first expected slice to appear. */
	(void) snprintf(path, sizeof (path), "%s/%s%s%s", DISK_ROOT, name,
	    isdigit(name[strlen(name)-1]) ? "p" : "", FIRST_SLICE);
	rval = zpool_label_disk_wait(path, 3000);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}