lib/libzfs/libzfs_pool.c
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26#include <ctype.h>
27#include <errno.h>
28#include <devid.h>
29#include <fcntl.h>
30#include <libintl.h>
31#include <stdio.h>
32#include <stdlib.h>
33#include <strings.h>
34#include <unistd.h>
35#include <zone.h>
36#include <sys/stat.h>
37#include <sys/efi_partition.h>
38#include <sys/vtoc.h>
39#include <sys/zfs_ioctl.h>
40#include <dlfcn.h>
41
42#include "zfs_namecheck.h"
43#include "zfs_prop.h"
44#include "libzfs_impl.h"
45#include "zfs_comutil.h"
46
47static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
48
49typedef struct prop_flags {
50 int create:1; /* Validate property on creation */
51 int import:1; /* Validate property on import */
52} prop_flags_t;
53
54/*
55 * ====================================================================
56 * zpool property functions
57 * ====================================================================
58 */
59
60static int
61zpool_get_all_props(zpool_handle_t *zhp)
62{
63 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
64 libzfs_handle_t *hdl = zhp->zpool_hdl;
65
66 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
67
68 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
69 return (-1);
70
71 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
72 if (errno == ENOMEM) {
73 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
74 zcmd_free_nvlists(&zc);
75 return (-1);
76 }
77 } else {
78 zcmd_free_nvlists(&zc);
79 return (-1);
80 }
81 }
82
83 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
84 zcmd_free_nvlists(&zc);
85 return (-1);
86 }
87
88 zcmd_free_nvlists(&zc);
89
90 return (0);
91}
92
93static int
94zpool_props_refresh(zpool_handle_t *zhp)
95{
96 nvlist_t *old_props;
97
98 old_props = zhp->zpool_props;
99
100 if (zpool_get_all_props(zhp) != 0)
101 return (-1);
102
103 nvlist_free(old_props);
104 return (0);
105}
106
107static char *
108zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
109 zprop_source_t *src)
110{
111 nvlist_t *nv, *nvl;
112 uint64_t ival;
113 char *value;
114 zprop_source_t source;
115
116 nvl = zhp->zpool_props;
117 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
118 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
119 source = ival;
120 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
121 } else {
122 source = ZPROP_SRC_DEFAULT;
123 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
124 value = "-";
125 }
126
127 if (src)
128 *src = source;
129
130 return (value);
131}
132
133uint64_t
134zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
135{
136 nvlist_t *nv, *nvl;
137 uint64_t value;
138 zprop_source_t source;
139
140 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
141 /*
142 * zpool_get_all_props() has most likely failed because
143 * the pool is faulted, but if all we need is the top level
144 * vdev's guid then get it from the zhp config nvlist.
145 */
146 if ((prop == ZPOOL_PROP_GUID) &&
147 (nvlist_lookup_nvlist(zhp->zpool_config,
148 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
149 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
150 == 0)) {
151 return (value);
152 }
153 return (zpool_prop_default_numeric(prop));
154 }
155
156 nvl = zhp->zpool_props;
157 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
158 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
159 source = value;
160 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
161 } else {
162 source = ZPROP_SRC_DEFAULT;
163 value = zpool_prop_default_numeric(prop);
164 }
165
166 if (src)
167 *src = source;
168
169 return (value);
170}
171
172/*
173 * Map VDEV STATE to printed strings.
174 */
175char *
176zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
177{
178 switch (state) {
179 default:
180 break;
181 case VDEV_STATE_CLOSED:
182 case VDEV_STATE_OFFLINE:
183 return (gettext("OFFLINE"));
184 case VDEV_STATE_REMOVED:
185 return (gettext("REMOVED"));
186 case VDEV_STATE_CANT_OPEN:
187 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
188 return (gettext("FAULTED"));
189 else if (aux == VDEV_AUX_SPLIT_POOL)
190 return (gettext("SPLIT"));
191 else
192 return (gettext("UNAVAIL"));
193 case VDEV_STATE_FAULTED:
194 return (gettext("FAULTED"));
195 case VDEV_STATE_DEGRADED:
196 return (gettext("DEGRADED"));
197 case VDEV_STATE_HEALTHY:
198 return (gettext("ONLINE"));
199 }
200
201 return (gettext("UNKNOWN"));
202}
203
204/*
205 * Get a zpool property value for 'prop' and return the value in
206 * a pre-allocated buffer.
207 */
208int
209zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
210 zprop_source_t *srctype)
211{
212 uint64_t intval;
213 const char *strval;
214 zprop_source_t src = ZPROP_SRC_NONE;
215 nvlist_t *nvroot;
216 vdev_stat_t *vs;
217 uint_t vsc;
218
219 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
220 switch (prop) {
221 case ZPOOL_PROP_NAME:
222 (void) strlcpy(buf, zpool_get_name(zhp), len);
223 break;
224
225 case ZPOOL_PROP_HEALTH:
226 (void) strlcpy(buf, "FAULTED", len);
227 break;
228
229 case ZPOOL_PROP_GUID:
230 intval = zpool_get_prop_int(zhp, prop, &src);
231 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
232 break;
233
234 case ZPOOL_PROP_ALTROOT:
235 case ZPOOL_PROP_CACHEFILE:
236 if (zhp->zpool_props != NULL ||
237 zpool_get_all_props(zhp) == 0) {
238 (void) strlcpy(buf,
239 zpool_get_prop_string(zhp, prop, &src),
240 len);
241 if (srctype != NULL)
242 *srctype = src;
243 return (0);
244 }
245 /* FALLTHROUGH */
246 default:
247 (void) strlcpy(buf, "-", len);
248 break;
249 }
250
251 if (srctype != NULL)
252 *srctype = src;
253 return (0);
254 }
255
256 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
257 prop != ZPOOL_PROP_NAME)
258 return (-1);
259
260 switch (zpool_prop_get_type(prop)) {
261 case PROP_TYPE_STRING:
262 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
263 len);
264 break;
265
266 case PROP_TYPE_NUMBER:
267 intval = zpool_get_prop_int(zhp, prop, &src);
268
269 switch (prop) {
270 case ZPOOL_PROP_SIZE:
271 case ZPOOL_PROP_ALLOCATED:
272 case ZPOOL_PROP_FREE:
273 case ZPOOL_PROP_ASHIFT:
274 (void) zfs_nicenum(intval, buf, len);
275 break;
276
277 case ZPOOL_PROP_CAPACITY:
278 (void) snprintf(buf, len, "%llu%%",
279 (u_longlong_t)intval);
280 break;
281
282 case ZPOOL_PROP_DEDUPRATIO:
283 (void) snprintf(buf, len, "%llu.%02llux",
284 (u_longlong_t)(intval / 100),
285 (u_longlong_t)(intval % 100));
286 break;
287
288 case ZPOOL_PROP_HEALTH:
289 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
290 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
291 verify(nvlist_lookup_uint64_array(nvroot,
292 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
293 == 0);
294
295 (void) strlcpy(buf, zpool_state_to_name(intval,
296 vs->vs_aux), len);
297 break;
298 default:
299 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
300 }
301 break;
302
303 case PROP_TYPE_INDEX:
304 intval = zpool_get_prop_int(zhp, prop, &src);
305 if (zpool_prop_index_to_string(prop, intval, &strval)
306 != 0)
307 return (-1);
308 (void) strlcpy(buf, strval, len);
309 break;
310
311 default:
312 abort();
313 }
314
315 if (srctype)
316 *srctype = src;
317
318 return (0);
319}
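/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): callers typically pass a ZFS_MAXPROPLEN-sized buffer and an
 * optional source pointer, e.g.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    &src) == 0)
 *		(void) printf("capacity: %s\n", buf);
 *
 * With ZPOOL_PROP_CAPACITY the buffer receives a percentage such as "42%",
 * per the PROP_TYPE_NUMBER formatting above.
 */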
320
321/*
322 * Check that the bootfs name begins with the name of the pool it is being
323 * set on. Assumes bootfs is a valid dataset name.
324 */
325static boolean_t
326bootfs_name_valid(const char *pool, char *bootfs)
327{
328 int len = strlen(pool);
329
330 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
331 return (B_FALSE);
332
333 if (strncmp(pool, bootfs, len) == 0 &&
334 (bootfs[len] == '/' || bootfs[len] == '\0'))
335 return (B_TRUE);
336
337 return (B_FALSE);
338}
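/*
 * Editorial example (not in the original source), illustrating the check
 * above for pool "tank":
 *
 *	bootfs_name_valid("tank", "tank")          == B_TRUE
 *	bootfs_name_valid("tank", "tank/ROOT/fs")  == B_TRUE
 *	bootfs_name_valid("tank", "tanks/ROOT")    == B_FALSE
 *	bootfs_name_valid("tank", "other/fs")      == B_FALSE
 */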
339
340/*
341 * Inspect the configuration to determine if any of the devices contain
342 * an EFI label.
343 */
344static boolean_t
345pool_uses_efi(nvlist_t *config)
346{
347 nvlist_t **child;
348 uint_t c, children;
349
350 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
351 &child, &children) != 0)
352 return (read_efi_label(config, NULL) >= 0);
353
354 for (c = 0; c < children; c++) {
355 if (pool_uses_efi(child[c]))
356 return (B_TRUE);
357 }
358 return (B_FALSE);
359}
360
361static boolean_t
362pool_is_bootable(zpool_handle_t *zhp)
363{
364 char bootfs[ZPOOL_MAXNAMELEN];
365
366 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
367 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
368 sizeof (bootfs)) != 0);
369}
370
371
372/*
373 * Given an nvlist of zpool properties to be set, validate that they are
374 * correct, and parse any numeric properties (index, boolean, etc) if they are
375 * specified as strings.
376 */
377static nvlist_t *
b128c09f 378zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
572e2857 379 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
34dc7c2f
BB
380{
381 nvpair_t *elem;
382 nvlist_t *retprops;
383 zpool_prop_t prop;
384 char *strval;
385 uint64_t intval;
386 char *slash;
387 struct stat64 statbuf;
388 zpool_handle_t *zhp;
389 nvlist_t *nvroot;
390
391 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
392 (void) no_memory(hdl);
393 return (NULL);
394 }
395
396 elem = NULL;
397 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
398 const char *propname = nvpair_name(elem);
399
400 /*
401 * Make sure this property is valid and applies to this type.
402 */
403 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
404 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
405 "invalid property '%s'"), propname);
406 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
407 goto error;
408 }
409
410 if (zpool_prop_readonly(prop)) {
411 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
412 "is readonly"), propname);
413 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
414 goto error;
415 }
416
417 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
418 &strval, &intval, errbuf) != 0)
419 goto error;
420
421 /*
422 * Perform additional checking for specific properties.
423 */
424 switch (prop) {
425 default:
426 break;
427 case ZPOOL_PROP_VERSION:
428 if (intval < version || intval > SPA_VERSION) {
429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
430 "property '%s' number %d is invalid."),
431 propname, intval);
432 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
433 goto error;
434 }
435 break;
436
437 case ZPOOL_PROP_ASHIFT:
438 if (!flags.create) {
439 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
440 "property '%s' can only be set at "
441 "creation time"), propname);
442 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
443 goto error;
444 }
445
446 if (intval != 0 && (intval < 9 || intval > 13)) {
447 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
448 "property '%s' number %d is invalid."),
449 propname, intval);
450 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
451 goto error;
452 }
453 break;
454
455 case ZPOOL_PROP_BOOTFS:
456 if (flags.create || flags.import) {
457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
458 "property '%s' cannot be set at creation "
459 "or import time"), propname);
460 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
461 goto error;
462 }
463
464 if (version < SPA_VERSION_BOOTFS) {
465 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
466 "pool must be upgraded to support "
467 "'%s' property"), propname);
468 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
469 goto error;
470 }
471
472 /*
473 * bootfs property value has to be a dataset name and
474 * the dataset has to be in the same pool it is set on.
475 */
476 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
477 strval)) {
478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
479 "is an invalid name"), strval);
480 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
481 goto error;
482 }
483
484 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
485 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
486 "could not open pool '%s'"), poolname);
487 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
488 goto error;
489 }
490 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
491 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
492
493#if defined(__sun__) || defined(__sun)
494 /*
495 * bootfs property cannot be set on a disk which has
496 * been EFI labeled.
497 */
498 if (pool_uses_efi(nvroot)) {
499 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
500 "property '%s' not supported on "
501 "EFI labeled devices"), propname);
502 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
503 zpool_close(zhp);
504 goto error;
505 }
506#endif
507 zpool_close(zhp);
508 break;
509
510 case ZPOOL_PROP_ALTROOT:
511 if (!flags.create && !flags.import) {
512 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
513 "property '%s' can only be set during pool "
514 "creation or import"), propname);
515 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
516 goto error;
517 }
518
519 if (strval[0] != '/') {
520 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
521 "bad alternate root '%s'"), strval);
522 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
523 goto error;
524 }
525 break;
526
527 case ZPOOL_PROP_CACHEFILE:
528 if (strval[0] == '\0')
529 break;
530
531 if (strcmp(strval, "none") == 0)
532 break;
533
534 if (strval[0] != '/') {
535 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
536 "property '%s' must be empty, an "
537 "absolute path, or 'none'"), propname);
538 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
539 goto error;
540 }
541
542 slash = strrchr(strval, '/');
543
544 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
545 strcmp(slash, "/..") == 0) {
546 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
547 "'%s' is not a valid file"), strval);
548 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
549 goto error;
550 }
551
552 *slash = '\0';
553
554 if (strval[0] != '\0' &&
555 (stat64(strval, &statbuf) != 0 ||
556 !S_ISDIR(statbuf.st_mode))) {
557 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
558 "'%s' is not a valid directory"),
559 strval);
560 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
561 goto error;
562 }
563
564 *slash = '/';
565 break;
566
567 case ZPOOL_PROP_READONLY:
568 if (!flags.import) {
569 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
570 "property '%s' can only be set at "
571 "import time"), propname);
572 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
573 goto error;
574 }
575 break;
576 }
577 }
578
579 return (retprops);
580error:
581 nvlist_free(retprops);
582 return (NULL);
583}
584
585/*
586 * Set zpool property : propname=propval.
587 */
588int
589zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
590{
591 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
592 int ret = -1;
593 char errbuf[1024];
594 nvlist_t *nvl = NULL;
595 nvlist_t *realprops;
596 uint64_t version;
597 prop_flags_t flags = { 0 };
598
599 (void) snprintf(errbuf, sizeof (errbuf),
600 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
601 zhp->zpool_name);
602
603 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
604 return (no_memory(zhp->zpool_hdl));
605
606 if (nvlist_add_string(nvl, propname, propval) != 0) {
607 nvlist_free(nvl);
608 return (no_memory(zhp->zpool_hdl));
609 }
610
611 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
612 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
613 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
614 nvlist_free(nvl);
615 return (-1);
616 }
617
618 nvlist_free(nvl);
619 nvl = realprops;
620
621 /*
622 * Execute the corresponding ioctl() to set this property.
623 */
624 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
625
626 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
627 nvlist_free(nvl);
628 return (-1);
629 }
630
631 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
632
633 zcmd_free_nvlists(&zc);
634 nvlist_free(nvl);
635
636 if (ret)
637 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
638 else
639 (void) zpool_props_refresh(zhp);
640
641 return (ret);
642}
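/*
 * Illustrative call (editorial sketch, not part of the original source);
 * 'zhp' and 'hdl' are assumed to be an open pool handle and its libzfs
 * handle. The property nvlist is built and validated internally, so the
 * caller only supplies name/value strings:
 *
 *	if (zpool_set_prop(zhp, "cachefile", "none") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */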
643
644int
645zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
646{
647 libzfs_handle_t *hdl = zhp->zpool_hdl;
648 zprop_list_t *entry;
649 char buf[ZFS_MAXPROPLEN];
650
651 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
652 return (-1);
653
654 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
655
656 if (entry->pl_fixed)
657 continue;
658
659 if (entry->pl_prop != ZPROP_INVAL &&
660 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
661 NULL) == 0) {
662 if (strlen(buf) > entry->pl_width)
663 entry->pl_width = strlen(buf);
664 }
665 }
666
667 return (0);
668}
669
670
671/*
672 * Don't start the slice at the default block of 34; many storage
673 * devices will use a stripe width of 128k, while other vendors prefer a 1m
674 * alignment. It is best to play it safe and ensure a 1m alignment
675 * given 512B blocks. When the block size is larger by a power of 2
676 * we will still be 1m aligned. Some devices are sensitive to the
677 * partition ending alignment as well.
678 */
679#define NEW_START_BLOCK 2048
680#define PARTITION_END_ALIGNMENT 2048
681
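/*
 * Editorial note (not in the original source): with 512B sectors the start
 * offset implied by NEW_START_BLOCK works out to exactly 1 MiB, and larger
 * power-of-two sector sizes preserve that alignment:
 *
 *	2048 sectors * 512 bytes  = 1,048,576 bytes (1 MiB)
 *	2048 sectors * 4096 bytes = 8 MiB (still 1 MiB aligned)
 */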
34dc7c2f
BB
682/*
683 * Validate the given pool name, optionally putting an extended error message in
684 * 'buf'.
685 */
686boolean_t
687zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
688{
689 namecheck_err_t why;
690 char what;
691 int ret;
692
693 ret = pool_namecheck(pool, &why, &what);
694
695 /*
696 * The rules for reserved pool names were extended at a later point.
697 * But we need to support users with existing pools that may now be
698 * invalid. So we only check for this expanded set of names during a
699 * create (or import), and only in userland.
700 */
701 if (ret == 0 && !isopen &&
702 (strncmp(pool, "mirror", 6) == 0 ||
703 strncmp(pool, "raidz", 5) == 0 ||
704 strncmp(pool, "spare", 5) == 0 ||
705 strcmp(pool, "log") == 0)) {
706 if (hdl != NULL)
707 zfs_error_aux(hdl,
708 dgettext(TEXT_DOMAIN, "name is reserved"));
709 return (B_FALSE);
710 }
711
712
713 if (ret != 0) {
714 if (hdl != NULL) {
715 switch (why) {
716 case NAME_ERR_TOOLONG:
717 zfs_error_aux(hdl,
718 dgettext(TEXT_DOMAIN, "name is too long"));
719 break;
720
721 case NAME_ERR_INVALCHAR:
722 zfs_error_aux(hdl,
723 dgettext(TEXT_DOMAIN, "invalid character "
724 "'%c' in pool name"), what);
725 break;
726
727 case NAME_ERR_NOLETTER:
728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
729 "name must begin with a letter"));
730 break;
731
732 case NAME_ERR_RESERVED:
733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
734 "name is reserved"));
735 break;
736
737 case NAME_ERR_DISKLIKE:
738 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
739 "pool name is reserved"));
740 break;
741
742 case NAME_ERR_LEADING_SLASH:
743 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
744 "leading slash in name"));
745 break;
746
747 case NAME_ERR_EMPTY_COMPONENT:
748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
749 "empty component in name"));
750 break;
751
752 case NAME_ERR_TRAILING_SLASH:
753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
754 "trailing slash in name"));
755 break;
756
757 case NAME_ERR_MULTIPLE_AT:
758 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
759 "multiple '@' delimiters in name"));
760 break;
761 case NAME_ERR_NO_AT:
762 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
763 "permission set is missing '@'"));
764 break;
765 }
766 }
767 return (B_FALSE);
768 }
769
770 return (B_TRUE);
771}
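/*
 * Editorial examples (not in the original source): reserved names are only
 * rejected when 'isopen' is B_FALSE, i.e. at create or import time:
 *
 *	zpool_name_valid(hdl, B_FALSE, "mirror1")   -> B_FALSE (name is reserved)
 *	zpool_name_valid(hdl, B_FALSE, "tank/data") -> B_FALSE ('/' not allowed)
 *	zpool_name_valid(hdl, B_FALSE, "tank")      -> B_TRUE
 */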
772
773/*
774 * Open a handle to the given pool, even if the pool is currently in the FAULTED
775 * state.
776 */
777zpool_handle_t *
778zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
779{
780 zpool_handle_t *zhp;
781 boolean_t missing;
782
783 /*
784 * Make sure the pool name is valid.
785 */
786 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
787 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
788 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
789 pool);
790 return (NULL);
791 }
792
793 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
794 return (NULL);
795
796 zhp->zpool_hdl = hdl;
797 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
798
799 if (zpool_refresh_stats(zhp, &missing) != 0) {
800 zpool_close(zhp);
801 return (NULL);
802 }
803
804 if (missing) {
805 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
806 (void) zfs_error_fmt(hdl, EZFS_NOENT,
807 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
808 zpool_close(zhp);
809 return (NULL);
810 }
811
812 return (zhp);
813}
814
815/*
816 * Like the above, but silent on error. Used when iterating over pools (because
817 * the configuration cache may be out of date).
818 */
819int
820zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
821{
822 zpool_handle_t *zhp;
823 boolean_t missing;
824
825 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
826 return (-1);
827
828 zhp->zpool_hdl = hdl;
829 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
830
831 if (zpool_refresh_stats(zhp, &missing) != 0) {
832 zpool_close(zhp);
833 return (-1);
834 }
835
836 if (missing) {
837 zpool_close(zhp);
838 *ret = NULL;
839 return (0);
840 }
841
842 *ret = zhp;
843 return (0);
844}
845
846/*
847 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
848 * state.
849 */
850zpool_handle_t *
851zpool_open(libzfs_handle_t *hdl, const char *pool)
852{
853 zpool_handle_t *zhp;
854
855 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
856 return (NULL);
857
858 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
859 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
860 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
861 zpool_close(zhp);
862 return (NULL);
863 }
864
865 return (zhp);
866}
867
868/*
869 * Close the handle. Simply frees the memory associated with the handle.
870 */
871void
872zpool_close(zpool_handle_t *zhp)
873{
874 if (zhp->zpool_config)
875 nvlist_free(zhp->zpool_config);
876 if (zhp->zpool_old_config)
877 nvlist_free(zhp->zpool_old_config);
878 if (zhp->zpool_props)
879 nvlist_free(zhp->zpool_props);
880 free(zhp);
881}
882
883/*
884 * Return the name of the pool.
885 */
886const char *
887zpool_get_name(zpool_handle_t *zhp)
888{
889 return (zhp->zpool_name);
890}
891
892
893/*
894 * Return the state of the pool (ACTIVE or UNAVAILABLE)
895 */
896int
897zpool_get_state(zpool_handle_t *zhp)
898{
899 return (zhp->zpool_state);
900}
901
902/*
903 * Create the named pool, using the provided vdev list. It is assumed
904 * that the consumer has already validated the contents of the nvlist, so we
905 * don't have to worry about error semantics.
906 */
907int
908zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
909 nvlist_t *props, nvlist_t *fsprops)
910{
911 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
912 nvlist_t *zc_fsprops = NULL;
913 nvlist_t *zc_props = NULL;
914 char msg[1024];
915 char *altroot;
916 int ret = -1;
917
918 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
919 "cannot create '%s'"), pool);
920
921 if (!zpool_name_valid(hdl, B_FALSE, pool))
922 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
923
924 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
925 return (-1);
926
927 if (props) {
928 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
929
930 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
931 SPA_VERSION_1, flags, msg)) == NULL) {
932 goto create_failed;
933 }
934 }
935
936 if (fsprops) {
937 uint64_t zoned;
938 char *zonestr;
939
940 zoned = ((nvlist_lookup_string(fsprops,
941 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
942 strcmp(zonestr, "on") == 0);
943
944 if ((zc_fsprops = zfs_valid_proplist(hdl,
945 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
946 goto create_failed;
947 }
948 if (!zc_props &&
949 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
950 goto create_failed;
951 }
952 if (nvlist_add_nvlist(zc_props,
953 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
954 goto create_failed;
955 }
956 }
957
958 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
959 goto create_failed;
960
961 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
962
963 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
964
965 zcmd_free_nvlists(&zc);
966 nvlist_free(zc_props);
967 nvlist_free(zc_fsprops);
968
969 switch (errno) {
970 case EBUSY:
971 /*
972 * This can happen if the user has specified the same
973 * device multiple times. We can't reliably detect this
974 * until we try to add it and see we already have a
975 * label. This can also happen if the device is
976 * part of an active md or lvm device.
977 */
978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
979 "one or more vdevs refer to the same device, or one of\n"
980 "the devices is part of an active md or lvm device"));
981 return (zfs_error(hdl, EZFS_BADDEV, msg));
982
983 case EOVERFLOW:
984 /*
985 * This occurs when one of the devices is below
986 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
987 * device was the problem device since there's no
988 * reliable way to determine device size from userland.
989 */
990 {
991 char buf[64];
992
993 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
994
995 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
996 "one or more devices is less than the "
997 "minimum size (%s)"), buf);
998 }
999 return (zfs_error(hdl, EZFS_BADDEV, msg));
1000
1001 case ENOSPC:
1002 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1003 "one or more devices is out of space"));
1004 return (zfs_error(hdl, EZFS_BADDEV, msg));
1005
1006 case ENOTBLK:
1007 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1008 "cache device must be a disk or disk slice"));
1009 return (zfs_error(hdl, EZFS_BADDEV, msg));
1010
1011 default:
1012 return (zpool_standard_error(hdl, errno, msg));
1013 }
1014 }
1015
1016 /*
1017 * If this is an alternate root pool, then we automatically set the
1018 * mountpoint of the root dataset to be '/'.
1019 */
1020 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1021 &altroot) == 0) {
1022 zfs_handle_t *zhp;
1023
1024 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1025 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1026 "/") == 0);
1027
1028 zfs_close(zhp);
1029 }
1030
1031create_failed:
1032 zcmd_free_nvlists(&zc);
1033 nvlist_free(zc_props);
1034 nvlist_free(zc_fsprops);
1035 return (ret);
1036}
1037
1038/*
1039 * Destroy the given pool. It is up to the caller to ensure that there are no
1040 * datasets left in the pool.
1041 */
1042int
1043zpool_destroy(zpool_handle_t *zhp)
1044{
2598c001 1045 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
1046 zfs_handle_t *zfp = NULL;
1047 libzfs_handle_t *hdl = zhp->zpool_hdl;
1048 char msg[1024];
1049
1050 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
572e2857 1051 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
34dc7c2f
BB
1052 return (-1);
1053
34dc7c2f
BB
1054 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1055
572e2857 1056 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
34dc7c2f
BB
1057 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1058 "cannot destroy '%s'"), zhp->zpool_name);
1059
1060 if (errno == EROFS) {
1061 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1062 "one or more devices is read only"));
1063 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1064 } else {
1065 (void) zpool_standard_error(hdl, errno, msg);
1066 }
1067
1068 if (zfp)
1069 zfs_close(zfp);
1070 return (-1);
1071 }
1072
1073 if (zfp) {
1074 remove_mountpoint(zfp);
1075 zfs_close(zfp);
1076 }
1077
1078 return (0);
1079}
1080
1081/*
1082 * Add the given vdevs to the pool. The caller must have already performed the
1083 * necessary verification to ensure that the vdev specification is well-formed.
1084 */
1085int
1086zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1087{
2598c001 1088 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
1089 int ret;
1090 libzfs_handle_t *hdl = zhp->zpool_hdl;
1091 char msg[1024];
1092 nvlist_t **spares, **l2cache;
1093 uint_t nspares, nl2cache;
1094
1095 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1096 "cannot add to '%s'"), zhp->zpool_name);
1097
1098 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1099 SPA_VERSION_SPARES &&
1100 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1101 &spares, &nspares) == 0) {
1102 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1103 "upgraded to add hot spares"));
1104 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1105 }
1106
b128c09f
BB
1107 if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1108 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1109 uint64_t s;
1110
1111 for (s = 0; s < nspares; s++) {
1112 char *path;
1113
1114 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1115 &path) == 0 && pool_uses_efi(spares[s])) {
1116 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1117 "device '%s' contains an EFI label and "
1118 "cannot be used on root pools."),
428870ff
BB
1119 zpool_vdev_name(hdl, NULL, spares[s],
1120 B_FALSE));
b128c09f
BB
1121 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1122 }
1123 }
1124 }
1125
34dc7c2f
BB
1126 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1127 SPA_VERSION_L2CACHE &&
1128 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1129 &l2cache, &nl2cache) == 0) {
1130 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1131 "upgraded to add cache devices"));
1132 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1133 }
1134
1135 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1136 return (-1);
1137 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1138
572e2857 1139 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
34dc7c2f
BB
1140 switch (errno) {
1141 case EBUSY:
1142 /*
1143 * This can happen if the user has specified the same
1144 * device multiple times. We can't reliably detect this
1145 * until we try to add it and see we already have a
1146 * label.
1147 */
1148 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1149 "one or more vdevs refer to the same device"));
1150 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1151 break;
1152
1153 case EOVERFLOW:
1154 /*
1155 * This occurs when one of the devices is below
1156 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1157 * device was the problem device since there's no
1158 * reliable way to determine device size from userland.
1159 */
1160 {
1161 char buf[64];
1162
1163 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1164
1165 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1166 "device is less than the minimum "
1167 "size (%s)"), buf);
1168 }
1169 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1170 break;
1171
1172 case ENOTSUP:
1173 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1174 "pool must be upgraded to add these vdevs"));
1175 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1176 break;
1177
1178 case EDOM:
1179 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1180 "root pool can not have multiple vdevs"
1181 " or separate logs"));
1182 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1183 break;
1184
1185 case ENOTBLK:
1186 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1187 "cache device must be a disk or disk slice"));
1188 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1189 break;
1190
1191 default:
1192 (void) zpool_standard_error(hdl, errno, msg);
1193 }
1194
1195 ret = -1;
1196 } else {
1197 ret = 0;
1198 }
1199
1200 zcmd_free_nvlists(&zc);
1201
1202 return (ret);
1203}
1204
1205/*
1206 * Exports the pool from the system. The caller must ensure that there are no
1207 * mounted datasets in the pool.
1208 */
1209int
fb5f0bc8 1210zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
34dc7c2f 1211{
2598c001 1212 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
b128c09f 1213 char msg[1024];
34dc7c2f 1214
b128c09f
BB
1215 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1216 "cannot export '%s'"), zhp->zpool_name);
1217
34dc7c2f 1218 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 1219 zc.zc_cookie = force;
fb5f0bc8 1220 zc.zc_guid = hardforce;
b128c09f
BB
1221
1222 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1223 switch (errno) {
1224 case EXDEV:
1225 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1226 "use '-f' to override the following errors:\n"
1227 "'%s' has an active shared spare which could be"
1228 " used by other pools once '%s' is exported."),
1229 zhp->zpool_name, zhp->zpool_name);
1230 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1231 msg));
1232 default:
1233 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1234 msg));
1235 }
1236 }
34dc7c2f 1237
34dc7c2f
BB
1238 return (0);
1239}
1240
fb5f0bc8
BB
1241int
1242zpool_export(zpool_handle_t *zhp, boolean_t force)
1243{
1244 return (zpool_export_common(zhp, force, B_FALSE));
1245}
1246
1247int
1248zpool_export_force(zpool_handle_t *zhp)
1249{
1250 return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1251}
1252
428870ff
BB
1253static void
1254zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
572e2857 1255 nvlist_t *config)
428870ff 1256{
572e2857 1257 nvlist_t *nv = NULL;
428870ff
BB
1258 uint64_t rewindto;
1259 int64_t loss = -1;
1260 struct tm t;
1261 char timestr[128];
1262
572e2857
BB
1263 if (!hdl->libzfs_printerr || config == NULL)
1264 return;
1265
1266 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
428870ff
BB
1267 return;
1268
572e2857 1269 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
428870ff 1270 return;
572e2857 1271 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
428870ff
BB
1272
1273 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
b8864a23 1274 strftime(timestr, 128, "%c", &t) != 0) {
428870ff
BB
1275 if (dryrun) {
1276 (void) printf(dgettext(TEXT_DOMAIN,
1277 "Would be able to return %s "
1278 "to its state as of %s.\n"),
1279 name, timestr);
1280 } else {
1281 (void) printf(dgettext(TEXT_DOMAIN,
1282 "Pool %s returned to its state as of %s.\n"),
1283 name, timestr);
1284 }
1285 if (loss > 120) {
1286 (void) printf(dgettext(TEXT_DOMAIN,
1287 "%s approximately %lld "),
1288 dryrun ? "Would discard" : "Discarded",
b8864a23 1289 ((longlong_t)loss + 30) / 60);
428870ff
BB
1290 (void) printf(dgettext(TEXT_DOMAIN,
1291 "minutes of transactions.\n"));
1292 } else if (loss > 0) {
1293 (void) printf(dgettext(TEXT_DOMAIN,
1294 "%s approximately %lld "),
b8864a23
BB
1295 dryrun ? "Would discard" : "Discarded",
1296 (longlong_t)loss);
428870ff
BB
1297 (void) printf(dgettext(TEXT_DOMAIN,
1298 "seconds of transactions.\n"));
1299 }
1300 }
1301}
1302
1303void
1304zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1305 nvlist_t *config)
1306{
572e2857 1307 nvlist_t *nv = NULL;
428870ff
BB
1308 int64_t loss = -1;
1309 uint64_t edata = UINT64_MAX;
1310 uint64_t rewindto;
1311 struct tm t;
1312 char timestr[128];
1313
1314 if (!hdl->libzfs_printerr)
1315 return;
1316
1317 if (reason >= 0)
1318 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1319 else
1320 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1321
1322 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
572e2857
BB
1323 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1324 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
428870ff
BB
1325 goto no_info;
1326
572e2857
BB
1327 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1328 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
428870ff
BB
1329 &edata);
1330
1331 (void) printf(dgettext(TEXT_DOMAIN,
1332 "Recovery is possible, but will result in some data loss.\n"));
1333
1334 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
b8864a23 1335 strftime(timestr, 128, "%c", &t) != 0) {
428870ff
BB
1336 (void) printf(dgettext(TEXT_DOMAIN,
1337 "\tReturning the pool to its state as of %s\n"
1338 "\tshould correct the problem. "),
1339 timestr);
1340 } else {
1341 (void) printf(dgettext(TEXT_DOMAIN,
1342 "\tReverting the pool to an earlier state "
1343 "should correct the problem.\n\t"));
1344 }
1345
1346 if (loss > 120) {
1347 (void) printf(dgettext(TEXT_DOMAIN,
1348 "Approximately %lld minutes of data\n"
b8864a23
BB
1349 "\tmust be discarded, irreversibly. "),
1350 ((longlong_t)loss + 30) / 60);
428870ff
BB
1351 } else if (loss > 0) {
1352 (void) printf(dgettext(TEXT_DOMAIN,
1353 "Approximately %lld seconds of data\n"
b8864a23
BB
1354 "\tmust be discarded, irreversibly. "),
1355 (longlong_t)loss);
428870ff
BB
1356 }
1357 if (edata != 0 && edata != UINT64_MAX) {
1358 if (edata == 1) {
1359 (void) printf(dgettext(TEXT_DOMAIN,
1360 "After rewind, at least\n"
1361 "\tone persistent user-data error will remain. "));
1362 } else {
1363 (void) printf(dgettext(TEXT_DOMAIN,
1364 "After rewind, several\n"
1365 "\tpersistent user-data errors will remain. "));
1366 }
1367 }
1368 (void) printf(dgettext(TEXT_DOMAIN,
1369 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1370 reason >= 0 ? "clear" : "import", name);
1371
1372 (void) printf(dgettext(TEXT_DOMAIN,
1373 "A scrub of the pool\n"
1374 "\tis strongly recommended after recovery.\n"));
1375 return;
1376
1377no_info:
1378 (void) printf(dgettext(TEXT_DOMAIN,
1379 "Destroy and re-create the pool from\n\ta backup source.\n"));
1380}
1381
34dc7c2f
BB
1382/*
1383 * zpool_import() is a contracted interface. Should be kept the same
1384 * if possible.
1385 *
1386 * Applications should use zpool_import_props() to import a pool with
1387 * new properties value to be set.
1388 */
1389int
1390zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1391 char *altroot)
1392{
1393 nvlist_t *props = NULL;
1394 int ret;
1395
1396 if (altroot != NULL) {
1397 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1398 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1399 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1400 newname));
1401 }
1402
1403 if (nvlist_add_string(props,
fb5f0bc8
BB
1404 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1405 nvlist_add_string(props,
1406 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
34dc7c2f
BB
1407 nvlist_free(props);
1408 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1409 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1410 newname));
1411 }
1412 }
1413
572e2857
BB
1414 ret = zpool_import_props(hdl, config, newname, props,
1415 ZFS_IMPORT_NORMAL);
34dc7c2f
BB
1416 if (props)
1417 nvlist_free(props);
1418 return (ret);
1419}
1420
572e2857
BB
1421static void
1422print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1423 int indent)
1424{
1425 nvlist_t **child;
1426 uint_t c, children;
1427 char *vname;
1428 uint64_t is_log = 0;
1429
1430 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1431 &is_log);
1432
1433 if (name != NULL)
1434 (void) printf("\t%*s%s%s\n", indent, "", name,
1435 is_log ? " [log]" : "");
1436
1437 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1438 &child, &children) != 0)
1439 return;
1440
1441 for (c = 0; c < children; c++) {
1442 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1443 print_vdev_tree(hdl, vname, child[c], indent + 2);
1444 free(vname);
1445 }
1446}
1447
34dc7c2f
BB
1448/*
1449 * Import the given pool using the known configuration and a list of
1450 * properties to be set. The configuration should have come from
1451 * zpool_find_import(). The 'newname' parameters control whether the pool
1452 * is imported with a different name.
1453 */
1454int
1455zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
572e2857 1456 nvlist_t *props, int flags)
34dc7c2f 1457{
2598c001 1458 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
428870ff 1459 zpool_rewind_policy_t policy;
572e2857
BB
1460 nvlist_t *nv = NULL;
1461 nvlist_t *nvinfo = NULL;
1462 nvlist_t *missing = NULL;
34dc7c2f
BB
1463 char *thename;
1464 char *origname;
1465 int ret;
572e2857 1466 int error = 0;
34dc7c2f
BB
1467 char errbuf[1024];
1468
1469 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1470 &origname) == 0);
1471
1472 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1473 "cannot import pool '%s'"), origname);
1474
1475 if (newname != NULL) {
1476 if (!zpool_name_valid(hdl, B_FALSE, newname))
1477 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1478 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1479 newname));
1480 thename = (char *)newname;
1481 } else {
1482 thename = origname;
1483 }
1484
1485 if (props) {
1486 uint64_t version;
572e2857 1487 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
34dc7c2f
BB
1488
1489 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1490 &version) == 0);
1491
b128c09f 1492 if ((props = zpool_valid_proplist(hdl, origname,
572e2857 1493 props, version, flags, errbuf)) == NULL) {
34dc7c2f
BB
1494 return (-1);
1495 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1496 nvlist_free(props);
1497 return (-1);
1498 }
1499 }
1500
1501 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1502
1503 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1504 &zc.zc_guid) == 0);
1505
1506 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1507 nvlist_free(props);
1508 return (-1);
1509 }
572e2857 1510 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
428870ff
BB
1511 nvlist_free(props);
1512 return (-1);
1513 }
34dc7c2f 1514
572e2857
BB
1515 zc.zc_cookie = flags;
1516 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1517 errno == ENOMEM) {
1518 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1519 zcmd_free_nvlists(&zc);
1520 return (-1);
1521 }
1522 }
1523 if (ret != 0)
1524 error = errno;
1525
1526 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1527 zpool_get_rewind_policy(config, &policy);
1528
1529 if (error) {
34dc7c2f 1530 char desc[1024];
428870ff 1531
428870ff
BB
1532 /*
1533 * Dry-run failed, but we print out what success
1534 * looks like if we found a best txg.
1535 */
572e2857 1536 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
428870ff 1537 zpool_rewind_exclaim(hdl, newname ? origname : thename,
572e2857
BB
1538 B_TRUE, nv);
1539 nvlist_free(nv);
428870ff
BB
1540 return (-1);
1541 }
1542
34dc7c2f
BB
1543 if (newname == NULL)
1544 (void) snprintf(desc, sizeof (desc),
1545 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1546 thename);
1547 else
1548 (void) snprintf(desc, sizeof (desc),
1549 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1550 origname, thename);
1551
572e2857 1552 switch (error) {
34dc7c2f
BB
1553 case ENOTSUP:
1554 /*
1555 * Unsupported version.
1556 */
1557 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1558 break;
1559
1560 case EINVAL:
1561 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1562 break;
1563
428870ff
BB
1564 case EROFS:
1565 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1566 "one or more devices is read only"));
1567 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1568 break;
1569
572e2857
BB
1570 case ENXIO:
1571 if (nv && nvlist_lookup_nvlist(nv,
1572 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1573 nvlist_lookup_nvlist(nvinfo,
1574 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1575 (void) printf(dgettext(TEXT_DOMAIN,
1576 "The devices below are missing, use "
1577 "'-m' to import the pool anyway:\n"));
1578 print_vdev_tree(hdl, NULL, missing, 2);
1579 (void) printf("\n");
1580 }
1581 (void) zpool_standard_error(hdl, error, desc);
1582 break;
1583
1584 case EEXIST:
1585 (void) zpool_standard_error(hdl, error, desc);
1586 break;
1587
abe5b8fb
BB
1588 case EBUSY:
1589 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1590 "one or more devices are already in use\n"));
1591 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1592 break;
1593
34dc7c2f 1594 default:
572e2857 1595 (void) zpool_standard_error(hdl, error, desc);
428870ff 1596 zpool_explain_recover(hdl,
572e2857 1597 newname ? origname : thename, -error, nv);
428870ff 1598 break;
34dc7c2f
BB
1599 }
1600
572e2857 1601 nvlist_free(nv);
34dc7c2f
BB
1602 ret = -1;
1603 } else {
1604 zpool_handle_t *zhp;
1605
1606 /*
1607 * This should never fail, but play it safe anyway.
1608 */
428870ff 1609 if (zpool_open_silent(hdl, thename, &zhp) != 0)
34dc7c2f 1610 ret = -1;
428870ff 1611 else if (zhp != NULL)
34dc7c2f 1612 zpool_close(zhp);
428870ff
BB
1613 if (policy.zrp_request &
1614 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1615 zpool_rewind_exclaim(hdl, newname ? origname : thename,
572e2857 1616 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
34dc7c2f 1617 }
572e2857 1618 nvlist_free(nv);
428870ff 1619 return (0);
34dc7c2f
BB
1620 }
1621
1622 zcmd_free_nvlists(&zc);
1623 nvlist_free(props);
1624
1625 return (ret);
1626}
1627
1628/*
428870ff 1629 * Scan the pool.
34dc7c2f
BB
1630 */
1631int
428870ff 1632zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
34dc7c2f 1633{
2598c001 1634 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
1635 char msg[1024];
1636 libzfs_handle_t *hdl = zhp->zpool_hdl;
1637
1638 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
428870ff 1639 zc.zc_cookie = func;
34dc7c2f 1640
572e2857 1641 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
428870ff 1642 (errno == ENOENT && func != POOL_SCAN_NONE))
34dc7c2f
BB
1643 return (0);
1644
428870ff
BB
1645 if (func == POOL_SCAN_SCRUB) {
1646 (void) snprintf(msg, sizeof (msg),
1647 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1648 } else if (func == POOL_SCAN_NONE) {
1649 (void) snprintf(msg, sizeof (msg),
1650 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1651 zc.zc_name);
1652 } else {
1653 assert(!"unexpected result");
1654 }
34dc7c2f 1655
428870ff
BB
1656 if (errno == EBUSY) {
1657 nvlist_t *nvroot;
1658 pool_scan_stat_t *ps = NULL;
1659 uint_t psc;
1660
1661 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1662 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1663 (void) nvlist_lookup_uint64_array(nvroot,
1664 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1665 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1666 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1667 else
1668 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1669 } else if (errno == ENOENT) {
1670 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1671 } else {
34dc7c2f 1672 return (zpool_standard_error(hdl, errno, msg));
428870ff
BB
1673 }
1674}
1675
1676/*
1677 * Find a vdev that matches the search criteria specified. We use the
1678 * nvpair name to determine how we should look for the device.
1679 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1680 * spare; but FALSE if it's an INUSE spare.
1681 */
1682static nvlist_t *
9babb374
BB
1683vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1684 boolean_t *l2cache, boolean_t *log)
34dc7c2f
BB
1685{
1686 uint_t c, children;
1687 nvlist_t **child;
34dc7c2f 1688 nvlist_t *ret;
b128c09f 1689 uint64_t is_log;
9babb374
BB
1690 char *srchkey;
1691 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1692
1693 /* Nothing to look for */
1694 if (search == NULL || pair == NULL)
1695 return (NULL);
1696
1697 /* Obtain the key we will use to search */
1698 srchkey = nvpair_name(pair);
1699
1700 switch (nvpair_type(pair)) {
572e2857 1701 case DATA_TYPE_UINT64:
9babb374 1702 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
572e2857
BB
1703 uint64_t srchval, theguid;
1704
1705 verify(nvpair_value_uint64(pair, &srchval) == 0);
1706 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1707 &theguid) == 0);
1708 if (theguid == srchval)
1709 return (nv);
9babb374
BB
1710 }
1711 break;
9babb374
BB
1712
1713 case DATA_TYPE_STRING: {
1714 char *srchval, *val;
1715
1716 verify(nvpair_value_string(pair, &srchval) == 0);
1717 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1718 break;
34dc7c2f 1719
9babb374 1720 /*
428870ff
BB
1721 * Search for the requested value. Special cases:
1722 *
1723 * - ZPOOL_CONFIG_PATH for whole disk entries. These end with a
1724 * partition suffix "1", "-part1", or "p1". The suffix is hidden
1725 * from the user, but included in the string, so this matches around
1726 * it.
1727 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1728 *
1729 * Otherwise, all other searches are simple string compares.
9babb374 1730 */
a2c6816c 1731 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
9babb374
BB
1732 uint64_t wholedisk = 0;
1733
1734 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1735 &wholedisk);
1736 if (wholedisk) {
a2c6816c 1737 char buf[MAXPATHLEN];
428870ff 1738
a2c6816c
NB
1739 zfs_append_partition(srchval, buf, sizeof (buf));
1740 if (strcmp(val, buf) == 0)
428870ff
BB
1741 return (nv);
1742
9babb374
BB
1743 break;
1744 }
428870ff
BB
1745 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1746 char *type, *idx, *end, *p;
1747 uint64_t id, vdev_id;
1748
1749 /*
1750 * Determine our vdev type, keeping in mind
1751 * that the srchval is composed of a type and
1752 * vdev id pair (i.e. mirror-4).
1753 */
1754 if ((type = strdup(srchval)) == NULL)
1755 return (NULL);
1756
1757 if ((p = strrchr(type, '-')) == NULL) {
1758 free(type);
1759 break;
1760 }
1761 idx = p + 1;
1762 *p = '\0';
1763
1764 /*
1765 * If the types don't match then keep looking.
1766 */
1767 if (strncmp(val, type, strlen(val)) != 0) {
1768 free(type);
1769 break;
1770 }
1771
1772 verify(strncmp(type, VDEV_TYPE_RAIDZ,
1773 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1774 strncmp(type, VDEV_TYPE_MIRROR,
1775 strlen(VDEV_TYPE_MIRROR)) == 0);
1776 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1777 &id) == 0);
1778
1779 errno = 0;
1780 vdev_id = strtoull(idx, &end, 10);
1781
1782 free(type);
1783 if (errno != 0)
1784 return (NULL);
1785
1786 /*
1787 * Now verify that we have the correct vdev id.
1788 */
1789 if (vdev_id == id)
1790 return (nv);
9babb374 1791 }
34dc7c2f 1792
34dc7c2f 1793 /*
9babb374 1794 * Common case
34dc7c2f 1795 */
9babb374 1796 if (strcmp(srchval, val) == 0)
34dc7c2f 1797 return (nv);
9babb374
BB
1798 break;
1799 }
1800
1801 default:
1802 break;
34dc7c2f
BB
1803 }
1804
1805 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1806 &child, &children) != 0)
1807 return (NULL);
1808
b128c09f 1809 for (c = 0; c < children; c++) {
9babb374 1810 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f
BB
1811 avail_spare, l2cache, NULL)) != NULL) {
1812 /*
1813 * The 'is_log' value is only set for the toplevel
1814 * vdev, not the leaf vdevs. So we always lookup the
1815 * log device from the root of the vdev tree (where
1816 * 'log' is non-NULL).
1817 */
1818 if (log != NULL &&
1819 nvlist_lookup_uint64(child[c],
1820 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1821 is_log) {
1822 *log = B_TRUE;
1823 }
34dc7c2f 1824 return (ret);
b128c09f
BB
1825 }
1826 }
34dc7c2f
BB
1827
1828 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1829 &child, &children) == 0) {
1830 for (c = 0; c < children; c++) {
9babb374 1831 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 1832 avail_spare, l2cache, NULL)) != NULL) {
34dc7c2f
BB
1833 *avail_spare = B_TRUE;
1834 return (ret);
1835 }
1836 }
1837 }
1838
1839 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1840 &child, &children) == 0) {
1841 for (c = 0; c < children; c++) {
9babb374 1842 if ((ret = vdev_to_nvlist_iter(child[c], search,
b128c09f 1843 avail_spare, l2cache, NULL)) != NULL) {
34dc7c2f
BB
1844 *l2cache = B_TRUE;
1845 return (ret);
1846 }
1847 }
1848 }
1849
1850 return (NULL);
1851}
1852
9babb374
BB
1853/*
1854 * Given a physical path (minus the "/devices" prefix), find the
1855 * associated vdev.
1856 */
1857nvlist_t *
1858zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1859 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1860{
1861 nvlist_t *search, *nvroot, *ret;
1862
1863 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1864 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1865
1866 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1867 &nvroot) == 0);
1868
1869 *avail_spare = B_FALSE;
572e2857
BB
1870 *l2cache = B_FALSE;
1871 if (log != NULL)
1872 *log = B_FALSE;
9babb374
BB
1873 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1874 nvlist_free(search);
1875
1876 return (ret);
1877}
1878
428870ff
BB
1879/*
1880 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
1881 */
1882boolean_t
1883zpool_vdev_is_interior(const char *name)
1884{
1885 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1886 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1887 return (B_TRUE);
1888 return (B_FALSE);
1889}
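/*
 * Editorial examples (not in the original source): names generated for
 * interior top-level vdevs match this test, leaf device names do not:
 *
 *	zpool_vdev_is_interior("mirror-2") -> B_TRUE
 *	zpool_vdev_is_interior("raidz1-0") -> B_TRUE
 *	zpool_vdev_is_interior("sda")      -> B_FALSE
 */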
1890
34dc7c2f
BB
1891nvlist_t *
1892zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
b128c09f 1893 boolean_t *l2cache, boolean_t *log)
34dc7c2f
BB
1894{
1895 char buf[MAXPATHLEN];
34dc7c2f 1896 char *end;
9babb374 1897 nvlist_t *nvroot, *search, *ret;
34dc7c2f
BB
1898 uint64_t guid;
1899
9babb374
BB
1900 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1901
34dc7c2f
BB
1902 guid = strtoull(path, &end, 10);
1903 if (guid != 0 && *end == '\0') {
9babb374 1904 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
428870ff
BB
1905 } else if (zpool_vdev_is_interior(path)) {
1906 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
34dc7c2f 1907 } else if (path[0] != '/') {
a2c6816c
NB
1908 if (zfs_resolve_shortname(path, buf, sizeof (buf)) < 0) {
1909 nvlist_free(search);
1910 return (NULL);
1911 }
9babb374 1912 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
34dc7c2f 1913 } else {
9babb374 1914 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
34dc7c2f
BB
1915 }
1916
1917 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1918 &nvroot) == 0);
1919
1920 *avail_spare = B_FALSE;
1921 *l2cache = B_FALSE;
b128c09f
BB
1922 if (log != NULL)
1923 *log = B_FALSE;
9babb374
BB
1924 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1925 nvlist_free(search);
1926
1927 return (ret);
b128c09f
BB
1928}
1929
1930static int
1931vdev_online(nvlist_t *nv)
1932{
1933 uint64_t ival;
1934
1935 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1936 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1937 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1938 return (0);
1939
1940 return (1);
1941}
1942
1943/*
9babb374 1944 * Helper function for zpool_get_physpaths().
b128c09f 1945 */
9babb374
BB
1946static int
1947vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1948 size_t *bytes_written)
1949{
1950 size_t bytes_left, pos, rsz;
1951 char *tmppath;
1952 const char *format;
1953
1954 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1955 &tmppath) != 0)
1956 return (EZFS_NODEVICE);
1957
1958 pos = *bytes_written;
1959 bytes_left = physpath_size - pos;
1960 format = (pos == 0) ? "%s" : " %s";
1961
1962 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1963 *bytes_written += rsz;
1964
1965 if (rsz >= bytes_left) {
1966 /* if physpath was not copied properly, clear it */
1967 if (bytes_left != 0) {
1968 physpath[pos] = 0;
1969 }
1970 return (EZFS_NOSPC);
1971 }
1972 return (0);
1973}
1974
1975static int
1976vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1977 size_t *rsz, boolean_t is_spare)
1978{
1979 char *type;
1980 int ret;
1981
1982 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1983 return (EZFS_INVALCONFIG);
1984
1985 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1986 /*
1987 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1988 * For a spare vdev, we only want to boot from the active
1989 * spare device.
1990 */
1991 if (is_spare) {
1992 uint64_t spare = 0;
1993 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1994 &spare);
1995 if (!spare)
1996 return (EZFS_INVALCONFIG);
1997 }
1998
1999 if (vdev_online(nv)) {
2000 if ((ret = vdev_get_one_physpath(nv, physpath,
2001 phypath_size, rsz)) != 0)
2002 return (ret);
2003 }
2004 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2005 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2006 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2007 nvlist_t **child;
2008 uint_t count;
2009 int i, ret;
2010
2011 if (nvlist_lookup_nvlist_array(nv,
2012 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2013 return (EZFS_INVALCONFIG);
2014
2015 for (i = 0; i < count; i++) {
2016 ret = vdev_get_physpaths(child[i], physpath,
2017 phypath_size, rsz, is_spare);
2018 if (ret == EZFS_NOSPC)
2019 return (ret);
2020 }
2021 }
2022
2023 return (EZFS_POOL_INVALARG);
2024}
2025
2026/*
2027 * Get phys_path for a root pool config.
2028 * Return 0 on success; non-zero on failure.
2029 */
2030static int
2031zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
b128c09f 2032{
9babb374 2033 size_t rsz;
b128c09f
BB
2034 nvlist_t *vdev_root;
2035 nvlist_t **child;
2036 uint_t count;
9babb374 2037 char *type;
b128c09f 2038
9babb374 2039 rsz = 0;
b128c09f 2040
9babb374
BB
2041 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2042 &vdev_root) != 0)
2043 return (EZFS_INVALCONFIG);
b128c09f 2044
9babb374
BB
2045 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2046 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
b128c09f 2047 &child, &count) != 0)
9babb374 2048 return (EZFS_INVALCONFIG);
b128c09f 2049
9babb374
BB
2050 /*
2051 * A root pool cannot have EFI labeled disks and can only have
2052 * a single top-level vdev.
2053 */
2054 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2055 pool_uses_efi(vdev_root))
2056 return (EZFS_POOL_INVALARG);
b128c09f 2057
9babb374
BB
2058 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2059 B_FALSE);
2060
2061 /* No online devices */
2062 if (rsz == 0)
2063 return (EZFS_NODEVICE);
b128c09f
BB
2064
2065 return (0);
34dc7c2f
BB
2066}
2067
9babb374
BB
2068/*
2069 * Get phys_path for a root pool
2070 * Return 0 on success; non-zero on failure.
2071 */
2072int
2073zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2074{
2075 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2076 phypath_size));
2077}
2078
9babb374
BB
2079/*
2080 * If the device has been dynamically expanded then we need to relabel
2081 * the disk to use the new unallocated space.
2082 */
2083static int
d603ed6c 2084zpool_relabel_disk(libzfs_handle_t *hdl, const char *path)
9babb374 2085{
9babb374
BB
2086 char errbuf[1024];
2087 int fd, error;
9babb374 2088
d603ed6c 2089 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
9babb374 2090 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
109491a8 2091 "relabel '%s': unable to open device: %d"), path, errno);
9babb374
BB
2092 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2093 }
2094
2095 /*
2096 * It's possible that we might encounter an error if the device
2097 * does not have any unallocated space left. If so, we simply
2098 * ignore that error and continue on.
2099 */
d603ed6c 2100 error = efi_use_whole_disk(fd);
9babb374
BB
2101 (void) close(fd);
2102 if (error && error != VT_ENOSPC) {
2103 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
d603ed6c 2104 "relabel '%s': unable to read disk capacity"), path);
9babb374
BB
2105 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2106 }
2107 return (0);
2108}
2109
34dc7c2f
BB
2110/*
2111 * Bring the specified vdev online. The 'flags' parameter is a set of the
2112 * ZFS_ONLINE_* flags.
2113 */
2114int
2115zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2116 vdev_state_t *newstate)
2117{
2598c001 2118 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2119 char msg[1024];
2120 nvlist_t *tgt;
9babb374 2121 boolean_t avail_spare, l2cache, islog;
34dc7c2f
BB
2122 libzfs_handle_t *hdl = zhp->zpool_hdl;
2123
9babb374
BB
2124 if (flags & ZFS_ONLINE_EXPAND) {
2125 (void) snprintf(msg, sizeof (msg),
2126 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2127 } else {
2128 (void) snprintf(msg, sizeof (msg),
2129 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2130 }
34dc7c2f
BB
2131
2132 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 2133 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
9babb374 2134 &islog)) == NULL)
34dc7c2f
BB
2135 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2136
2137 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2138
428870ff 2139 if (avail_spare)
34dc7c2f
BB
2140 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2141
9babb374
BB
2142 if (flags & ZFS_ONLINE_EXPAND ||
2143 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2144 char *pathname = NULL;
2145 uint64_t wholedisk = 0;
2146
2147 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2148 &wholedisk);
2149 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2150 &pathname) == 0);
2151
2152 /*
2153 * XXX - L2ARC 1.0 devices can't support expansion.
2154 */
2155 if (l2cache) {
2156 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2157 "cannot expand cache devices"));
2158 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2159 }
2160
2161 if (wholedisk) {
2162 pathname += strlen(DISK_ROOT) + 1;
572e2857 2163 (void) zpool_relabel_disk(hdl, pathname);
9babb374
BB
2164 }
2165 }
2166
34dc7c2f
BB
2167 zc.zc_cookie = VDEV_STATE_ONLINE;
2168 zc.zc_obj = flags;
2169
572e2857 2170 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
428870ff
BB
2171 if (errno == EINVAL) {
2172 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2173 "from this pool into a new one. Use '%s' "
2174 "instead"), "zpool detach");
2175 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2176 }
34dc7c2f 2177 return (zpool_standard_error(hdl, errno, msg));
428870ff 2178 }
34dc7c2f
BB
2179
2180 *newstate = zc.zc_cookie;
2181 return (0);
2182}
2183
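/*
 * Illustrative usage sketch (not part of this file): bringing a device back
 * online and, via ZFS_ONLINE_EXPAND, requesting that any newly available
 * capacity be used. The device name is a placeholder.
 */
#if 0
static int
example_online_expand(zpool_handle_t *zhp, const char *dev)
{
	vdev_state_t newstate;
	int err;

	err = zpool_vdev_online(zhp, dev, ZFS_ONLINE_EXPAND, &newstate);
	if (err == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) fprintf(stderr, "onlined, but device is not healthy\n");
	return (err);
}
#endif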
2184/*
2185 * Take the specified vdev offline
2186 */
2187int
2188zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2189{
2598c001 2190 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2191 char msg[1024];
2192 nvlist_t *tgt;
2193 boolean_t avail_spare, l2cache;
2194 libzfs_handle_t *hdl = zhp->zpool_hdl;
2195
2196 (void) snprintf(msg, sizeof (msg),
2197 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2198
2199 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2200 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2201 NULL)) == NULL)
34dc7c2f
BB
2202 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2203
2204 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2205
428870ff 2206 if (avail_spare)
34dc7c2f
BB
2207 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2208
34dc7c2f
BB
2209 zc.zc_cookie = VDEV_STATE_OFFLINE;
2210 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2211
572e2857 2212 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2213 return (0);
2214
2215 switch (errno) {
2216 case EBUSY:
2217
2218 /*
2219 * There are no other replicas of this device.
2220 */
2221 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2222
9babb374
BB
2223 case EEXIST:
2224 /*
2225 * The log device has unplayed logs
2226 */
2227 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2228
34dc7c2f
BB
2229 default:
2230 return (zpool_standard_error(hdl, errno, msg));
2231 }
2232}
2233
2234/*
2235 * Mark the given vdev faulted.
2236 */
2237int
428870ff 2238zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2239{
2598c001 2240 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2241 char msg[1024];
2242 libzfs_handle_t *hdl = zhp->zpool_hdl;
2243
2244 (void) snprintf(msg, sizeof (msg),
b8864a23 2245 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2246
2247 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2248 zc.zc_guid = guid;
2249 zc.zc_cookie = VDEV_STATE_FAULTED;
428870ff 2250 zc.zc_obj = aux;
34dc7c2f 2251
572e2857 2252 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2253 return (0);
2254
2255 switch (errno) {
2256 case EBUSY:
2257
2258 /*
2259 * There are no other replicas of this device.
2260 */
2261 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2262
2263 default:
2264 return (zpool_standard_error(hdl, errno, msg));
2265 }
2266
2267}
2268
2269/*
2270 * Mark the given vdev degraded.
2271 */
2272int
428870ff 2273zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2274{
2598c001 2275 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2276 char msg[1024];
2277 libzfs_handle_t *hdl = zhp->zpool_hdl;
2278
2279 (void) snprintf(msg, sizeof (msg),
b8864a23 2280 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2281
2282 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2283 zc.zc_guid = guid;
2284 zc.zc_cookie = VDEV_STATE_DEGRADED;
428870ff 2285 zc.zc_obj = aux;
34dc7c2f 2286
572e2857 2287 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2288 return (0);
2289
2290 return (zpool_standard_error(hdl, errno, msg));
2291}
2292
2293/*
2294 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2295 * a hot spare.
2296 */
2297static boolean_t
2298is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2299{
2300 nvlist_t **child;
2301 uint_t c, children;
2302 char *type;
2303
2304 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2305 &children) == 0) {
2306 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2307 &type) == 0);
2308
2309 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2310 children == 2 && child[which] == tgt)
2311 return (B_TRUE);
2312
2313 for (c = 0; c < children; c++)
2314 if (is_replacing_spare(child[c], tgt, which))
2315 return (B_TRUE);
2316 }
2317
2318 return (B_FALSE);
2319}
2320
2321/*
2322 * Attach new_disk (fully described by nvroot) to old_disk.
2323 * If 'replacing' is specified, the new disk will replace the old one.
2324 */
2325int
2326zpool_vdev_attach(zpool_handle_t *zhp,
2327 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2328{
2598c001 2329 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2330 char msg[1024];
2331 int ret;
2332 nvlist_t *tgt;
b128c09f
BB
2333 boolean_t avail_spare, l2cache, islog;
2334 uint64_t val;
572e2857 2335 char *newname;
34dc7c2f
BB
2336 nvlist_t **child;
2337 uint_t children;
2338 nvlist_t *config_root;
2339 libzfs_handle_t *hdl = zhp->zpool_hdl;
b128c09f 2340 boolean_t rootpool = pool_is_bootable(zhp);
34dc7c2f
BB
2341
2342 if (replacing)
2343 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2344 "cannot replace %s with %s"), old_disk, new_disk);
2345 else
2346 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2347 "cannot attach %s to %s"), new_disk, old_disk);
2348
b128c09f
BB
2349 /*
2350 * If this is a root pool, make sure that we're not attaching an
2351 * EFI labeled device.
2352 */
2353 if (rootpool && pool_uses_efi(nvroot)) {
2354 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2355 "EFI labeled devices are not supported on root pools."));
2356 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2357 }
2358
34dc7c2f 2359 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2360 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2361 &islog)) == 0)
34dc7c2f
BB
2362 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2363
2364 if (avail_spare)
2365 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2366
2367 if (l2cache)
2368 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2369
2370 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2371 zc.zc_cookie = replacing;
2372
2373 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2374 &child, &children) != 0 || children != 1) {
2375 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2376 "new device must be a single disk"));
2377 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2378 }
2379
2380 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2381 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2382
428870ff 2383 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
b128c09f
BB
2384 return (-1);
2385
34dc7c2f
BB
2386 /*
2387 * If the target is a hot spare that has been swapped in, we can only
2388 * replace it with another hot spare.
2389 */
2390 if (replacing &&
2391 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
b128c09f
BB
2392 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2393 NULL) == NULL || !avail_spare) &&
2394 is_replacing_spare(config_root, tgt, 1)) {
34dc7c2f
BB
2395 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2396 "can only be replaced by another hot spare"));
b128c09f 2397 free(newname);
34dc7c2f
BB
2398 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2399 }
2400
b128c09f
BB
2401 free(newname);
2402
34dc7c2f
BB
2403 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2404 return (-1);
2405
572e2857 2406 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
34dc7c2f
BB
2407
2408 zcmd_free_nvlists(&zc);
2409
b128c09f
BB
2410 if (ret == 0) {
2411 if (rootpool) {
9babb374
BB
2412 /*
2413 * XXX need a better way to prevent user from
2414 * booting up a half-baked vdev.
2415 */
2416 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2417 "sure to wait until resilver is done "
2418 "before rebooting.\n"));
b128c09f 2419 }
34dc7c2f 2420 return (0);
b128c09f 2421 }
34dc7c2f
BB
2422
2423 switch (errno) {
2424 case ENOTSUP:
2425 /*
2426 * Can't attach to or replace this type of vdev.
2427 */
2428 if (replacing) {
572e2857
BB
2429 uint64_t version = zpool_get_prop_int(zhp,
2430 ZPOOL_PROP_VERSION, NULL);
2431
b128c09f 2432 if (islog)
34dc7c2f
BB
2433 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2434 "cannot replace a log with a spare"));
572e2857
BB
2435 else if (version >= SPA_VERSION_MULTI_REPLACE)
2436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2437 "already in replacing/spare config; wait "
2438 "for completion or use 'zpool detach'"));
34dc7c2f
BB
2439 else
2440 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2441 "cannot replace a replacing device"));
2442 } else {
2443 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2444 "can only attach to mirrors and top-level "
2445 "disks"));
2446 }
2447 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2448 break;
2449
2450 case EINVAL:
2451 /*
2452 * The new device must be a single disk.
2453 */
2454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2455 "new device must be a single disk"));
2456 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2457 break;
2458
2459 case EBUSY:
2460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2461 new_disk);
2462 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2463 break;
2464
2465 case EOVERFLOW:
2466 /*
2467 * The new device is too small.
2468 */
2469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2470 "device is too small"));
2471 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2472 break;
2473
2474 case EDOM:
2475 /*
2476 * The new device has a different alignment requirement.
2477 */
2478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2479 "devices have different sector alignment"));
2480 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2481 break;
2482
2483 case ENAMETOOLONG:
2484 /*
2485 * The resulting top-level vdev spec won't fit in the label.
2486 */
2487 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2488 break;
2489
2490 default:
2491 (void) zpool_standard_error(hdl, errno, msg);
2492 }
2493
2494 return (-1);
2495}
2496
2497/*
2498 * Detach the specified device.
2499 */
2500int
2501zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2502{
2598c001 2503 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2504 char msg[1024];
2505 nvlist_t *tgt;
2506 boolean_t avail_spare, l2cache;
2507 libzfs_handle_t *hdl = zhp->zpool_hdl;
2508
2509 (void) snprintf(msg, sizeof (msg),
2510 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2511
2512 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2513 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2514 NULL)) == 0)
34dc7c2f
BB
2515 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2516
2517 if (avail_spare)
2518 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2519
2520 if (l2cache)
2521 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2522
2523 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2524
2525 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2526 return (0);
2527
2528 switch (errno) {
2529
2530 case ENOTSUP:
2531 /*
2532 * Can't detach from this type of vdev.
2533 */
2534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2535 "applicable to mirror and replacing vdevs"));
572e2857 2536 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
34dc7c2f
BB
2537 break;
2538
2539 case EBUSY:
2540 /*
2541 * There are no other replicas of this device.
2542 */
2543 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2544 break;
2545
2546 default:
2547 (void) zpool_standard_error(hdl, errno, msg);
2548 }
2549
2550 return (-1);
2551}
2552
428870ff
BB
2553/*
2554 * Find a mirror vdev in the source nvlist.
2555 *
2556 * The mchild array contains a list of disks in one of the top-level mirrors
2557 * of the source pool. The schild array contains a list of disks that the
2558 * user specified on the command line. We loop over the mchild array to
2559 * see if any entry in the schild array matches.
2560 *
2561 * If a disk in the mchild array is found in the schild array, we return
2562 * the index of that entry. Otherwise we return -1.
2563 */
2564static int
2565find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2566 nvlist_t **schild, uint_t schildren)
2567{
2568 uint_t mc;
2569
2570 for (mc = 0; mc < mchildren; mc++) {
2571 uint_t sc;
2572 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2573 mchild[mc], B_FALSE);
2574
2575 for (sc = 0; sc < schildren; sc++) {
2576 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2577 schild[sc], B_FALSE);
2578 boolean_t result = (strcmp(mpath, spath) == 0);
2579
2580 free(spath);
2581 if (result) {
2582 free(mpath);
2583 return (mc);
2584 }
2585 }
2586
2587 free(mpath);
2588 }
2589
2590 return (-1);
2591}
2592
2593/*
2594 * Split a mirror pool. If '*newroot' is NULL, a new nvlist is
2595 * generated and it is the caller's responsibility to free it.
2596 */
2597int
2598zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2599 nvlist_t *props, splitflags_t flags)
2600{
2598c001 2601 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
428870ff
BB
2602 char msg[1024];
2603 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2604 nvlist_t **varray = NULL, *zc_props = NULL;
2605 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2606 libzfs_handle_t *hdl = zhp->zpool_hdl;
2607 uint64_t vers;
2608 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2609 int retval = 0;
2610
2611 (void) snprintf(msg, sizeof (msg),
2612 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2613
2614 if (!zpool_name_valid(hdl, B_FALSE, newname))
2615 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2616
2617 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2618 (void) fprintf(stderr, gettext("Internal error: unable to "
2619 "retrieve pool configuration\n"));
2620 return (-1);
2621 }
2622
2623 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2624 == 0);
2625 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2626
2627 if (props) {
572e2857 2628 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
428870ff 2629 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
572e2857 2630 props, vers, flags, msg)) == NULL)
428870ff
BB
2631 return (-1);
2632 }
2633
2634 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2635 &children) != 0) {
2636 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2637 "Source pool is missing vdev tree"));
2638 if (zc_props)
2639 nvlist_free(zc_props);
2640 return (-1);
2641 }
2642
2643 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2644 vcount = 0;
2645
2646 if (*newroot == NULL ||
2647 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2648 &newchild, &newchildren) != 0)
2649 newchildren = 0;
2650
2651 for (c = 0; c < children; c++) {
2652 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2653 char *type;
2654 nvlist_t **mchild, *vdev;
2655 uint_t mchildren;
2656 int entry;
2657
2658 /*
2659 * Unlike cache & spares, slogs are stored in the
2660 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2661 */
2662 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2663 &is_log);
2664 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2665 &is_hole);
2666 if (is_log || is_hole) {
2667 /*
2668 * Create a hole vdev and put it in the config.
2669 */
2670 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2671 goto out;
2672 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2673 VDEV_TYPE_HOLE) != 0)
2674 goto out;
2675 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2676 1) != 0)
2677 goto out;
2678 if (lastlog == 0)
2679 lastlog = vcount;
2680 varray[vcount++] = vdev;
2681 continue;
2682 }
2683 lastlog = 0;
2684 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2685 == 0);
2686 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2688 "Source pool must be composed only of mirrors\n"));
2689 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2690 goto out;
2691 }
2692
2693 verify(nvlist_lookup_nvlist_array(child[c],
2694 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2695
2696 /* find or add an entry for this top-level vdev */
2697 if (newchildren > 0 &&
2698 (entry = find_vdev_entry(zhp, mchild, mchildren,
2699 newchild, newchildren)) >= 0) {
2700 /* We found a disk that the user specified. */
2701 vdev = mchild[entry];
2702 ++found;
2703 } else {
2704 /* User didn't specify a disk for this vdev. */
2705 vdev = mchild[mchildren - 1];
2706 }
2707
2708 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2709 goto out;
2710 }
2711
2712 /* did we find every disk the user specified? */
2713 if (found != newchildren) {
2714 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2715 "include at most one disk from each mirror"));
2716 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2717 goto out;
2718 }
2719
2720 /* Prepare the nvlist for populating. */
2721 if (*newroot == NULL) {
2722 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2723 goto out;
2724 freelist = B_TRUE;
2725 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2726 VDEV_TYPE_ROOT) != 0)
2727 goto out;
2728 } else {
2729 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2730 }
2731
2732 /* Add all the children we found */
2733 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2734 lastlog == 0 ? vcount : lastlog) != 0)
2735 goto out;
2736
2737 /*
2738 * If we're just doing a dry run, exit now with success.
2739 */
2740 if (flags.dryrun) {
2741 memory_err = B_FALSE;
2742 freelist = B_FALSE;
2743 goto out;
2744 }
2745
2746 /* now build up the config list & call the ioctl */
2747 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2748 goto out;
2749
2750 if (nvlist_add_nvlist(newconfig,
2751 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2752 nvlist_add_string(newconfig,
2753 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2754 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2755 goto out;
2756
2757 /*
2758 * The new pool is automatically part of the namespace unless we
2759 * explicitly export it.
2760 */
2761 if (!flags.import)
2762 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2763 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2764 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2765 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2766 goto out;
2767 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2768 goto out;
2769
2770 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2771 retval = zpool_standard_error(hdl, errno, msg);
2772 goto out;
2773 }
2774
2775 freelist = B_FALSE;
2776 memory_err = B_FALSE;
2777
2778out:
2779 if (varray != NULL) {
2780 int v;
2781
2782 for (v = 0; v < vcount; v++)
2783 nvlist_free(varray[v]);
2784 free(varray);
2785 }
2786 zcmd_free_nvlists(&zc);
2787 if (zc_props)
2788 nvlist_free(zc_props);
2789 if (newconfig)
2790 nvlist_free(newconfig);
2791 if (freelist) {
2792 nvlist_free(*newroot);
2793 *newroot = NULL;
2794 }
2795
2796 if (retval != 0)
2797 return (retval);
2798
2799 if (memory_err)
2800 return (no_memory(hdl));
2801
2802 return (0);
2803}
2804
34dc7c2f
BB
2805/*
2806 * Remove the given device. Currently, this is supported only for hot spares,
2807 * level 2 cache devices, and log devices.
2808 */
2809int
2810zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2811{
2598c001 2812 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2813 char msg[1024];
2814 nvlist_t *tgt;
428870ff 2815 boolean_t avail_spare, l2cache, islog;
34dc7c2f 2816 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 2817 uint64_t version;
34dc7c2f
BB
2818
2819 (void) snprintf(msg, sizeof (msg),
2820 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2821
2822 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 2823 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
428870ff 2824 &islog)) == 0)
34dc7c2f 2825 return (zfs_error(hdl, EZFS_NODEVICE, msg));
428870ff
BB
2826 /*
2827 * XXX - this should just go away.
2828 */
2829 if (!avail_spare && !l2cache && !islog) {
34dc7c2f 2830 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
428870ff
BB
2831 "only inactive hot spares, cache, top-level, "
2832 "or log devices can be removed"));
34dc7c2f
BB
2833 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2834 }
2835
428870ff
BB
2836 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2837 if (islog && version < SPA_VERSION_HOLES) {
2838 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2839 "pool must be upgrade to support log removal"));
2840 return (zfs_error(hdl, EZFS_BADVERSION, msg));
2841 }
2842
34dc7c2f
BB
2843 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2844
2845 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2846 return (0);
2847
2848 return (zpool_standard_error(hdl, errno, msg));
2849}
2850
2851/*
2852 * Clear the errors for the pool, or the particular device if specified.
2853 */
2854int
428870ff 2855zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
34dc7c2f 2856{
2598c001 2857 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2858 char msg[1024];
2859 nvlist_t *tgt;
428870ff 2860 zpool_rewind_policy_t policy;
34dc7c2f
BB
2861 boolean_t avail_spare, l2cache;
2862 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 2863 nvlist_t *nvi = NULL;
572e2857 2864 int error;
34dc7c2f
BB
2865
2866 if (path)
2867 (void) snprintf(msg, sizeof (msg),
2868 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2869 path);
2870 else
2871 (void) snprintf(msg, sizeof (msg),
2872 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2873 zhp->zpool_name);
2874
2875 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2876 if (path) {
2877 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
b128c09f 2878 &l2cache, NULL)) == 0)
34dc7c2f
BB
2879 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2880
2881 /*
2882 * Don't allow error clearing for hot spares. Do allow
2883 * error clearing for l2cache devices.
2884 */
2885 if (avail_spare)
2886 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2887
2888 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2889 &zc.zc_guid) == 0);
2890 }
2891
428870ff
BB
2892 zpool_get_rewind_policy(rewindnvl, &policy);
2893 zc.zc_cookie = policy.zrp_request;
2894
572e2857 2895 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
428870ff
BB
2896 return (-1);
2897
572e2857 2898 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
428870ff
BB
2899 return (-1);
2900
572e2857
BB
2901 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
2902 errno == ENOMEM) {
2903 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2904 zcmd_free_nvlists(&zc);
2905 return (-1);
2906 }
2907 }
2908
2909 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
428870ff
BB
2910 errno != EPERM && errno != EACCES)) {
2911 if (policy.zrp_request &
2912 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2913 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2914 zpool_rewind_exclaim(hdl, zc.zc_name,
2915 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2916 nvi);
2917 nvlist_free(nvi);
2918 }
2919 zcmd_free_nvlists(&zc);
34dc7c2f 2920 return (0);
428870ff 2921 }
34dc7c2f 2922
428870ff 2923 zcmd_free_nvlists(&zc);
34dc7c2f
BB
2924 return (zpool_standard_error(hdl, errno, msg));
2925}
2926
2927/*
2928 * Similar to zpool_clear(), but takes a GUID (used by fmd).
2929 */
2930int
2931zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2932{
2598c001 2933 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2934 char msg[1024];
2935 libzfs_handle_t *hdl = zhp->zpool_hdl;
2936
2937 (void) snprintf(msg, sizeof (msg),
2938 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
b8864a23 2939 (u_longlong_t)guid);
34dc7c2f
BB
2940
2941 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2942 zc.zc_guid = guid;
428870ff 2943 zc.zc_cookie = ZPOOL_NO_REWIND;
34dc7c2f
BB
2944
2945 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2946 return (0);
2947
2948 return (zpool_standard_error(hdl, errno, msg));
2949}
2950
34dc7c2f
BB
2951/*
2952 * Convert from a devid string to a path.
2953 */
2954static char *
2955devid_to_path(char *devid_str)
2956{
2957 ddi_devid_t devid;
2958 char *minor;
2959 char *path;
2960 devid_nmlist_t *list = NULL;
2961 int ret;
2962
2963 if (devid_str_decode(devid_str, &devid, &minor) != 0)
2964 return (NULL);
2965
2966 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2967
2968 devid_str_free(minor);
2969 devid_free(devid);
2970
2971 if (ret != 0)
2972 return (NULL);
2973
2974 if ((path = strdup(list[0].devname)) == NULL)
2975 return (NULL);
2976
2977 devid_free_nmlist(list);
2978
2979 return (path);
2980}
2981
2982/*
2983 * Convert from a path to a devid string.
2984 */
2985static char *
2986path_to_devid(const char *path)
2987{
2988 int fd;
2989 ddi_devid_t devid;
2990 char *minor, *ret;
2991
2992 if ((fd = open(path, O_RDONLY)) < 0)
2993 return (NULL);
2994
2995 minor = NULL;
2996 ret = NULL;
2997 if (devid_get(fd, &devid) == 0) {
2998 if (devid_get_minor_name(fd, &minor) == 0)
2999 ret = devid_str_encode(devid, minor);
3000 if (minor != NULL)
3001 devid_str_free(minor);
3002 devid_free(devid);
3003 }
3004 (void) close(fd);
3005
3006 return (ret);
3007}
3008
3009/*
3010 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3011 * ignore any failure here, since a common case is for an unprivileged user to
3012 * type 'zpool status', and we'll display the correct information anyway.
3013 */
3014static void
3015set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3016{
2598c001 3017 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3018
3019 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3020 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3021 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3022 &zc.zc_guid) == 0);
3023
3024 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3025}
3026
83c62c93
NB
3027/*
3028 * Remove partition suffix from a vdev path. Partition suffixes may take three
3029 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3030 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3031 * third case only occurs when preceded by a string matching the regular
3032 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
3033 */
3034static char *
3035strip_partition(libzfs_handle_t *hdl, char *path)
3036{
3037 char *tmp = zfs_strdup(hdl, path);
3038 char *part = NULL, *d = NULL;
3039
3040 if ((part = strstr(tmp, "-part")) && part != tmp) {
3041 d = part + 5;
3042 } else if ((part = strrchr(tmp, 'p')) &&
3043 part > tmp + 1 && isdigit(*(part-1))) {
3044 d = part + 1;
3045 } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3046 for (d = &tmp[2]; isalpha(*d); part = ++d);
3047 }
3048 if (part && d && *d != '\0') {
3049 for (; isdigit(*d); d++);
3050 if (*d == '\0')
3051 *part = '\0';
3052 }
3053 return (tmp);
3054}
3055
858219cc
NB
3056#define PATH_BUF_LEN 64
3057
34dc7c2f
BB
3058/*
3059 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3060 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3061 * We also check if this is a whole disk, in which case we strip off the
3062 * trailing 's0' slice name.
3063 *
3064 * This routine is also responsible for identifying when disks have been
3065 * reconfigured in a new location. The kernel will have opened the device by
3066 * devid, but the path will still refer to the old location. To catch this, we
3067 * first do a path -> devid translation (which is fast for the common case). If
3068 * the devid matches, we're done. If not, we do a reverse devid -> path
3069 * translation and issue the appropriate ioctl() to update the path of the vdev.
3070 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3071 * of these checks.
3072 */
3073char *
428870ff
BB
3074zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3075 boolean_t verbose)
34dc7c2f 3076{
d603ed6c 3077 char *path, *devid, *type;
34dc7c2f 3078 uint64_t value;
858219cc 3079 char buf[PATH_BUF_LEN];
34dc7c2f
BB
3080 vdev_stat_t *vs;
3081 uint_t vsc;
3082
3083 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3084 &value) == 0) {
3085 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3086 &value) == 0);
3087 (void) snprintf(buf, sizeof (buf), "%llu",
3088 (u_longlong_t)value);
3089 path = buf;
3090 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
34dc7c2f
BB
3091 /*
3092 * If the device is dead (faulted, offline, etc) then don't
3093 * bother opening it. Otherwise we may be forcing the user to
3094 * open a misbehaving device, which can have undesirable
3095 * effects.
3096 */
428870ff 3097 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
34dc7c2f
BB
3098 (uint64_t **)&vs, &vsc) != 0 ||
3099 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3100 zhp != NULL &&
3101 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3102 /*
3103 * Determine if the current path is correct.
3104 */
3105 char *newdevid = path_to_devid(path);
3106
3107 if (newdevid == NULL ||
3108 strcmp(devid, newdevid) != 0) {
3109 char *newpath;
3110
3111 if ((newpath = devid_to_path(devid)) != NULL) {
3112 /*
3113 * Update the path appropriately.
3114 */
3115 set_path(zhp, nv, newpath);
3116 if (nvlist_add_string(nv,
3117 ZPOOL_CONFIG_PATH, newpath) == 0)
3118 verify(nvlist_lookup_string(nv,
3119 ZPOOL_CONFIG_PATH,
3120 &path) == 0);
3121 free(newpath);
3122 }
3123 }
3124
3125 if (newdevid)
3126 devid_str_free(newdevid);
3127 }
3128
d603ed6c
BB
3129 /*
3130 * For a block device only use the name.
3131 */
3132 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3133 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3134 path = strrchr(path, '/');
3135 path++;
3136 }
34dc7c2f 3137
d603ed6c 3138 /*
83c62c93 3139 * Remove the partition from the path if this is a whole disk.
d603ed6c 3140 */
34dc7c2f
BB
3141 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3142 &value) == 0 && value) {
83c62c93 3143 return strip_partition(hdl, path);
34dc7c2f
BB
3144 }
3145 } else {
3146 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3147
3148 /*
3149 * If it's a raidz device, we need to stick in the parity level.
3150 */
3151 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
858219cc
NB
3152 char tmpbuf[PATH_BUF_LEN];
3153
34dc7c2f
BB
3154 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3155 &value) == 0);
858219cc 3156 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%llu", path,
34dc7c2f 3157 (u_longlong_t)value);
858219cc 3158 path = tmpbuf;
34dc7c2f 3159 }
428870ff
BB
3160
3161 /*
3162 * We identify each top-level vdev by using a <type-id>
3163 * naming convention.
3164 */
3165 if (verbose) {
3166 uint64_t id;
3167
3168 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3169 &id) == 0);
3170 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3171 (u_longlong_t)id);
3172 path = buf;
3173 }
34dc7c2f
BB
3174 }
3175
3176 return (zfs_strdup(hdl, path));
3177}
3178
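/*
 * Illustrative usage sketch (not part of this file): printing the display
 * name of each top-level vdev, roughly how 'zpool status' labels them.
 * zpool_vdev_name() returns an allocated string the caller must free.
 */
#if 0
static void
example_print_toplevels(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;
	char *name;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0 ||
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		name = zpool_vdev_name(hdl, zhp, child[c], B_TRUE);
		(void) printf("  %s\n", name);
		free(name);
	}
}
#endif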
3179static int
3180zbookmark_compare(const void *a, const void *b)
3181{
3182 return (memcmp(a, b, sizeof (zbookmark_t)));
3183}
3184
3185/*
3186 * Retrieve the persistent error log, uniquify the members, and return to the
3187 * caller.
3188 */
3189int
3190zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3191{
2598c001 3192 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3193 uint64_t count;
3194 zbookmark_t *zb = NULL;
3195 int i;
3196
3197 /*
3198 * Retrieve the raw error list from the kernel. If the number of errors
3199 * has increased, allocate more space and continue until we get the
3200 * entire list.
3201 */
3202 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3203 &count) == 0);
3204 if (count == 0)
3205 return (0);
3206 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3207 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3208 return (-1);
3209 zc.zc_nvlist_dst_size = count;
3210 (void) strcpy(zc.zc_name, zhp->zpool_name);
3211 for (;;) {
3212 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3213 &zc) != 0) {
3214 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3215 if (errno == ENOMEM) {
3216 count = zc.zc_nvlist_dst_size;
3217 if ((zc.zc_nvlist_dst = (uintptr_t)
3218 zfs_alloc(zhp->zpool_hdl, count *
3219 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3220 return (-1);
3221 } else {
3222 return (-1);
3223 }
3224 } else {
3225 break;
3226 }
3227 }
3228
3229 /*
3230 * Sort the resulting bookmarks. This is a little confusing due to the
3231 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3232 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3233 * _not_ copied as part of the process. So we point the start of our
3234 * array appropriately and decrement the total number of elements.
3235 */
3236 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3237 zc.zc_nvlist_dst_size;
3238 count -= zc.zc_nvlist_dst_size;
3239
3240 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3241
3242 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3243
3244 /*
3245 * Fill in the nverrlistp with nvlists of dataset and object numbers.
3246 */
3247 for (i = 0; i < count; i++) {
3248 nvlist_t *nv;
3249
3250 /* ignoring zb_blkid and zb_level for now */
3251 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3252 zb[i-1].zb_object == zb[i].zb_object)
3253 continue;
3254
3255 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3256 goto nomem;
3257 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3258 zb[i].zb_objset) != 0) {
3259 nvlist_free(nv);
3260 goto nomem;
3261 }
3262 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3263 zb[i].zb_object) != 0) {
3264 nvlist_free(nv);
3265 goto nomem;
3266 }
3267 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3268 nvlist_free(nv);
3269 goto nomem;
3270 }
3271 nvlist_free(nv);
3272 }
3273
3274 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3275 return (0);
3276
3277nomem:
3278 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3279 return (no_memory(zhp->zpool_hdl));
3280}
3281
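/*
 * Illustrative usage sketch (not part of this file): walking the error log
 * returned above and resolving each <dataset, object> pair to a pathname,
 * roughly what 'zpool status -v' does.
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		if (nvpair_value_nvlist(elem, &nv) != 0 ||
		    nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) != 0 ||
		    nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, &obj) != 0)
			continue;

		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif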
3282/*
3283 * Upgrade a ZFS pool to the latest on-disk version.
3284 */
3285int
3286zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3287{
2598c001 3288 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3289 libzfs_handle_t *hdl = zhp->zpool_hdl;
3290
3291 (void) strcpy(zc.zc_name, zhp->zpool_name);
3292 zc.zc_cookie = new_version;
3293
3294 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3295 return (zpool_standard_error_fmt(hdl, errno,
3296 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3297 zhp->zpool_name));
3298 return (0);
3299}
3300
3301void
3302zpool_set_history_str(const char *subcommand, int argc, char **argv,
3303 char *history_str)
3304{
3305 int i;
3306
3307 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3308 for (i = 1; i < argc; i++) {
3309 if (strlen(history_str) + 1 + strlen(argv[i]) >
3310 HIS_MAX_RECORD_LEN)
3311 break;
3312 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3313 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3314 }
3315}
3316
3317/*
3318 * Stage command history for logging.
3319 */
3320int
3321zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3322{
3323 if (history_str == NULL)
3324 return (EINVAL);
3325
3326 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3327 return (EINVAL);
3328
3329 if (hdl->libzfs_log_str != NULL)
3330 free(hdl->libzfs_log_str);
3331
3332 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3333 return (no_memory(hdl));
3334
3335 return (0);
3336}
3337
3338/*
3339 * Perform ioctl to get some command history of a pool.
3340 *
3341 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3342 * logical offset of the history buffer to start reading from.
3343 *
3344 * Upon return, 'off' is the next logical offset to read from and
3345 * 'len' is the actual amount of bytes read into 'buf'.
3346 */
3347static int
3348get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3349{
2598c001 3350 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3351 libzfs_handle_t *hdl = zhp->zpool_hdl;
3352
3353 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3354
3355 zc.zc_history = (uint64_t)(uintptr_t)buf;
3356 zc.zc_history_len = *len;
3357 zc.zc_history_offset = *off;
3358
3359 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3360 switch (errno) {
3361 case EPERM:
3362 return (zfs_error_fmt(hdl, EZFS_PERM,
3363 dgettext(TEXT_DOMAIN,
3364 "cannot show history for pool '%s'"),
3365 zhp->zpool_name));
3366 case ENOENT:
3367 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3368 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3369 "'%s'"), zhp->zpool_name));
3370 case ENOTSUP:
3371 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3372 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3373 "'%s', pool must be upgraded"), zhp->zpool_name));
3374 default:
3375 return (zpool_standard_error_fmt(hdl, errno,
3376 dgettext(TEXT_DOMAIN,
3377 "cannot get history for '%s'"), zhp->zpool_name));
3378 }
3379 }
3380
3381 *len = zc.zc_history_len;
3382 *off = zc.zc_history_offset;
3383
3384 return (0);
3385}
3386
3387/*
3388 * Process the buffer of nvlists, unpacking and storing each nvlist record
3389 * into 'records'. 'leftover' is set to the number of bytes that weren't
3390 * processed as there wasn't a complete record.
3391 */
428870ff 3392int
34dc7c2f
BB
3393zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3394 nvlist_t ***records, uint_t *numrecords)
3395{
3396 uint64_t reclen;
3397 nvlist_t *nv;
3398 int i;
3399
3400 while (bytes_read > sizeof (reclen)) {
3401
3402 /* get length of packed record (stored as little endian) */
3403 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3404 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3405
3406 if (bytes_read < sizeof (reclen) + reclen)
3407 break;
3408
3409 /* unpack record */
3410 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3411 return (ENOMEM);
3412 bytes_read -= sizeof (reclen) + reclen;
3413 buf += sizeof (reclen) + reclen;
3414
3415 /* add record to nvlist array */
3416 (*numrecords)++;
3417 if (ISP2(*numrecords + 1)) {
3418 *records = realloc(*records,
3419 *numrecords * 2 * sizeof (nvlist_t *));
3420 }
3421 (*records)[*numrecords - 1] = nv;
3422 }
3423
3424 *leftover = bytes_read;
3425 return (0);
3426}
3427
3428#define HIS_BUF_LEN (128*1024)
3429
3430/*
3431 * Retrieve the command history of a pool.
3432 */
3433int
3434zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3435{
3436 char buf[HIS_BUF_LEN];
3437 uint64_t off = 0;
3438 nvlist_t **records = NULL;
3439 uint_t numrecords = 0;
3440 int err, i;
3441
3442 do {
3443 uint64_t bytes_read = sizeof (buf);
3444 uint64_t leftover;
3445
3446 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3447 break;
3448
3449 /* if nothing else was read in, we're at EOF, just return */
3450 if (!bytes_read)
3451 break;
3452
3453 if ((err = zpool_history_unpack(buf, bytes_read,
3454 &leftover, &records, &numrecords)) != 0)
3455 break;
3456 off -= leftover;
3457
3458 /* CONSTCOND */
3459 } while (1);
3460
3461 if (!err) {
3462 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3463 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3464 records, numrecords) == 0);
3465 }
3466 for (i = 0; i < numrecords; i++)
3467 nvlist_free(records[i]);
3468 free(records);
3469
3470 return (err);
3471}
3472
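/*
 * Illustrative usage sketch (not part of this file): dumping the history
 * records returned above. Assumes each record carries ZPOOL_HIST_TIME and,
 * for user commands, ZPOOL_HIST_CMD, as 'zpool history' expects.
 */
#if 0
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;
	uint64_t tsec;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;

	if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0) {
		for (i = 0; i < numrecords; i++) {
			tsec = 0;
			if (nvlist_lookup_uint64(records[i],
			    ZPOOL_HIST_TIME, &tsec) != 0 ||
			    nvlist_lookup_string(records[i],
			    ZPOOL_HIST_CMD, &cmd) != 0)
				continue;
			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
		}
	}
	nvlist_free(nvhis);
}
#endif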
26685276
BB
3473/*
3474 * Retrieve the next event. If there is a new event available 'nvp' will
3475 * contain a newly allocated nvlist and 'dropped' will be set to the number
3476 * of missed events since the last call to this function. When 'nvp' is
3477 * set to NULL it indicates no new events are available. In either case
3478 * the function returns 0 and it is up to the caller to free 'nvp'. In
3479 * the case of a fatal error the function will return a non-zero value.
3480 * When the function is called in blocking mode it will not return until
3481 * a new event is available.
3482 */
3483int
3484zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3485 int *dropped, int block, int cleanup_fd)
3486{
3487 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3488 int error = 0;
3489
3490 *nvp = NULL;
3491 *dropped = 0;
3492 zc.zc_cleanup_fd = cleanup_fd;
3493
3494 if (!block)
3495 zc.zc_guid = ZEVENT_NONBLOCK;
3496
3497 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3498 return (-1);
3499
3500retry:
3501 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3502 switch (errno) {
3503 case ESHUTDOWN:
3504 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3505 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3506 goto out;
3507 case ENOENT:
3508 /* Blocking error case should not occur */
3509 if (block)
3510 error = zpool_standard_error_fmt(hdl, errno,
3511 dgettext(TEXT_DOMAIN, "cannot get event"));
3512
3513 goto out;
3514 case ENOMEM:
3515 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3516 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3517 dgettext(TEXT_DOMAIN, "cannot get event"));
3518 goto out;
3519 } else {
3520 goto retry;
3521 }
3522 default:
3523 error = zpool_standard_error_fmt(hdl, errno,
3524 dgettext(TEXT_DOMAIN, "cannot get event"));
3525 goto out;
3526 }
3527 }
3528
3529 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3530 if (error != 0)
3531 goto out;
3532
3533 *dropped = (int)zc.zc_cookie;
3534out:
3535 zcmd_free_nvlists(&zc);
3536
3537 return (error);
3538}
3539
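/*
 * Illustrative usage sketch (not part of this file): draining any pending
 * events without blocking, the core loop of 'zpool events'. Assumes the
 * cleanup descriptor is an open handle on /dev/zfs, as the command line
 * tool uses.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl)
{
	nvlist_t *nv;
	int cleanup_fd, dropped;

	if ((cleanup_fd = open("/dev/zfs", O_RDWR)) < 0)
		return;

	for (;;) {
		if (zpool_events_next(hdl, &nv, &dropped, 0,
		    cleanup_fd) != 0 || nv == NULL)
			break;
		if (dropped > 0)
			(void) printf("dropped %d events\n", dropped);
		nvlist_print(stdout, nv);
		nvlist_free(nv);
	}
	(void) close(cleanup_fd);
}
#endif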
3540/*
3541 * Clear all events.
3542 */
3543int
3544zpool_events_clear(libzfs_handle_t *hdl, int *count)
3545{
3546 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3547 char msg[1024];
3548
3549 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3550 "cannot clear events"));
3551
3552 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3553 return (zpool_standard_error_fmt(hdl, errno, msg));
3554
3555 if (count != NULL)
3556 *count = (int)zc.zc_cookie; /* # of events cleared */
3557
3558 return (0);
3559}
3560
34dc7c2f
BB
3561void
3562zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3563 char *pathname, size_t len)
3564{
2598c001 3565 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3566 boolean_t mounted = B_FALSE;
3567 char *mntpnt = NULL;
3568 char dsname[MAXNAMELEN];
3569
3570 if (dsobj == 0) {
3571 /* special case for the MOS */
b8864a23 3572 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", (longlong_t)obj);
34dc7c2f
BB
3573 return;
3574 }
3575
3576 /* get the dataset's name */
3577 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3578 zc.zc_obj = dsobj;
3579 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3580 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3581 /* just write out a path of two object numbers */
3582 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
b8864a23 3583 (longlong_t)dsobj, (longlong_t)obj);
34dc7c2f
BB
3584 return;
3585 }
3586 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3587
3588 /* find out if the dataset is mounted */
3589 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3590
3591 /* get the corrupted object's path */
3592 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3593 zc.zc_obj = obj;
3594 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3595 &zc) == 0) {
3596 if (mounted) {
3597 (void) snprintf(pathname, len, "%s%s", mntpnt,
3598 zc.zc_value);
3599 } else {
3600 (void) snprintf(pathname, len, "%s:%s",
3601 dsname, zc.zc_value);
3602 }
3603 } else {
b8864a23 3604 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (longlong_t)obj);
34dc7c2f
BB
3605 }
3606 free(mntpnt);
3607}
3608
b128c09f
BB
3609/*
3610 * Read the EFI label from the config, if a label does not exist then
3611 * pass back the error to the caller. If the caller has passed a non-NULL
3612 * diskaddr argument then we set it to the starting address of the EFI
3613 * partition.
3614 */
3615static int
3616read_efi_label(nvlist_t *config, diskaddr_t *sb)
3617{
3618 char *path;
3619 int fd;
3620 char diskname[MAXPATHLEN];
3621 int err = -1;
3622
3623 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3624 return (err);
3625
3626 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3627 strrchr(path, '/'));
d603ed6c 3628 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
b128c09f
BB
3629 struct dk_gpt *vtoc;
3630
3631 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3632 if (sb != NULL)
3633 *sb = vtoc->efi_parts[0].p_start;
3634 efi_free(vtoc);
3635 }
3636 (void) close(fd);
3637 }
3638 return (err);
3639}
3640
34dc7c2f
BB
3641/*
3642 * determine where a partition starts on a disk in the current
3643 * configuration
3644 */
3645static diskaddr_t
3646find_start_block(nvlist_t *config)
3647{
3648 nvlist_t **child;
3649 uint_t c, children;
34dc7c2f 3650 diskaddr_t sb = MAXOFFSET_T;
34dc7c2f
BB
3651 uint64_t wholedisk;
3652
3653 if (nvlist_lookup_nvlist_array(config,
3654 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3655 if (nvlist_lookup_uint64(config,
3656 ZPOOL_CONFIG_WHOLE_DISK,
3657 &wholedisk) != 0 || !wholedisk) {
3658 return (MAXOFFSET_T);
3659 }
b128c09f
BB
3660 if (read_efi_label(config, &sb) < 0)
3661 sb = MAXOFFSET_T;
34dc7c2f
BB
3662 return (sb);
3663 }
3664
3665 for (c = 0; c < children; c++) {
3666 sb = find_start_block(child[c]);
3667 if (sb != MAXOFFSET_T) {
3668 return (sb);
3669 }
3670 }
3671 return (MAXOFFSET_T);
3672}
3673
d603ed6c
BB
3674int
3675zpool_label_disk_wait(char *path, int timeout)
3676{
3677 struct stat64 statbuf;
3678 int i;
3679
3680 /*
3681 * Wait timeout milliseconds for a newly created device to be available
3682 * from the given path. There is a small window when a /dev/ device
3683 * will exist and the udev link will not, so we must wait for the
3684 * symlink. Depending on the udev rules this may take a few seconds.
3685 */
3686 for (i = 0; i < timeout; i++) {
3687 usleep(1000);
3688
3689 errno = 0;
3690 if ((stat64(path, &statbuf) == 0) && (errno == 0))
3691 return (0);
3692 }
3693
3694 return (ENOENT);
3695}
3696
3697int
3698zpool_label_disk_check(char *path)
3699{
3700 struct dk_gpt *vtoc;
3701 int fd, err;
3702
3703 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
3704 return errno;
3705
3706 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
3707 (void) close(fd);
3708 return err;
3709 }
3710
3711 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
3712 efi_free(vtoc);
3713 (void) close(fd);
3714 return EIDRM;
3715 }
3716
3717 efi_free(vtoc);
3718 (void) close(fd);
3719 return 0;
3720}
3721
34dc7c2f
BB
3722/*
3723 * Label an individual disk. The name provided is the short name,
3724 * stripped of any leading /dev path.
3725 */
3726int
3727zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3728{
3729 char path[MAXPATHLEN];
3730 struct dk_gpt *vtoc;
d603ed6c 3731 int rval, fd;
34dc7c2f
BB
3732 size_t resv = EFI_MIN_RESV_SIZE;
3733 uint64_t slice_size;
3734 diskaddr_t start_block;
3735 char errbuf[1024];
3736
3737 /* prepare an error message just in case */
3738 (void) snprintf(errbuf, sizeof (errbuf),
3739 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3740
3741 if (zhp) {
3742 nvlist_t *nvroot;
3743
b128c09f
BB
3744 if (pool_is_bootable(zhp)) {
3745 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3746 "EFI labeled devices are not supported on root "
3747 "pools."));
3748 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3749 }
3750
34dc7c2f
BB
3751 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3752 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3753
3754 if (zhp->zpool_start_block == 0)
3755 start_block = find_start_block(nvroot);
3756 else
3757 start_block = zhp->zpool_start_block;
3758 zhp->zpool_start_block = start_block;
3759 } else {
3760 /* new pool */
3761 start_block = NEW_START_BLOCK;
3762 }
3763
3764 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3765 BACKUP_SLICE);
3766
d603ed6c 3767 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
34dc7c2f
BB
3768 /*
3769 * This shouldn't happen. We've long since verified that this
3770 * is a valid device.
3771 */
109491a8
RL
3772 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3773 "label '%s': unable to open device: %d"), path, errno);
34dc7c2f
BB
3774 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3775 }
3776
3777 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3778 /*
3779 * The only way this can fail is if we run out of memory, or we
3780 * were unable to read the disk's capacity
3781 */
3782 if (errno == ENOMEM)
3783 (void) no_memory(hdl);
3784
3785 (void) close(fd);
109491a8
RL
3786 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3787 "label '%s': unable to read disk capacity"), path);
34dc7c2f
BB
3788
3789 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3790 }
3791
3792 slice_size = vtoc->efi_last_u_lba + 1;
3793 slice_size -= EFI_MIN_RESV_SIZE;
3794 if (start_block == MAXOFFSET_T)
3795 start_block = NEW_START_BLOCK;
3796 slice_size -= start_block;
613d88ed 3797 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
34dc7c2f
BB
3798
3799 vtoc->efi_parts[0].p_start = start_block;
3800 vtoc->efi_parts[0].p_size = slice_size;
3801
3802 /*
3803 * Why we use V_USR: V_BACKUP confuses users, and is considered
3804 * disposable by some EFI utilities (since EFI doesn't have a backup
3805 * slice). V_UNASSIGNED is supposed to be used only for zero size
3806 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3807 * etc. were all pretty specific. V_USR is as close to reality as we
3808 * can get, in the absence of V_OTHER.
3809 */
3810 vtoc->efi_parts[0].p_tag = V_USR;
3811 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3812
3813 vtoc->efi_parts[8].p_start = slice_size + start_block;
3814 vtoc->efi_parts[8].p_size = resv;
3815 vtoc->efi_parts[8].p_tag = V_RESERVED;
3816
d603ed6c 3817 if ((rval = efi_write(fd, vtoc)) != 0) {
34dc7c2f
BB
3818 /*
3819 * Some block drivers (like pcata) may not support EFI
3820 * GPT labels. Print out a helpful error message directing
3821 * the user to manually label the disk and give
3822 * a specific slice.
3823 */
3824 (void) close(fd);
3825 efi_free(vtoc);
3826
d603ed6c
BB
3827 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
3828 "parted(8) and then provide a specific slice: %d"), rval);
34dc7c2f
BB
3829 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3830 }
3831
3832 (void) close(fd);
3833 efi_free(vtoc);
34dc7c2f 3834
d603ed6c
BB
3835 /* Wait for the first expected slice to appear. */
3836 (void) snprintf(path, sizeof (path), "%s/%s%s%s", DISK_ROOT, name,
3837 isdigit(name[strlen(name)-1]) ? "p" : "", FIRST_SLICE);
3838 rval = zpool_label_disk_wait(path, 3000);
3839 if (rval) {
3840 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
3841 "detect device partitions on '%s': %d"), path, rval);
3842 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f
BB
3843 }
3844
d603ed6c
BB
3845 /* We can't be too paranoid. Read the label back and verify it. */
3846 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
3847 rval = zpool_label_disk_check(path);
3848 if (rval) {
3849 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
3850 "EFI label on '%s' is damaged. Ensure\nthis device "
3851 "is not in in use, and is functioning properly: %d"),
3852 path, rval);
3853 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f 3854 }
34dc7c2f 3855
d603ed6c 3856 return 0;
34dc7c2f 3857}