/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

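/*
 * Callers typically fill this in with designated initializers, e.g.
 * { .create = B_TRUE, .import = B_FALSE }, as zpool_create() does below.
 */
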
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

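/*
 * Fetch all properties for the given pool from the kernel, growing the
 * destination nvlist buffer on ENOMEM, and cache them in zhp->zpool_props.
 */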
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

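/*
 * Refresh the cached property list, freeing the old nvlist only after a
 * replacement has been fetched successfully.
 */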
static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

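/*
 * Look up a string property in the cached nvlist, reporting its source via
 * 'src' and falling back to the property's default value (or "-").
 */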
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

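/*
 * Look up a numeric property, fetching the property list on demand and
 * reporting the value's source via 'src'.
 */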
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_ASHIFT:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

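/*
 * Example (sketch): retrieving the pool health as a printable string.
 *
 *	char health[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), NULL) == 0)
 *		(void) printf("health: %s\n", health);
 */
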
/*
 * Check that the given bootfs name is within the pool it is being set on.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

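/*
 * A pool is considered bootable if its 'bootfs' property is set to something
 * other than the default "-".
 */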
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the pool it is being
			 * set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

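/*
 * Expand the given property list to cover all pool properties, updating each
 * non-fixed entry's column width to fit its current value.
 */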
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices use a stripe width of 128k, and other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048

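/* 2048 sectors * 512 bytes/sector = 1 MiB start offset and end alignment. */
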
/*
 * Validate the given pool name, optionally setting an extended error message
 * on 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

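/*
 * Typical handle lifecycle (sketch; "tank" is an example pool name):
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		... query or modify the pool ...
 *		zpool_close(zhp);
 *	}
 */
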
/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

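/* Convenience wrappers around zpool_export_common(). */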
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}

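/*
 * Report what a pool rewind did (or, on a dry run, would do): the time the
 * pool was (or would be) returned to, and roughly how many seconds or
 * minutes of transactions were (or would be) discarded.
 */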
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
		return;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}

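/*
 * Recursively print an indented vdev tree, tagging log devices.
 */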
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool: start a scrub with POOL_SCAN_SCRUB, or cancel an in-progress
 * scan with POOL_SCAN_NONE.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names:
		 *   zfs_strcmp_shortname() is used to check all possible
		 *   expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

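/*
 * Locate a vdev by GUID, by an interior vdev name such as "mirror-2", or by
 * device path, noting whether it is a spare, l2cache, or log device.
 */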
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

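/*
 * A vdev is considered online if none of the offline, faulted, or removed
 * flags are set in its config nvlist.
 */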
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

1955/*
9babb374 1956 * Helper function for zpool_get_physpaths().
b128c09f 1957 */
9babb374
BB
1958static int
1959vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1960 size_t *bytes_written)
1961{
1962 size_t bytes_left, pos, rsz;
1963 char *tmppath;
1964 const char *format;
1965
1966 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1967 &tmppath) != 0)
1968 return (EZFS_NODEVICE);
1969
1970 pos = *bytes_written;
1971 bytes_left = physpath_size - pos;
1972 format = (pos == 0) ? "%s" : " %s";
1973
1974 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1975 *bytes_written += rsz;
1976
1977 if (rsz >= bytes_left) {
1978 /* if physpath was not copied properly, clear it */
1979 if (bytes_left != 0) {
1980 physpath[pos] = 0;
1981 }
1982 return (EZFS_NOSPC);
1983 }
1984 return (0);
1985}

static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
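
/*
 * Illustrative sketch (not part of libzfs): retrieving the physical
 * device path(s) backing a root pool with zpool_get_physpath() above.
 * The pool name "rpool" is hypothetical; the call only succeeds for
 * pools that satisfy the root-pool constraints checked above.
 */
static void
example_get_physpath(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	char physpath[MAXPATHLEN];

	if ((zhp = zpool_open(hdl, "rpool")) == NULL)
		return;

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("root pool phys_path: %s\n", physpath);

	zpool_close(zhp);
}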

/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
	int fd, error;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, msg));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left.  If so, we simply
	 * ignore that error and continue on.
	 *
	 * Also, we don't call efi_rescan() - that would just return EBUSY.
	 * The module will do it for us in vdev_disk_open().
	 */
	error = efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, msg));
	}
	return (0);
}

/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one.  Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
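
/*
 * Illustrative sketch (not part of libzfs): onlining a device and
 * checking the resulting state.  The pool name "tank" and device
 * "/dev/sda" are hypothetical.  Passing ZFS_ONLINE_EXPAND also grows
 * the vdev onto any newly available space, as implemented above.
 */
static void
example_vdev_online(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	vdev_state_t newstate;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	if (zpool_vdev_online(zhp, "/dev/sda", ZFS_ONLINE_EXPAND,
	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
		(void) printf("device expanded and healthy\n");

	zpool_close(zhp);
}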

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
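
/*
 * Illustrative sketch (not part of libzfs): replacing one disk with
 * another via zpool_vdev_attach() above.  The device paths are
 * hypothetical, and the hand-built nvroot is deliberately simplified --
 * the zpool command builds this tree with its make_root_vdev() helper,
 * which also handles labeling.  Real callers may need additional
 * config pairs beyond type/path/whole_disk.
 */
static int
example_vdev_replace(zpool_handle_t *zhp)
{
	nvlist_t *nvroot, *disk;
	int err;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb") == 0);
	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL) == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	/* a non-zero 'replacing' requests a replace, not a mirror attach */
	err = zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot, 1);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (err);
}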

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		if (zc_props)
			nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	if (zc_props)
		nvlist_free(zc_props);
	if (newconfig)
		nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
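
/*
 * Illustrative sketch (not part of libzfs): a dry-run mirror split
 * using zpool_vdev_split() above.  The new pool name is hypothetical.
 * With flags.dryrun set, no ioctl is issued; the function only hands
 * back the would-be vdev tree in newroot, which the caller must free.
 */
static void
example_split_dryrun(zpool_handle_t *zhp)
{
	nvlist_t *newroot = NULL;
	splitflags_t flags = { 0 };

	flags.dryrun = B_TRUE;
	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0) {
		(void) printf("split of '%s' would succeed\n",
		    zpool_get_name(zhp));
		nvlist_free(newroot);
	}
}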

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
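
/*
 * Illustrative sketch (not part of libzfs): clearing error counts for
 * a single vdev by GUID, the same way fmd consumes zpool_vdev_clear()
 * above.  The GUID would normally come from a fault event or from the
 * ZPOOL_CONFIG_GUID entry of a vdev nvlist.
 */
static void
example_vdev_clear(zpool_handle_t *zhp, nvlist_t *vdev)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (zpool_vdev_clear(zhp, guid) == 0)
		(void) printf("cleared errors on vdev %llu\n",
		    (u_longlong_t)guid);
}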

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
 */
static char *
strip_partition(libzfs_handle_t *hdl, char *path)
{
	char *tmp = zfs_strdup(hdl, path);
	char *part = NULL, *d = NULL;

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d);
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++);
		if (*d == '\0')
			*part = '\0';
	}
	return (tmp);
}
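
/*
 * Illustrative sketch (not part of libzfs): expected behavior of the
 * static strip_partition() helper above for each of the three suffix
 * forms it recognizes.  The device names are hypothetical and each
 * returned string is allocated by zfs_strdup(), so it must be freed.
 */
static void
example_strip_partition(libzfs_handle_t *hdl)
{
	char sd[] = "sda1";		/* "X" form:      -> "sda"  */
	char md[] = "md0p2";		/* "pX" form:     -> "md0"  */
	char dm[] = "disk-part3";	/* "-partX" form: -> "disk" */
	char *stripped;

	stripped = strip_partition(hdl, sd);
	(void) printf("%s -> %s\n", sd, stripped);
	free(stripped);

	stripped = strip_partition(hdl, md);
	(void) printf("%s -> %s\n", md, stripped);
	free(stripped);

	stripped = strip_partition(hdl, dm);
	(void) printf("%s -> %s\n", dm, stripped);
	free(stripped);
}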

#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid, *type;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			return (strip_partition(hdl, path));
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
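
/*
 * Illustrative sketch (not part of libzfs): printing the display name
 * of every top-level vdev in a pool with zpool_vdev_name() above, the
 * way 'zpool status' and 'zpool iostat' render their device columns.
 * Each returned name is allocated with zfs_strdup() and must be freed.
 */
static void
example_print_vdev_names(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	for (c = 0; c < children; c++) {
		char *name = zpool_vdev_name(hdl, zhp, child[c], B_TRUE);

		(void) printf("  %s\n", name);
		free(name);
	}
}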

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
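
/*
 * Illustrative sketch (not part of libzfs): walking the records
 * returned by zpool_get_history() above.  ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD are only present in records that logged a command,
 * so the lookups are allowed to fail quietly here.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;
	uint64_t tsec;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);

	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &tsec) == 0 &&
		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
	}

	nvlist_free(nvhis);
}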

/*
 * Retrieve the next event.  If there is a new event available 'nvp' will
 * contain a newly allocated nvlist and 'dropped' will be set to the number
 * of missed events since the last call to this function.  When 'nvp' is
 * set to NULL it indicates no new events are available.  In either case
 * the function returns 0 and it is up to the caller to free 'nvp'.  In
 * the case of a fatal error the function will return a non-zero value.
 * When the function is called in blocking mode it will not return until
 * a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, int block, int cleanup_fd)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = cleanup_fd;

	if (!block)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (block)
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
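
/*
 * Illustrative sketch (not part of libzfs): draining all pending
 * events with zpool_events_next() above in non-blocking mode, roughly
 * what 'zpool events' does.  ZFS_DEV is assumed to name the zfs
 * control node (/dev/zfs); the descriptor doubles as the cleanup_fd
 * argument here, which is a simplification of the real consumer.
 */
static void
example_drain_events(libzfs_handle_t *hdl)
{
	nvlist_t *nv;
	int dropped, fd;

	if ((fd = open(ZFS_DEV, O_RDWR)) < 0)
		return;

	for (;;) {
		if (zpool_events_next(hdl, &nv, &dropped, 0, fd) != 0)
			break;
		if (nv == NULL)		/* no more events pending */
			break;
		if (dropped > 0)
			(void) printf("dropped %d events\n", dropped);
		nvlist_free(nv);
	}

	(void) close(fd);
}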

/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
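
/*
 * Illustrative sketch (not part of libzfs): combining
 * zpool_get_errlog() with zpool_obj_to_path() above to print a
 * human-readable list of objects with persistent errors, roughly what
 * 'zpool status -v' shows.
 */
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);

		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}

	nvlist_free(nverrlist);
}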

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

int
zpool_label_disk_wait(char *path, int timeout)
{
	struct stat64 statbuf;
	int i;

	/*
	 * Wait timeout milliseconds for a newly created device to be available
	 * from the given path.  There is a small window when a /dev/ device
	 * will exist and the udev link will not, so we must wait for the
	 * symlink.  Depending on the udev rules this may take a few seconds.
	 */
	for (i = 0; i < timeout; i++) {
		usleep(1000);

		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0))
			return (0);
	}

	return (ENOENT);
}

int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if ((rval = efi_write(fd, vtoc)) != 0 ||
	    (rval = efi_rescan(fd)) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	/* Wait for the first expected partition to appear. */

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	rval = zpool_label_disk_wait(path, 3000);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}