/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
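
/*
 * Example (a sketch, not part of this file): a hypothetical caller with a
 * valid 'zhp' could read a numeric property such as the pool GUID like so:
 *
 *	zprop_source_t src;
 *	uint64_t guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, &src);
 *	(void) printf("guid: %llu\n", (u_longlong_t)guid);
 */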

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	default:
		break;
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_ASHIFT:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
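
/*
 * Example (a sketch, not part of this file): a hypothetical caller could
 * format the pool health as a printable string:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL) == 0)
 *		(void) printf("health: %s\n", buf);
 */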

/*
 * Check that the bootfs name has the same pool name as the pool it is
 * set on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
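
/*
 * Example (informational, not part of this file): for pool "tank",
 * "tank" and "tank/boot/root" are accepted, while "tankx" or
 * "other/root" are rejected, because the leading component must be
 * the pool name itself.
 */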

#if defined(__sun__) || defined(__sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		default:
			break;
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (intval != 0 && (intval < 9 || intval > 13)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool it is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
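
/*
 * Example (a sketch, not part of this file): both name and value are
 * passed as strings and validated by zpool_valid_proplist() above;
 * 'hdl' is assumed to be the caller's libzfs handle:
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */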

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}


/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
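
/*
 * Worked out: 2048 blocks * 512 B/block = 1048576 B = 1 MiB, so both
 * the start offset and the end alignment above land on 1 MiB
 * boundaries for 512-byte-sector devices, and on multiples of 1 MiB
 * for any larger power-of-2 sector size.
 */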

/*
 * Validate the given pool name, optionally recording an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
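
/*
 * Example (informational, not part of this file): "tank" and "data01"
 * pass; "mirror", "raidz1", "spare0", and "log" are reserved here,
 * while names like "1pool" (no leading letter) or "tank/fs" (slash)
 * are rejected by pool_namecheck().
 */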

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
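
/*
 * Example (a sketch, not part of this file): the usual open/use/close
 * pattern from a hypothetical consumer with a valid 'hdl':
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (1);
 *	(void) printf("state: %d\n", zpool_get_state(zhp));
 *	zpool_close(zhp);
 */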

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
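
/*
 * Example (a sketch, not part of this file): callers normally build the
 * 'nvroot' vdev tree with a helper such as the zpool command's
 * make_root_vdev() rather than by hand, then create the pool:
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */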

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

#if defined(__sun__) || defined(__sun)
	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
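
/*
 * Example (a sketch, not part of this file): a plain export versus a
 * hard-forced one; the latter sets both force and hardforce, which also
 * overrides an active shared spare:
 *
 *	(void) zpool_export(zhp, B_FALSE);
 *	(void) zpool_export_force(zhp);
 */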

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
		return;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}
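
/*
 * Example (a sketch, not part of this file): importing a discovered pool
 * under a temporary altroot, keeping its name; 'config' would come from
 * the zpool_find_import() family:
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */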

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
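
/*
 * Example (a sketch, not part of this file): starting and cancelling a
 * scrub, the same calls 'zpool scrub' and 'zpool scrub -s' make:
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);
 */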

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names; zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
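
/*
 * Example (a sketch, not part of this file): 'path' may be a vdev guid
 * rendered as a decimal string, an interior vdev name, or a device
 * path, so all of these forms are valid ('guidstr' is hypothetical):
 *
 *	nv = zpool_find_vdev(zhp, guidstr, &spare, &l2c, &log);
 *	nv = zpool_find_vdev(zhp, "mirror-0", &spare, &l2c, &log);
 *	nv = zpool_find_vdev(zhp, "/dev/sda", &spare, &l2c, &log);
 */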
1945
1946static int
1947vdev_online(nvlist_t *nv)
1948{
1949 uint64_t ival;
1950
1951 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1952 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1953 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1954 return (0);
1955
1956 return (1);
1957}
1958
1959/*
9babb374 1960 * Helper function for zpool_get_physpath().
b128c09f 1961 */
9babb374
BB
1962static int
1963vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1964 size_t *bytes_written)
1965{
1966 size_t bytes_left, pos, rsz;
1967 char *tmppath;
1968 const char *format;
1969
1970 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1971 &tmppath) != 0)
1972 return (EZFS_NODEVICE);
1973
1974 pos = *bytes_written;
1975 bytes_left = physpath_size - pos;
1976 format = (pos == 0) ? "%s" : " %s";
1977
1978 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1979 *bytes_written += rsz;
1980
1981 if (rsz >= bytes_left) {
1982 /* if physpath was not copied properly, clear it */
1983 if (bytes_left != 0) {
1984 physpath[pos] = 0;
1985 }
1986 return (EZFS_NOSPC);
1987 }
1988 return (0);
1989}
1990
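/*
 * Worked example (sketch): with physpath_size == 16, *bytes_written == 0,
 * and a 20-character phys_path, snprintf() returns 20; since 20 >= 16
 * the partial copy is erased (physpath[0] = 0) and EZFS_NOSPC is
 * returned.  On success, *bytes_written advances past the copied string
 * so that the next path is appended after a separating space.
 */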
1991static int
1992vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1993 size_t *rsz, boolean_t is_spare)
1994{
1995 char *type;
1996 int ret;
1997
1998 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1999 return (EZFS_INVALCONFIG);
2000
2001 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2002 /*
2003 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2004 * For a spare vdev, we only want to boot from the active
2005 * spare device.
2006 */
2007 if (is_spare) {
2008 uint64_t spare = 0;
2009 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2010 &spare);
2011 if (!spare)
2012 return (EZFS_INVALCONFIG);
2013 }
2014
2015 if (vdev_online(nv)) {
2016 if ((ret = vdev_get_one_physpath(nv, physpath,
2017 phypath_size, rsz)) != 0)
2018 return (ret);
2019 }
2020 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2021 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2022 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2023 nvlist_t **child;
2024 uint_t count;
2025 int i, ret;
2026
2027 if (nvlist_lookup_nvlist_array(nv,
2028 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2029 return (EZFS_INVALCONFIG);
2030
2031 for (i = 0; i < count; i++) {
2032 ret = vdev_get_physpaths(child[i], physpath,
2033 phypath_size, rsz, is_spare);
2034 if (ret == EZFS_NOSPC)
2035 return (ret);
2036 }
2037 }
2038
2039 return (EZFS_POOL_INVALARG);
2040}
2041
2042/*
2043 * Get phys_path for a root pool config.
2044 * Return 0 on success; non-zero on failure.
2045 */
2046static int
2047zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
b128c09f 2048{
9babb374 2049 size_t rsz;
b128c09f
BB
2050 nvlist_t *vdev_root;
2051 nvlist_t **child;
2052 uint_t count;
9babb374 2053 char *type;
b128c09f 2054
9babb374 2055 rsz = 0;
b128c09f 2056
9babb374
BB
2057 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2058 &vdev_root) != 0)
2059 return (EZFS_INVALCONFIG);
b128c09f 2060
9babb374
BB
2061 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2062 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
b128c09f 2063 &child, &count) != 0)
9babb374 2064 return (EZFS_INVALCONFIG);
b128c09f 2065
c372b36e 2066#if defined(__sun__) || defined(__sun)
9babb374
BB
2067 /*
2068 * A root pool cannot have EFI labeled disks and can only have
2069 * a single top-level vdev.
2070 */
2071 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2072 pool_uses_efi(vdev_root))
2073 return (EZFS_POOL_INVALARG);
c372b36e 2074#endif
b128c09f 2075
9babb374
BB
2076 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2077 B_FALSE);
2078
2079 /* No online devices */
2080 if (rsz == 0)
2081 return (EZFS_NODEVICE);
b128c09f
BB
2082
2083 return (0);
34dc7c2f
BB
2084}
2085
9babb374
BB
2086/*
2087 * Get phys_path for a root pool
2088 * Return 0 on success; non-zero on failure.
2089 */
2090int
2091zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2092{
2093 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2094 phypath_size));
2095}
2096
9babb374
BB
2097/*
2098 * If the device has been dynamically expanded then we need to relabel
2099 * the disk to use the new unallocated space.
2100 */
2101static int
8adf4864 2102zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
9babb374 2103{
9babb374 2104 int fd, error;
9babb374 2105
d603ed6c 2106 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
9babb374 2107 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
109491a8 2108 "relabel '%s': unable to open device: %d"), path, errno);
8adf4864 2109 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
9babb374
BB
2110 }
2111
2112 /*
2113 * It's possible that we might encounter an error if the device
2114 * does not have any unallocated space left. If so, we simply
2115 * ignore that error and continue on.
b5a28807
ED
2116 *
2117 * Also, we don't call efi_rescan() - that would just return EBUSY.
2118 * The module will do it for us in vdev_disk_open().
9babb374 2119 */
d603ed6c 2120 error = efi_use_whole_disk(fd);
9babb374
BB
2121 (void) close(fd);
2122 if (error && error != VT_ENOSPC) {
2123 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
d603ed6c 2124 "relabel '%s': unable to read disk capacity"), path);
8adf4864 2125 return (zfs_error(hdl, EZFS_NOCAP, msg));
9babb374
BB
2126 }
2127 return (0);
2128}
2129
34dc7c2f
BB
2130/*
2131 * Bring the specified vdev online. The 'flags' parameter is a set of the
2132 * ZFS_ONLINE_* flags.
2133 */
2134int
2135zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2136 vdev_state_t *newstate)
2137{
2598c001 2138 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2139 char msg[1024];
2140 nvlist_t *tgt;
9babb374 2141 boolean_t avail_spare, l2cache, islog;
34dc7c2f 2142 libzfs_handle_t *hdl = zhp->zpool_hdl;
8adf4864 2143 int error;
34dc7c2f 2144
9babb374
BB
2145 if (flags & ZFS_ONLINE_EXPAND) {
2146 (void) snprintf(msg, sizeof (msg),
2147 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2148 } else {
2149 (void) snprintf(msg, sizeof (msg),
2150 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2151 }
34dc7c2f
BB
2152
2153 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 2154 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
9babb374 2155 &islog)) == NULL)
34dc7c2f
BB
2156 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2157
2158 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2159
428870ff 2160 if (avail_spare)
34dc7c2f
BB
2161 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2162
9babb374
BB
2163 if (flags & ZFS_ONLINE_EXPAND ||
2164 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
9babb374
BB
2165 uint64_t wholedisk = 0;
2166
2167 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2168 &wholedisk);
9babb374
BB
2169
2170 /*
2171 * XXX - L2ARC 1.0 devices can't support expansion.
2172 */
2173 if (l2cache) {
2174 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2175 "cannot expand cache devices"));
2176 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2177 }
2178
2179 if (wholedisk) {
7608bd0d
ED
2180 const char *fullpath = path;
2181 char buf[MAXPATHLEN];
2182
2183 if (path[0] != '/') {
2184 error = zfs_resolve_shortname(path, buf,
2185 sizeof (buf));
2186 if (error != 0)
2187 return (zfs_error(hdl, EZFS_NODEVICE,
2188 msg));
2189
2190 fullpath = buf;
2191 }
2192
2193 error = zpool_relabel_disk(hdl, fullpath, msg);
8adf4864
ED
2194 if (error != 0)
2195 return (error);
9babb374
BB
2196 }
2197 }
2198
34dc7c2f
BB
2199 zc.zc_cookie = VDEV_STATE_ONLINE;
2200 zc.zc_obj = flags;
2201
572e2857 2202 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
428870ff
BB
2203 if (errno == EINVAL) {
2204 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2205 "from this pool into a new one. Use '%s' "
2206 "instead"), "zpool detach");
2207 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2208 }
34dc7c2f 2209 return (zpool_standard_error(hdl, errno, msg));
428870ff 2210 }
34dc7c2f
BB
2211
2212 *newstate = zc.zc_cookie;
2213 return (0);
2214}
2215
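/*
 * Usage sketch: bring a device back online and ask for expansion; the
 * ZFS_ONLINE_EXPAND flag drives the whole-disk relabel path above.
 * 'zhp' and the short device name are assumptions.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		... the expansion request was accepted ...
 */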
2216/*
2217 * Take the specified vdev offline
2218 */
2219int
2220zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2221{
2598c001 2222 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2223 char msg[1024];
2224 nvlist_t *tgt;
2225 boolean_t avail_spare, l2cache;
2226 libzfs_handle_t *hdl = zhp->zpool_hdl;
2227
2228 (void) snprintf(msg, sizeof (msg),
2229 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2230
2231 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2232 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2233 NULL)) == NULL)
34dc7c2f
BB
2234 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2235
2236 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2237
428870ff 2238 if (avail_spare)
34dc7c2f
BB
2239 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2240
34dc7c2f
BB
2241 zc.zc_cookie = VDEV_STATE_OFFLINE;
2242 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2243
572e2857 2244 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2245 return (0);
2246
2247 switch (errno) {
2248 case EBUSY:
2249
2250 /*
2251 * There are no other replicas of this device.
2252 */
2253 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2254
9babb374
BB
2255 case EEXIST:
2256 /*
2257 * The log device has unplayed logs
2258 */
2259 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2260
34dc7c2f
BB
2261 default:
2262 return (zpool_standard_error(hdl, errno, msg));
2263 }
2264}
2265
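/*
 * Usage sketch (hypothetical device name): a temporary offline sets
 * ZFS_OFFLINE_TEMPORARY, so the offline state is not persisted across
 * an export/import cycle or reboot.
 *
 *	(void) zpool_vdev_offline(zhp, "sda", B_TRUE);
 */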
2266/*
2267 * Mark the given vdev faulted.
2268 */
2269int
428870ff 2270zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2271{
2598c001 2272 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2273 char msg[1024];
2274 libzfs_handle_t *hdl = zhp->zpool_hdl;
2275
2276 (void) snprintf(msg, sizeof (msg),
b8864a23 2277 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2278
2279 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2280 zc.zc_guid = guid;
2281 zc.zc_cookie = VDEV_STATE_FAULTED;
428870ff 2282 zc.zc_obj = aux;
34dc7c2f 2283
572e2857 2284 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2285 return (0);
2286
2287 switch (errno) {
2288 case EBUSY:
2289
2290 /*
2291 * There are no other replicas of this device.
2292 */
2293 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2294
2295 default:
2296 return (zpool_standard_error(hdl, errno, msg));
2297 }
2298
2299}
2300
2301/*
2302 * Mark the given vdev degraded.
2303 */
2304int
428870ff 2305zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
34dc7c2f 2306{
2598c001 2307 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2308 char msg[1024];
2309 libzfs_handle_t *hdl = zhp->zpool_hdl;
2310
2311 (void) snprintf(msg, sizeof (msg),
b8864a23 2312 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
34dc7c2f
BB
2313
2314 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2315 zc.zc_guid = guid;
2316 zc.zc_cookie = VDEV_STATE_DEGRADED;
428870ff 2317 zc.zc_obj = aux;
34dc7c2f 2318
572e2857 2319 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
34dc7c2f
BB
2320 return (0);
2321
2322 return (zpool_standard_error(hdl, errno, msg));
2323}
2324
2325/*
2326 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2327 * a hot spare.
2328 */
2329static boolean_t
2330is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2331{
2332 nvlist_t **child;
2333 uint_t c, children;
2334 char *type;
2335
2336 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2337 &children) == 0) {
2338 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2339 &type) == 0);
2340
2341 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2342 children == 2 && child[which] == tgt)
2343 return (B_TRUE);
2344
2345 for (c = 0; c < children; c++)
2346 if (is_replacing_spare(child[c], tgt, which))
2347 return (B_TRUE);
2348 }
2349
2350 return (B_FALSE);
2351}
2352
2353/*
2354 * Attach new_disk (fully described by nvroot) to old_disk.
2355 * If 'replacing' is specified, the new disk will replace the old one.
2356 */
2357int
2358zpool_vdev_attach(zpool_handle_t *zhp,
2359 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2360{
2598c001 2361 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2362 char msg[1024];
2363 int ret;
2364 nvlist_t *tgt;
b128c09f
BB
2365 boolean_t avail_spare, l2cache, islog;
2366 uint64_t val;
572e2857 2367 char *newname;
34dc7c2f
BB
2368 nvlist_t **child;
2369 uint_t children;
2370 nvlist_t *config_root;
2371 libzfs_handle_t *hdl = zhp->zpool_hdl;
1bd201e7 2372 boolean_t rootpool = zpool_is_bootable(zhp);
34dc7c2f
BB
2373
2374 if (replacing)
2375 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2376 "cannot replace %s with %s"), old_disk, new_disk);
2377 else
2378 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2379 "cannot attach %s to %s"), new_disk, old_disk);
2380
c372b36e 2381#if defined(__sun__) || defined(__sun)
b128c09f
BB
2382 /*
2383 * If this is a root pool, make sure that we're not attaching an
2384 * EFI labeled device.
2385 */
2386 if (rootpool && pool_uses_efi(nvroot)) {
2387 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2388 "EFI labeled devices are not supported on root pools."));
2389 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2390 }
c372b36e 2391#endif
b128c09f 2392
34dc7c2f 2393 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2394 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2395 &islog)) == 0)
34dc7c2f
BB
2396 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2397
2398 if (avail_spare)
2399 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2400
2401 if (l2cache)
2402 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2403
2404 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2405 zc.zc_cookie = replacing;
2406
2407 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2408 &child, &children) != 0 || children != 1) {
2409 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2410 "new device must be a single disk"));
2411 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2412 }
2413
2414 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2415 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2416
428870ff 2417 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
b128c09f
BB
2418 return (-1);
2419
34dc7c2f
BB
2420 /*
2421 * If the target is a hot spare that has been swapped in, we can only
2422 * replace it with another hot spare.
2423 */
2424 if (replacing &&
2425 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
b128c09f
BB
2426 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2427 NULL) == NULL || !avail_spare) &&
2428 is_replacing_spare(config_root, tgt, 1)) {
34dc7c2f
BB
2429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2430 "can only be replaced by another hot spare"));
b128c09f 2431 free(newname);
34dc7c2f
BB
2432 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2433 }
2434
b128c09f
BB
2435 free(newname);
2436
34dc7c2f
BB
2437 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2438 return (-1);
2439
572e2857 2440 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
34dc7c2f
BB
2441
2442 zcmd_free_nvlists(&zc);
2443
b128c09f
BB
2444 if (ret == 0) {
2445 if (rootpool) {
9babb374
BB
2446 /*
2447 * XXX need a better way to prevent user from
2448 * booting up a half-baked vdev.
2449 */
2450 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2451 "sure to wait until resilver is done "
2452 "before rebooting.\n"));
b128c09f 2453 }
34dc7c2f 2454 return (0);
b128c09f 2455 }
34dc7c2f
BB
2456
2457 switch (errno) {
2458 case ENOTSUP:
2459 /*
2460 * Can't attach to or replace this type of vdev.
2461 */
2462 if (replacing) {
572e2857
BB
2463 uint64_t version = zpool_get_prop_int(zhp,
2464 ZPOOL_PROP_VERSION, NULL);
2465
b128c09f 2466 if (islog)
34dc7c2f
BB
2467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2468 "cannot replace a log with a spare"));
572e2857
BB
2469 else if (version >= SPA_VERSION_MULTI_REPLACE)
2470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2471 "already in replacing/spare config; wait "
2472 "for completion or use 'zpool detach'"));
34dc7c2f
BB
2473 else
2474 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2475 "cannot replace a replacing device"));
2476 } else {
2477 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2478 "can only attach to mirrors and top-level "
2479 "disks"));
2480 }
2481 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2482 break;
2483
2484 case EINVAL:
2485 /*
2486 * The new device must be a single disk.
2487 */
2488 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2489 "new device must be a single disk"));
2490 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2491 break;
2492
2493 case EBUSY:
2494 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2495 new_disk);
2496 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2497 break;
2498
2499 case EOVERFLOW:
2500 /*
2501 * The new device is too small.
2502 */
2503 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2504 "device is too small"));
2505 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2506 break;
2507
2508 case EDOM:
2509 /*
2510 * The new device has a different alignment requirement.
2511 */
2512 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2513 "devices have different sector alignment"));
2514 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2515 break;
2516
2517 case ENAMETOOLONG:
2518 /*
2519 * The resulting top-level vdev spec won't fit in the label.
2520 */
2521 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2522 break;
2523
2524 default:
2525 (void) zpool_standard_error(hdl, errno, msg);
2526 }
2527
2528 return (-1);
2529}
2530
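/*
 * Usage sketch: replacing 'sda' with '/dev/sdb'.  In practice 'nvroot'
 * is built by the zpool(8) vdev-construction code; the hand-rolled tree
 * below is a simplified illustration and omits leaf details such as
 * ZPOOL_CONFIG_WHOLE_DISK.  All device names are hypothetical.
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/sdb") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	if (zpool_vdev_attach(zhp, "sda", "/dev/sdb", nvroot, B_TRUE) == 0)
 *		... resilver begins; wait for it before rebooting ...
 */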
2531/*
2532 * Detach the specified device.
2533 */
2534int
2535zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2536{
2598c001 2537 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2538 char msg[1024];
2539 nvlist_t *tgt;
2540 boolean_t avail_spare, l2cache;
2541 libzfs_handle_t *hdl = zhp->zpool_hdl;
2542
2543 (void) snprintf(msg, sizeof (msg),
2544 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2545
2546 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f
BB
2547 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2548 NULL)) == 0)
34dc7c2f
BB
2549 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2550
2551 if (avail_spare)
2552 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2553
2554 if (l2cache)
2555 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2556
2557 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2558
2559 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2560 return (0);
2561
2562 switch (errno) {
2563
2564 case ENOTSUP:
2565 /*
2566 * Can't detach from this type of vdev.
2567 */
2568 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2569 "applicable to mirror and replacing vdevs"));
572e2857 2570 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
34dc7c2f
BB
2571 break;
2572
2573 case EBUSY:
2574 /*
2575 * There are no other replicas of this device.
2576 */
2577 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2578 break;
2579
2580 default:
2581 (void) zpool_standard_error(hdl, errno, msg);
2582 }
2583
2584 return (-1);
2585}
2586
428870ff
BB
2587/*
2588 * Find a mirror vdev in the source nvlist.
2589 *
2590 * The mchild array contains a list of disks in one of the top-level mirrors
2591 * of the source pool. The schild array contains a list of disks that the
2592 * user specified on the command line. We loop over the mchild array to
2593 * see if any entry in the schild array matches.
2594 *
2595 * If a disk in the mchild array is found in the schild array, we return
2596 * the index of that entry. Otherwise we return -1.
2597 */
2598static int
2599find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2600 nvlist_t **schild, uint_t schildren)
2601{
2602 uint_t mc;
2603
2604 for (mc = 0; mc < mchildren; mc++) {
2605 uint_t sc;
2606 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2607 mchild[mc], B_FALSE);
2608
2609 for (sc = 0; sc < schildren; sc++) {
2610 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2611 schild[sc], B_FALSE);
2612 boolean_t result = (strcmp(mpath, spath) == 0);
2613
2614 free(spath);
2615 if (result) {
2616 free(mpath);
2617 return (mc);
2618 }
2619 }
2620
2621 free(mpath);
2622 }
2623
2624 return (-1);
2625}
2626
2627/*
2628 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2629 * is generated and it is the responsibility of the caller to free it.
2630 */
2631int
2632zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2633 nvlist_t *props, splitflags_t flags)
2634{
2598c001 2635 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
428870ff
BB
2636 char msg[1024];
2637 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2638 nvlist_t **varray = NULL, *zc_props = NULL;
2639 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2640 libzfs_handle_t *hdl = zhp->zpool_hdl;
2641 uint64_t vers;
2642 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2643 int retval = 0;
2644
2645 (void) snprintf(msg, sizeof (msg),
2646 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2647
2648 if (!zpool_name_valid(hdl, B_FALSE, newname))
2649 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2650
2651 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2652 (void) fprintf(stderr, gettext("Internal error: unable to "
2653 "retrieve pool configuration\n"));
2654 return (-1);
2655 }
2656
2657 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2658 == 0);
2659 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2660
2661 if (props) {
572e2857 2662 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
428870ff 2663 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
572e2857 2664 props, vers, flags, msg)) == NULL)
428870ff
BB
2665 return (-1);
2666 }
2667
2668 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2669 &children) != 0) {
2670 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2671 "Source pool is missing vdev tree"));
2672 if (zc_props)
2673 nvlist_free(zc_props);
2674 return (-1);
2675 }
2676
2677 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2678 vcount = 0;
2679
2680 if (*newroot == NULL ||
2681 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2682 &newchild, &newchildren) != 0)
2683 newchildren = 0;
2684
2685 for (c = 0; c < children; c++) {
2686 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2687 char *type;
2688 nvlist_t **mchild, *vdev;
2689 uint_t mchildren;
2690 int entry;
2691
2692 /*
2693 * Unlike cache & spares, slogs are stored in the
2694 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2695 */
2696 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2697 &is_log);
2698 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2699 &is_hole);
2700 if (is_log || is_hole) {
2701 /*
2702 * Create a hole vdev and put it in the config.
2703 */
2704 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2705 goto out;
2706 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2707 VDEV_TYPE_HOLE) != 0)
2708 goto out;
2709 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2710 1) != 0)
2711 goto out;
2712 if (lastlog == 0)
2713 lastlog = vcount;
2714 varray[vcount++] = vdev;
2715 continue;
2716 }
2717 lastlog = 0;
2718 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2719 == 0);
2720 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2721 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2722 "Source pool must be composed only of mirrors\n"));
2723 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2724 goto out;
2725 }
2726
2727 verify(nvlist_lookup_nvlist_array(child[c],
2728 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2729
2730 /* find or add an entry for this top-level vdev */
2731 if (newchildren > 0 &&
2732 (entry = find_vdev_entry(zhp, mchild, mchildren,
2733 newchild, newchildren)) >= 0) {
2734 /* We found a disk that the user specified. */
2735 vdev = mchild[entry];
2736 ++found;
2737 } else {
2738 /* User didn't specify a disk for this vdev. */
2739 vdev = mchild[mchildren - 1];
2740 }
2741
2742 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2743 goto out;
2744 }
2745
2746 /* did we find every disk the user specified? */
2747 if (found != newchildren) {
2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2749 "include at most one disk from each mirror"));
2750 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2751 goto out;
2752 }
2753
2754 /* Prepare the nvlist for populating. */
2755 if (*newroot == NULL) {
2756 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2757 goto out;
2758 freelist = B_TRUE;
2759 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2760 VDEV_TYPE_ROOT) != 0)
2761 goto out;
2762 } else {
2763 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2764 }
2765
2766 /* Add all the children we found */
2767 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2768 lastlog == 0 ? vcount : lastlog) != 0)
2769 goto out;
2770
2771 /*
2772 * If we're just doing a dry run, exit now with success.
2773 */
2774 if (flags.dryrun) {
2775 memory_err = B_FALSE;
2776 freelist = B_FALSE;
2777 goto out;
2778 }
2779
2780 /* now build up the config list & call the ioctl */
2781 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2782 goto out;
2783
2784 if (nvlist_add_nvlist(newconfig,
2785 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2786 nvlist_add_string(newconfig,
2787 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2788 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2789 goto out;
2790
2791 /*
2792 * The new pool is automatically part of the namespace unless we
2793 * explicitly export it.
2794 */
2795 if (!flags.import)
2796 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2797 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2798 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2799 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2800 goto out;
2801 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2802 goto out;
2803
2804 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2805 retval = zpool_standard_error(hdl, errno, msg);
2806 goto out;
2807 }
2808
2809 freelist = B_FALSE;
2810 memory_err = B_FALSE;
2811
2812out:
2813 if (varray != NULL) {
2814 int v;
2815
2816 for (v = 0; v < vcount; v++)
2817 nvlist_free(varray[v]);
2818 free(varray);
2819 }
2820 zcmd_free_nvlists(&zc);
2821 if (zc_props)
2822 nvlist_free(zc_props);
2823 if (newconfig)
2824 nvlist_free(newconfig);
2825 if (freelist) {
2826 nvlist_free(*newroot);
2827 *newroot = NULL;
2828 }
2829
2830 if (retval != 0)
2831 return (retval);
2832
2833 if (memory_err)
2834 return (no_memory(hdl));
2835
2836 return (0);
2837}
2838
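/*
 * Usage sketch: a dry-run split, akin to 'zpool split -n'.  The new
 * pool name is hypothetical; on success the caller owns 'newroot'.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0) {
 *		... inspect the would-be config in newroot ...
 *		nvlist_free(newroot);
 *	}
 */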
34dc7c2f
BB
2839/*
2840 * Remove the given device. Currently, this is supported only for hot spares,
2841 * level 2 cache devices, and log devices.
2842 */
2843int
2844zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2845{
2598c001 2846 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2847 char msg[1024];
2848 nvlist_t *tgt;
428870ff 2849 boolean_t avail_spare, l2cache, islog;
34dc7c2f 2850 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 2851 uint64_t version;
34dc7c2f
BB
2852
2853 (void) snprintf(msg, sizeof (msg),
2854 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2855
2856 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
b128c09f 2857 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
428870ff 2858 &islog)) == 0)
34dc7c2f 2859 return (zfs_error(hdl, EZFS_NODEVICE, msg));
428870ff
BB
2860 /*
2861 * XXX - this should just go away.
2862 */
2863 if (!avail_spare && !l2cache && !islog) {
34dc7c2f 2864 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
428870ff
BB
2865 "only inactive hot spares, cache, top-level, "
2866 "or log devices can be removed"));
34dc7c2f
BB
2867 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2868 }
2869
428870ff
BB
2870 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2871 if (islog && version < SPA_VERSION_HOLES) {
2872 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2873 "pool must be upgrade to support log removal"));
2874 return (zfs_error(hdl, EZFS_BADVERSION, msg));
2875 }
2876
34dc7c2f
BB
2877 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2878
2879 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2880 return (0);
2881
2882 return (zpool_standard_error(hdl, errno, msg));
2883}
2884
2885/*
2886 * Clear the errors for the pool, or the particular device if specified.
2887 */
2888int
428870ff 2889zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
34dc7c2f 2890{
2598c001 2891 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2892 char msg[1024];
2893 nvlist_t *tgt;
428870ff 2894 zpool_rewind_policy_t policy;
34dc7c2f
BB
2895 boolean_t avail_spare, l2cache;
2896 libzfs_handle_t *hdl = zhp->zpool_hdl;
428870ff 2897 nvlist_t *nvi = NULL;
572e2857 2898 int error;
34dc7c2f
BB
2899
2900 if (path)
2901 (void) snprintf(msg, sizeof (msg),
2902 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2903 path);
2904 else
2905 (void) snprintf(msg, sizeof (msg),
2906 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2907 zhp->zpool_name);
2908
2909 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2910 if (path) {
2911 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
b128c09f 2912 &l2cache, NULL)) == 0)
34dc7c2f
BB
2913 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2914
2915 /*
2916 * Don't allow error clearing for hot spares. Do allow
2917 * error clearing for l2cache devices.
2918 */
2919 if (avail_spare)
2920 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2921
2922 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2923 &zc.zc_guid) == 0);
2924 }
2925
428870ff
BB
2926 zpool_get_rewind_policy(rewindnvl, &policy);
2927 zc.zc_cookie = policy.zrp_request;
2928
572e2857 2929 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
428870ff
BB
2930 return (-1);
2931
572e2857 2932 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
428870ff
BB
2933 return (-1);
2934
572e2857
BB
2935 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
2936 errno == ENOMEM) {
2937 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2938 zcmd_free_nvlists(&zc);
2939 return (-1);
2940 }
2941 }
2942
2943 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
428870ff
BB
2944 errno != EPERM && errno != EACCES)) {
2945 if (policy.zrp_request &
2946 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2947 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2948 zpool_rewind_exclaim(hdl, zc.zc_name,
2949 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2950 nvi);
2951 nvlist_free(nvi);
2952 }
2953 zcmd_free_nvlists(&zc);
34dc7c2f 2954 return (0);
428870ff 2955 }
34dc7c2f 2956
428870ff 2957 zcmd_free_nvlists(&zc);
34dc7c2f
BB
2958 return (zpool_standard_error(hdl, errno, msg));
2959}
2960
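/*
 * Usage sketch: clear errors pool-wide and then for one hypothetical
 * device, using an empty rewind nvlist (zpool_get_rewind_policy()
 * defaults this to ZPOOL_NO_REWIND).
 *
 *	nvlist_t *policy;
 *
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	(void) zpool_clear(zhp, "sda", policy);
 *	nvlist_free(policy);
 */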
2961/*
2962 * Similar to zpool_clear(), but takes a GUID (used by fmd).
2963 */
2964int
2965zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2966{
2598c001 2967 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
2968 char msg[1024];
2969 libzfs_handle_t *hdl = zhp->zpool_hdl;
2970
2971 (void) snprintf(msg, sizeof (msg),
2972 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
b8864a23 2973 (u_longlong_t)guid);
34dc7c2f
BB
2974
2975 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2976 zc.zc_guid = guid;
428870ff 2977 zc.zc_cookie = ZPOOL_NO_REWIND;
34dc7c2f
BB
2978
2979 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2980 return (0);
2981
2982 return (zpool_standard_error(hdl, errno, msg));
2983}
2984
3541dc6d
GA
2985/*
2986 * Change the GUID for a pool.
2987 */
2988int
2989zpool_reguid(zpool_handle_t *zhp)
2990{
2991 char msg[1024];
2992 libzfs_handle_t *hdl = zhp->zpool_hdl;
2993 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2994
2995 (void) snprintf(msg, sizeof (msg),
2996 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
2997
2998 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2999 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3000 return (0);
3001
3002 return (zpool_standard_error(hdl, errno, msg));
3003}
3004
1bd201e7
CS
3005/*
3006 * Reopen the pool.
3007 */
3008int
3009zpool_reopen(zpool_handle_t *zhp)
3010{
3011 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3012 char msg[1024];
3013 libzfs_handle_t *hdl = zhp->zpool_hdl;
3014
3015 (void) snprintf(msg, sizeof (msg),
3016 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3017 zhp->zpool_name);
3018
3019 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3020 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3021 return (0);
3022 return (zpool_standard_error(hdl, errno, msg));
3023}
3024
34dc7c2f
BB
3025/*
3026 * Convert from a devid string to a path.
3027 */
3028static char *
3029devid_to_path(char *devid_str)
3030{
3031 ddi_devid_t devid;
3032 char *minor;
3033 char *path;
3034 devid_nmlist_t *list = NULL;
3035 int ret;
3036
3037 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3038 return (NULL);
3039
3040 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3041
3042 devid_str_free(minor);
3043 devid_free(devid);
3044
3045 if (ret != 0)
3046 return (NULL);
3047
3048 if ((path = strdup(list[0].devname)) == NULL)
3049 return (NULL);
3050
3051 devid_free_nmlist(list);
3052
3053 return (path);
3054}
3055
3056/*
3057 * Convert from a path to a devid string.
3058 */
3059static char *
3060path_to_devid(const char *path)
3061{
3062 int fd;
3063 ddi_devid_t devid;
3064 char *minor, *ret;
3065
3066 if ((fd = open(path, O_RDONLY)) < 0)
3067 return (NULL);
3068
3069 minor = NULL;
3070 ret = NULL;
3071 if (devid_get(fd, &devid) == 0) {
3072 if (devid_get_minor_name(fd, &minor) == 0)
3073 ret = devid_str_encode(devid, minor);
3074 if (minor != NULL)
3075 devid_str_free(minor);
3076 devid_free(devid);
3077 }
3078 (void) close(fd);
3079
3080 return (ret);
3081}
3082
3083/*
3084 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3085 * ignore any failure here, since a common case is for an unprivileged user to
3086 * type 'zpool status', and we'll display the correct information anyway.
3087 */
3088static void
3089set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3090{
2598c001 3091 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3092
3093 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3094 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3095 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3096 &zc.zc_guid) == 0);
3097
3098 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3099}
3100
83c62c93
NB
3101/*
3102 * Remove partition suffix from a vdev path. Partition suffixes may take three
3103 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3104 * case only occurs when the suffix is preceded by a digit, e.g. "md0p0". The
3105 * third case only occurs when preceded by a string matching the regular
3106 * expression "^[hs]d[a-z]+", i.e. a SCSI or IDE disk.
3107 */
3108static char *
3109strip_partition(libzfs_handle_t *hdl, char *path)
3110{
3111 char *tmp = zfs_strdup(hdl, path);
3112 char *part = NULL, *d = NULL;
3113
3114 if ((part = strstr(tmp, "-part")) && part != tmp) {
3115 d = part + 5;
3116 } else if ((part = strrchr(tmp, 'p')) &&
3117 part > tmp + 1 && isdigit(*(part-1))) {
3118 d = part + 1;
3119 } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3120 for (d = &tmp[2]; isalpha(*d); part = ++d);
3121 }
3122 if (part && d && *d != '\0') {
3123 for (; isdigit(*d); d++);
3124 if (*d == '\0')
3125 *part = '\0';
3126 }
3127 return (tmp);
3128}
3129
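/*
 * For example (sketch): "sda1" -> "sda", "md0p2" -> "md0", and
 * "zd0-part3" -> "zd0"; a path with no recognized suffix is returned
 * unchanged.  The caller frees the returned copy.
 */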
858219cc
NB
3130#define PATH_BUF_LEN 64
3131
34dc7c2f
BB
3132/*
3133 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3134 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3135 * We also check if this is a whole disk, in which case we strip off the
3136 * trailing 's0' slice name.
3137 *
3138 * This routine is also responsible for identifying when disks have been
3139 * reconfigured in a new location. The kernel will have opened the device by
3140 * devid, but the path will still refer to the old location. To catch this, we
3141 * first do a path -> devid translation (which is fast for the common case). If
3142 * the devid matches, we're done. If not, we do a reverse devid -> path
3143 * translation and issue the appropriate ioctl() to update the path of the vdev.
3144 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3145 * of these checks.
3146 */
3147char *
428870ff
BB
3148zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3149 boolean_t verbose)
34dc7c2f 3150{
d603ed6c 3151 char *path, *devid, *type;
34dc7c2f 3152 uint64_t value;
858219cc 3153 char buf[PATH_BUF_LEN];
fc24f7c8 3154 char tmpbuf[PATH_BUF_LEN];
34dc7c2f
BB
3155 vdev_stat_t *vs;
3156 uint_t vsc;
3157
3158 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3159 &value) == 0) {
3160 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3161 &value) == 0);
3162 (void) snprintf(buf, sizeof (buf), "%llu",
3163 (u_longlong_t)value);
3164 path = buf;
3165 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
34dc7c2f
BB
3166 /*
3167 * If the device is dead (faulted, offline, etc) then don't
3168 * bother opening it. Otherwise we may be forcing the user to
3169 * open a misbehaving device, which can have undesirable
3170 * effects.
3171 */
428870ff 3172 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
34dc7c2f
BB
3173 (uint64_t **)&vs, &vsc) != 0 ||
3174 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3175 zhp != NULL &&
3176 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3177 /*
3178 * Determine if the current path is correct.
3179 */
3180 char *newdevid = path_to_devid(path);
3181
3182 if (newdevid == NULL ||
3183 strcmp(devid, newdevid) != 0) {
3184 char *newpath;
3185
3186 if ((newpath = devid_to_path(devid)) != NULL) {
3187 /*
3188 * Update the path appropriately.
3189 */
3190 set_path(zhp, nv, newpath);
3191 if (nvlist_add_string(nv,
3192 ZPOOL_CONFIG_PATH, newpath) == 0)
3193 verify(nvlist_lookup_string(nv,
3194 ZPOOL_CONFIG_PATH,
3195 &path) == 0);
3196 free(newpath);
3197 }
3198 }
3199
3200 if (newdevid)
3201 devid_str_free(newdevid);
3202 }
3203
d603ed6c
BB
3204 /*
3205 * For a block device only use the name.
3206 */
3207 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3208 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3209 path = strrchr(path, '/');
3210 path++;
3211 }
34dc7c2f 3212
d603ed6c 3213 /*
83c62c93 3214 * Remove the partition from the path if this is a whole disk.
d603ed6c 3215 */
34dc7c2f
BB
3216 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3217 &value) == 0 && value) {
83c62c93 3218 return (strip_partition(hdl, path));
34dc7c2f
BB
3219 }
3220 } else {
3221 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3222
3223 /*
3224 * If it's a raidz device, we need to stick in the parity level.
3225 */
3226 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
858219cc 3227
34dc7c2f
BB
3228 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3229 &value) == 0);
fc24f7c8 3230 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
34dc7c2f 3231 (u_longlong_t)value);
fc24f7c8 3232 path = buf;
34dc7c2f 3233 }
428870ff
BB
3234
3235 /*
3236 * We identify each top-level vdev by using a <type-id>
3237 * naming convention.
3238 */
3239 if (verbose) {
3240 uint64_t id;
3241
3242 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3243 &id) == 0);
fc24f7c8
MM
3244 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3245 path, (u_longlong_t)id);
3246 path = tmpbuf;
428870ff 3247 }
34dc7c2f
BB
3248 }
3249
3250 return (zfs_strdup(hdl, path));
3251}
3252
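/*
 * Example results (sketch): a whole-disk leaf with path "/dev/sda1" is
 * reported as "sda"; a raidz vdev with nparity 2 becomes "raidz2", or
 * "raidz2-0" in the verbose <type-id> form.
 */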
3253static int
3254zbookmark_compare(const void *a, const void *b)
3255{
3256 return (memcmp(a, b, sizeof (zbookmark_t)));
3257}
3258
3259/*
3260 * Retrieve the persistent error log, uniquify the members, and return to the
3261 * caller.
3262 */
3263int
3264zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3265{
2598c001 3266 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3267 uint64_t count;
3268 zbookmark_t *zb = NULL;
3269 int i;
3270
3271 /*
3272 * Retrieve the raw error list from the kernel. If the number of errors
3273 * has increased, allocate more space and continue until we get the
3274 * entire list.
3275 */
3276 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3277 &count) == 0);
3278 if (count == 0)
3279 return (0);
3280 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3281 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3282 return (-1);
3283 zc.zc_nvlist_dst_size = count;
3284 (void) strcpy(zc.zc_name, zhp->zpool_name);
3285 for (;;) {
3286 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3287 &zc) != 0) {
3288 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3289 if (errno == ENOMEM) {
3290 count = zc.zc_nvlist_dst_size;
3291 if ((zc.zc_nvlist_dst = (uintptr_t)
3292 zfs_alloc(zhp->zpool_hdl, count *
3293 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3294 return (-1);
3295 } else {
3296 return (-1);
3297 }
3298 } else {
3299 break;
3300 }
3301 }
3302
3303 /*
3304 * Sort the resulting bookmarks. This is a little confusing due to the
3305 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3306 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3307 * _not_ copied as part of the process. So we point the start of our
3308 * array appropriately and decrement the total number of elements.
3309 */
3310 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3311 zc.zc_nvlist_dst_size;
3312 count -= zc.zc_nvlist_dst_size;
3313
3314 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3315
3316 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3317
3318 /*
3319 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3320 */
3321 for (i = 0; i < count; i++) {
3322 nvlist_t *nv;
3323
3324 /* ignoring zb_blkid and zb_level for now */
3325 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3326 zb[i-1].zb_object == zb[i].zb_object)
3327 continue;
3328
3329 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3330 goto nomem;
3331 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3332 zb[i].zb_objset) != 0) {
3333 nvlist_free(nv);
3334 goto nomem;
3335 }
3336 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3337 zb[i].zb_object) != 0) {
3338 nvlist_free(nv);
3339 goto nomem;
3340 }
3341 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3342 nvlist_free(nv);
3343 goto nomem;
3344 }
3345 nvlist_free(nv);
3346 }
3347
3348 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3349 return (0);
3350
3351nomem:
3352 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3353 return (no_memory(zhp->zpool_hdl));
3354}
3355
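/*
 * Usage sketch: walk the uniquified list.  Each element carries
 * ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT, which zpool_obj_to_path()
 * below can resolve to a pathname.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL)
 *			... look up ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT ...
 *		nvlist_free(nverrlist);
 *	}
 */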
3356/*
3357 * Upgrade a ZFS pool to the latest on-disk version.
3358 */
3359int
3360zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3361{
2598c001 3362 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3363 libzfs_handle_t *hdl = zhp->zpool_hdl;
3364
3365 (void) strcpy(zc.zc_name, zhp->zpool_name);
3366 zc.zc_cookie = new_version;
3367
3368 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3369 return (zpool_standard_error_fmt(hdl, errno,
3370 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3371 zhp->zpool_name));
3372 return (0);
3373}
3374
3375void
3376zpool_set_history_str(const char *subcommand, int argc, char **argv,
3377 char *history_str)
3378{
3379 int i;
3380
3381 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3382 for (i = 1; i < argc; i++) {
3383 if (strlen(history_str) + 1 + strlen(argv[i]) >
3384 HIS_MAX_RECORD_LEN)
3385 break;
3386 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3387 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3388 }
3389}
3390
3391/*
3392 * Stage command history for logging.
3393 */
3394int
3395zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3396{
3397 if (history_str == NULL)
3398 return (EINVAL);
3399
3400 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3401 return (EINVAL);
3402
3403 if (hdl->libzfs_log_str != NULL)
3404 free(hdl->libzfs_log_str);
3405
3406 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3407 return (no_memory(hdl));
3408
3409 return (0);
3410}
3411
3412/*
3413 * Perform ioctl to get some command history of a pool.
3414 *
3415 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3416 * logical offset of the history buffer to start reading from.
3417 *
3418 * Upon return, 'off' is the next logical offset to read from and
3419 * 'len' is the actual number of bytes read into 'buf'.
3420 */
3421static int
3422get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3423{
2598c001 3424 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3425 libzfs_handle_t *hdl = zhp->zpool_hdl;
3426
3427 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3428
3429 zc.zc_history = (uint64_t)(uintptr_t)buf;
3430 zc.zc_history_len = *len;
3431 zc.zc_history_offset = *off;
3432
3433 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3434 switch (errno) {
3435 case EPERM:
3436 return (zfs_error_fmt(hdl, EZFS_PERM,
3437 dgettext(TEXT_DOMAIN,
3438 "cannot show history for pool '%s'"),
3439 zhp->zpool_name));
3440 case ENOENT:
3441 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3442 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3443 "'%s'"), zhp->zpool_name));
3444 case ENOTSUP:
3445 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3446 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3447 "'%s', pool must be upgraded"), zhp->zpool_name));
3448 default:
3449 return (zpool_standard_error_fmt(hdl, errno,
3450 dgettext(TEXT_DOMAIN,
3451 "cannot get history for '%s'"), zhp->zpool_name));
3452 }
3453 }
3454
3455 *len = zc.zc_history_len;
3456 *off = zc.zc_history_offset;
3457
3458 return (0);
3459}
3460
3461/*
3462 * Process the buffer of nvlists, unpacking and storing each nvlist record
3463 * into 'records'. 'leftover' is set to the number of bytes that weren't
3464 * processed as there wasn't a complete record.
3465 */
428870ff 3466int
34dc7c2f
BB
3467zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3468 nvlist_t ***records, uint_t *numrecords)
3469{
3470 uint64_t reclen;
3471 nvlist_t *nv;
3472 int i;
3473
3474 while (bytes_read > sizeof (reclen)) {
3475
3476 /* get length of packed record (stored as little endian) */
3477 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3478 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3479
3480 if (bytes_read < sizeof (reclen) + reclen)
3481 break;
3482
3483 /* unpack record */
3484 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3485 return (ENOMEM);
3486 bytes_read -= sizeof (reclen) + reclen;
3487 buf += sizeof (reclen) + reclen;
3488
3489 /* add record to nvlist array */
3490 (*numrecords)++;
3491 if (ISP2(*numrecords + 1)) {
3492 *records = realloc(*records,
3493 *numrecords * 2 * sizeof (nvlist_t *));
3494 }
3495 (*records)[*numrecords - 1] = nv;
3496 }
3497
3498 *leftover = bytes_read;
3499 return (0);
3500}
3501
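/*
 * Worked example (sketch): a record packed to 0x1234 bytes is preceded
 * by the eight little-endian length bytes 34 12 00 00 00 00 00 00; the
 * loop above reassembles reclen == 0x1234 regardless of host byte order.
 */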
3502#define HIS_BUF_LEN (128*1024)
3503
3504/*
3505 * Retrieve the command history of a pool.
3506 */
3507int
3508zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3509{
3510 char buf[HIS_BUF_LEN];
3511 uint64_t off = 0;
3512 nvlist_t **records = NULL;
3513 uint_t numrecords = 0;
3514 int err, i;
3515
3516 do {
3517 uint64_t bytes_read = sizeof (buf);
3518 uint64_t leftover;
3519
3520 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3521 break;
3522
3523 /* if nothing else was read in, we're at EOF, just return */
3524 if (!bytes_read)
3525 break;
3526
3527 if ((err = zpool_history_unpack(buf, bytes_read,
3528 &leftover, &records, &numrecords)) != 0)
3529 break;
3530 off -= leftover;
3531
3532 /* CONSTCOND */
3533 } while (1);
3534
3535 if (!err) {
3536 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3537 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3538 records, numrecords) == 0);
3539 }
3540 for (i = 0; i < numrecords; i++)
3541 nvlist_free(records[i]);
3542 free(records);
3543
3544 return (err);
3545}
3546
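/*
 * Usage sketch: dump the unpacked history records.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &nrecords) == 0);
 *		for (i = 0; i < nrecords; i++)
 *			... each record carries e.g. ZPOOL_HIST_CMD ...
 *		nvlist_free(nvhis);
 *	}
 */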
26685276
BB
3547/*
3548 * Retrieve the next event. If there is a new event available 'nvp' will
3549 * contain a newly allocated nvlist and 'dropped' will be set to the number
3550 * of missed events since the last call to this function. When 'nvp' is
3551 * set to NULL it indicates no new events are available. In either case
3552 * the function returns 0 and it is up to the caller to free 'nvp'. In
3553 * the case of a fatal error the function will return a non-zero value.
3554 * When the function is called in blocking mode it will not return until
3555 * a new event is available.
3556 */
3557int
3558zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3559 int *dropped, int block, int cleanup_fd)
3560{
3561 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3562 int error = 0;
3563
3564 *nvp = NULL;
3565 *dropped = 0;
3566 zc.zc_cleanup_fd = cleanup_fd;
3567
3568 if (!block)
3569 zc.zc_guid = ZEVENT_NONBLOCK;
3570
3571 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3572 return (-1);
3573
3574retry:
3575 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3576 switch (errno) {
3577 case ESHUTDOWN:
3578 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3579 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3580 goto out;
3581 case ENOENT:
3582 /* Blocking error case should not occur */
3583 if (block)
3584 error = zpool_standard_error_fmt(hdl, errno,
3585 dgettext(TEXT_DOMAIN, "cannot get event"));
3586
3587 goto out;
3588 case ENOMEM:
3589 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3590 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3591 dgettext(TEXT_DOMAIN, "cannot get event"));
3592 goto out;
3593 } else {
3594 goto retry;
3595 }
3596 default:
3597 error = zpool_standard_error_fmt(hdl, errno,
3598 dgettext(TEXT_DOMAIN, "cannot get event"));
3599 goto out;
3600 }
3601 }
3602
3603 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3604 if (error != 0)
3605 goto out;
3606
3607 *dropped = (int)zc.zc_cookie;
3608out:
3609 zcmd_free_nvlists(&zc);
3610
3611 return (error);
3612}
3613
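/*
 * Consumer sketch, patterned on 'zpool events -f': block for each new
 * event and account for drops.  Obtaining 'cleanup_fd' (an open
 * descriptor on ZFS_DEV) is elided.
 *
 *	nvlist_t *nvp;
 *	int dropped;
 *
 *	for (;;) {
 *		if (zpool_events_next(hdl, &nvp, &dropped, 1,
 *		    cleanup_fd) != 0)
 *			break;
 *		if (nvp == NULL)
 *			continue;
 *		if (dropped > 0)
 *			... warn about missed events ...
 *		nvlist_free(nvp);
 *	}
 */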
3614/*
3615 * Clear all events.
3616 */
3617int
3618zpool_events_clear(libzfs_handle_t *hdl, int *count)
3619{
3620 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3621 char msg[1024];
3622
3623 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3624 "cannot clear events"));
3625
3626 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3627 return (zpool_standard_error_fmt(hdl, errno, msg));
3628
3629 if (count != NULL)
3630 *count = (int)zc.zc_cookie; /* # of events cleared */
3631
3632 return (0);
3633}
3634
34dc7c2f
BB
3635void
3636zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3637 char *pathname, size_t len)
3638{
2598c001 3639 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
34dc7c2f
BB
3640 boolean_t mounted = B_FALSE;
3641 char *mntpnt = NULL;
3642 char dsname[MAXNAMELEN];
3643
3644 if (dsobj == 0) {
3645 /* special case for the MOS */
b8864a23 3646 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", (longlong_t)obj);
34dc7c2f
BB
3647 return;
3648 }
3649
3650 /* get the dataset's name */
3651 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3652 zc.zc_obj = dsobj;
3653 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3654 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3655 /* just write out a path of two object numbers */
3656 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
b8864a23 3657 (longlong_t)dsobj, (longlong_t)obj);
34dc7c2f
BB
3658 return;
3659 }
3660 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3661
3662 /* find out if the dataset is mounted */
3663 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3664
3665 /* get the corrupted object's path */
3666 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3667 zc.zc_obj = obj;
3668 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3669 &zc) == 0) {
3670 if (mounted) {
3671 (void) snprintf(pathname, len, "%s%s", mntpnt,
3672 zc.zc_value);
3673 } else {
3674 (void) snprintf(pathname, len, "%s:%s",
3675 dsname, zc.zc_value);
3676 }
3677 } else {
b8864a23 3678 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (longlong_t)obj);
34dc7c2f
BB
3679 }
3680 free(mntpnt);
3681}
3682
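/*
 * Output forms (sketch): "<metadata>:<0x1c>" for the MOS,
 * "/tank/fs/file" when the dataset is mounted, "tank/fs:/file" when it
 * is not, and "tank/fs:<0x4f>" when the object path cannot be resolved.
 */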
b128c09f
BB
3683/*
3684 * Read the EFI label from the config, if a label does not exist then
3685 * pass back the error to the caller. If the caller has passed a non-NULL
3686 * diskaddr argument then we set it to the starting address of the EFI
3687 * partition.
3688 */
3689static int
3690read_efi_label(nvlist_t *config, diskaddr_t *sb)
3691{
3692 char *path;
3693 int fd;
3694 char diskname[MAXPATHLEN];
3695 int err = -1;
3696
3697 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3698 return (err);
3699
eac47204 3700 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
b128c09f 3701 strrchr(path, '/'));
d603ed6c 3702 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
b128c09f
BB
3703 struct dk_gpt *vtoc;
3704
3705 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3706 if (sb != NULL)
3707 *sb = vtoc->efi_parts[0].p_start;
3708 efi_free(vtoc);
3709 }
3710 (void) close(fd);
3711 }
3712 return (err);
3713}
3714
34dc7c2f
BB
3715/*
3716 * determine where a partition starts on a disk in the current
3717 * configuration
3718 */
3719static diskaddr_t
3720find_start_block(nvlist_t *config)
3721{
3722 nvlist_t **child;
3723 uint_t c, children;
34dc7c2f 3724 diskaddr_t sb = MAXOFFSET_T;
34dc7c2f
BB
3725 uint64_t wholedisk;
3726
3727 if (nvlist_lookup_nvlist_array(config,
3728 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3729 if (nvlist_lookup_uint64(config,
3730 ZPOOL_CONFIG_WHOLE_DISK,
3731 &wholedisk) != 0 || !wholedisk) {
3732 return (MAXOFFSET_T);
3733 }
b128c09f
BB
3734 if (read_efi_label(config, &sb) < 0)
3735 sb = MAXOFFSET_T;
34dc7c2f
BB
3736 return (sb);
3737 }
3738
3739 for (c = 0; c < children; c++) {
3740 sb = find_start_block(child[c]);
3741 if (sb != MAXOFFSET_T) {
3742 return (sb);
3743 }
3744 }
3745 return (MAXOFFSET_T);
3746}
3747
d603ed6c
BB
3748int
3749zpool_label_disk_wait(char *path, int timeout)
3750{
3751 struct stat64 statbuf;
3752 int i;
3753
3754 /*
3755 * Wait up to 'timeout' milliseconds for a newly created device to be available
3756 * from the given path. There is a small window when a /dev/ device
3757 * will exist and the udev link will not, so we must wait for the
3758 * symlink. Depending on the udev rules this may take a few seconds.
3759 */
3760 for (i = 0; i < timeout; i++) {
3761 usleep(1000);
3762
3763 errno = 0;
3764 if ((stat64(path, &statbuf) == 0) && (errno == 0))
3765 return (0);
3766 }
3767
3768 return (ENOENT);
3769}
3770
3771int
3772zpool_label_disk_check(char *path)
3773{
3774 struct dk_gpt *vtoc;
3775 int fd, err;
3776
3777 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
3778 return (errno);
3779
3780 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
3781 (void) close(fd);
3782 return (err);
3783 }
3784
3785 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
3786 efi_free(vtoc);
3787 (void) close(fd);
3788 return (EIDRM);
3789 }
3790
3791 efi_free(vtoc);
3792 (void) close(fd);
3793 return (0);
3794}
3795
34dc7c2f
BB
3796/*
3797 * Label an individual disk. The name provided is the short name,
3798 * stripped of any leading /dev path.
3799 */
3800int
3801zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3802{
3803 char path[MAXPATHLEN];
3804 struct dk_gpt *vtoc;
d603ed6c 3805 int rval, fd;
34dc7c2f
BB
3806 size_t resv = EFI_MIN_RESV_SIZE;
3807 uint64_t slice_size;
3808 diskaddr_t start_block;
3809 char errbuf[1024];
3810
3811 /* prepare an error message just in case */
3812 (void) snprintf(errbuf, sizeof (errbuf),
3813 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3814
3815 if (zhp) {
3816 nvlist_t *nvroot;
3817
c372b36e 3818#if defined(__sun__) || defined(__sun)
1bd201e7 3819 if (zpool_is_bootable(zhp)) {
b128c09f
BB
3820 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3821 "EFI labeled devices are not supported on root "
3822 "pools."));
3823 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3824 }
c372b36e 3825#endif
b128c09f 3826
34dc7c2f
BB
3827 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3828 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3829
3830 if (zhp->zpool_start_block == 0)
3831 start_block = find_start_block(nvroot);
3832 else
3833 start_block = zhp->zpool_start_block;
3834 zhp->zpool_start_block = start_block;
3835 } else {
3836 /* new pool */
3837 start_block = NEW_START_BLOCK;
3838 }
3839
eac47204 3840 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
34dc7c2f 3841
d603ed6c 3842 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
34dc7c2f
BB
3843 /*
3844 * This shouldn't happen. We've long since verified that this
3845 * is a valid device.
3846 */
109491a8
RL
3847 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3848 "label '%s': unable to open device: %d"), path, errno);
34dc7c2f
BB
3849 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3850 }
3851
3852 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3853 /*
3854 * The only way this can fail is if we run out of memory, or we
3855 * were unable to read the disk's capacity
3856 */
3857 if (errno == ENOMEM)
3858 (void) no_memory(hdl);
3859
3860 (void) close(fd);
109491a8
RL
3861 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3862 "label '%s': unable to read disk capacity"), path);
34dc7c2f
BB
3863
3864 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3865 }
3866
3867 slice_size = vtoc->efi_last_u_lba + 1;
3868 slice_size -= EFI_MIN_RESV_SIZE;
3869 if (start_block == MAXOFFSET_T)
3870 start_block = NEW_START_BLOCK;
3871 slice_size -= start_block;
613d88ed 3872 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
34dc7c2f
BB
3873
3874 vtoc->efi_parts[0].p_start = start_block;
3875 vtoc->efi_parts[0].p_size = slice_size;
3876
3877 /*
3878 * Why we use V_USR: V_BACKUP confuses users, and is considered
3879 * disposable by some EFI utilities (since EFI doesn't have a backup
3880 * slice). V_UNASSIGNED is supposed to be used only for zero size
3881 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3882 * etc. were all pretty specific. V_USR is as close to reality as we
3883 * can get, in the absence of V_OTHER.
3884 */
3885 vtoc->efi_parts[0].p_tag = V_USR;
3886 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3887
3888 vtoc->efi_parts[8].p_start = slice_size + start_block;
3889 vtoc->efi_parts[8].p_size = resv;
3890 vtoc->efi_parts[8].p_tag = V_RESERVED;
3891
b5a28807 3892 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
34dc7c2f
BB
3893 /*
3894 * Some block drivers (like pcata) may not support EFI
3895 * GPT labels. Print out a helpful error message directing
3896 * the user to manually label the disk and give
3897 * a specific slice.
3898 */
3899 (void) close(fd);
3900 efi_free(vtoc);
3901
d603ed6c
BB
3902 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
3903 "parted(8) and then provide a specific slice: %d"), rval);
34dc7c2f
BB
3904 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3905 }
3906
3907 (void) close(fd);
3908 efi_free(vtoc);
34dc7c2f 3909
eac47204
BB
3910 /* Wait for the first expected partition to appear. */
3911
3912 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
3913 (void) zfs_append_partition(path, MAXPATHLEN);
3914
d603ed6c
BB
3915 rval = zpool_label_disk_wait(path, 3000);
3916 if (rval) {
3917 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
3918 "detect device partitions on '%s': %d"), path, rval);
3919 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f
BB
3920 }
3921
d603ed6c
BB
3922 /* We can't be too paranoid. Read the label back and verify it. */
3923 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
3924 rval = zpool_label_disk_check(path);
3925 if (rval) {
3926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
3927 "EFI label on '%s' is damaged. Ensure\nthis device "
3928 "is not in in use, and is functioning properly: %d"),
3929 path, rval);
3930 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
34dc7c2f 3931 }
34dc7c2f 3932
d603ed6c 3933 return 0;
34dc7c2f 3934}