lib/libzfs/libzfs_pool.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2013 by Delphix. All rights reserved.
26 */
27
28 #include <ctype.h>
29 #include <errno.h>
30 #include <devid.h>
31 #include <fcntl.h>
32 #include <libintl.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <strings.h>
36 #include <unistd.h>
37 #include <libgen.h>
38 #include <zone.h>
39 #include <sys/stat.h>
40 #include <sys/efi_partition.h>
41 #include <sys/vtoc.h>
42 #include <sys/zfs_ioctl.h>
43 #include <dlfcn.h>
44
45 #include "zfs_namecheck.h"
46 #include "zfs_prop.h"
47 #include "libzfs_impl.h"
48 #include "zfs_comutil.h"
49 #include "zfeature_common.h"
50
51 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
52
53 typedef struct prop_flags {
54 int create:1; /* Validate property on creation */
55 int import:1; /* Validate property on import */
56 } prop_flags_t;
57
58 /*
59 * ====================================================================
60 * zpool property functions
61 * ====================================================================
62 */
63
64 static int
65 zpool_get_all_props(zpool_handle_t *zhp)
66 {
67 zfs_cmd_t zc = {"\0"};
68 libzfs_handle_t *hdl = zhp->zpool_hdl;
69
70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
71
72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
73 return (-1);
74
75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
76 if (errno == ENOMEM) {
77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
78 zcmd_free_nvlists(&zc);
79 return (-1);
80 }
81 } else {
82 zcmd_free_nvlists(&zc);
83 return (-1);
84 }
85 }
86
87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
88 zcmd_free_nvlists(&zc);
89 return (-1);
90 }
91
92 zcmd_free_nvlists(&zc);
93
94 return (0);
95 }
96
97 static int
98 zpool_props_refresh(zpool_handle_t *zhp)
99 {
100 nvlist_t *old_props;
101
102 old_props = zhp->zpool_props;
103
104 if (zpool_get_all_props(zhp) != 0)
105 return (-1);
106
107 nvlist_free(old_props);
108 return (0);
109 }
110
111 static char *
112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
113 zprop_source_t *src)
114 {
115 nvlist_t *nv, *nvl;
116 uint64_t ival;
117 char *value;
118 zprop_source_t source;
119
120 nvl = zhp->zpool_props;
121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
123 source = ival;
124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
125 } else {
126 source = ZPROP_SRC_DEFAULT;
127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
128 value = "-";
129 }
130
131 if (src)
132 *src = source;
133
134 return (value);
135 }
136
137 uint64_t
138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
139 {
140 nvlist_t *nv, *nvl;
141 uint64_t value;
142 zprop_source_t source;
143
144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
145 /*
146 * zpool_get_all_props() has most likely failed because
147 * the pool is faulted, but if all we need is the top level
148 * vdev's guid then get it from the zhp config nvlist.
149 */
150 if ((prop == ZPOOL_PROP_GUID) &&
151 (nvlist_lookup_nvlist(zhp->zpool_config,
152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
154 == 0)) {
155 return (value);
156 }
157 return (zpool_prop_default_numeric(prop));
158 }
159
160 nvl = zhp->zpool_props;
161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
163 source = value;
164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
165 } else {
166 source = ZPROP_SRC_DEFAULT;
167 value = zpool_prop_default_numeric(prop);
168 }
169
170 if (src)
171 *src = source;
172
173 return (value);
174 }
175
176 /*
177 * Map VDEV STATE to printed strings.
178 */
179 char *
180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
181 {
182 switch (state) {
183 default:
184 break;
185 case VDEV_STATE_CLOSED:
186 case VDEV_STATE_OFFLINE:
187 return (gettext("OFFLINE"));
188 case VDEV_STATE_REMOVED:
189 return (gettext("REMOVED"));
190 case VDEV_STATE_CANT_OPEN:
191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
192 return (gettext("FAULTED"));
193 else if (aux == VDEV_AUX_SPLIT_POOL)
194 return (gettext("SPLIT"));
195 else
196 return (gettext("UNAVAIL"));
197 case VDEV_STATE_FAULTED:
198 return (gettext("FAULTED"));
199 case VDEV_STATE_DEGRADED:
200 return (gettext("DEGRADED"));
201 case VDEV_STATE_HEALTHY:
202 return (gettext("ONLINE"));
203 }
204
205 return (gettext("UNKNOWN"));
206 }
207
208 /*
209 * Map POOL STATE to printed strings.
210 */
211 const char *
212 zpool_pool_state_to_name(pool_state_t state)
213 {
214 switch (state) {
215 default:
216 break;
217 case POOL_STATE_ACTIVE:
218 return (gettext("ACTIVE"));
219 case POOL_STATE_EXPORTED:
220 return (gettext("EXPORTED"));
221 case POOL_STATE_DESTROYED:
222 return (gettext("DESTROYED"));
223 case POOL_STATE_SPARE:
224 return (gettext("SPARE"));
225 case POOL_STATE_L2CACHE:
226 return (gettext("L2CACHE"));
227 case POOL_STATE_UNINITIALIZED:
228 return (gettext("UNINITIALIZED"));
229 case POOL_STATE_UNAVAIL:
230 return (gettext("UNAVAIL"));
231 case POOL_STATE_POTENTIALLY_ACTIVE:
232 return (gettext("POTENTIALLY_ACTIVE"));
233 }
234
235 return (gettext("UNKNOWN"));
236 }
237
238 /*
239 * API compatibility wrapper around zpool_get_prop_literal
240 */
241 int
242 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
243 zprop_source_t *srctype)
244 {
245 return (zpool_get_prop_literal(zhp, prop, buf, len, srctype, B_FALSE));
246 }
247
248 /*
249 * Get a zpool property value for 'prop' and return the value in
250 * a pre-allocated buffer.
251 */
252 int
253 zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
254 size_t len, zprop_source_t *srctype, boolean_t literal)
255 {
256 uint64_t intval;
257 const char *strval;
258 zprop_source_t src = ZPROP_SRC_NONE;
259 nvlist_t *nvroot;
260 vdev_stat_t *vs;
261 uint_t vsc;
262
263 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
264 switch (prop) {
265 case ZPOOL_PROP_NAME:
266 (void) strlcpy(buf, zpool_get_name(zhp), len);
267 break;
268
269 case ZPOOL_PROP_HEALTH:
270 (void) strlcpy(buf, "FAULTED", len);
271 break;
272
273 case ZPOOL_PROP_GUID:
274 intval = zpool_get_prop_int(zhp, prop, &src);
275 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
276 break;
277
278 case ZPOOL_PROP_ALTROOT:
279 case ZPOOL_PROP_CACHEFILE:
280 case ZPOOL_PROP_COMMENT:
281 if (zhp->zpool_props != NULL ||
282 zpool_get_all_props(zhp) == 0) {
283 (void) strlcpy(buf,
284 zpool_get_prop_string(zhp, prop, &src),
285 len);
286 if (srctype != NULL)
287 *srctype = src;
288 return (0);
289 }
290 /* FALLTHROUGH */
291 default:
292 (void) strlcpy(buf, "-", len);
293 break;
294 }
295
296 if (srctype != NULL)
297 *srctype = src;
298 return (0);
299 }
300
301 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
302 prop != ZPOOL_PROP_NAME)
303 return (-1);
304
305 switch (zpool_prop_get_type(prop)) {
306 case PROP_TYPE_STRING:
307 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
308 len);
309 break;
310
311 case PROP_TYPE_NUMBER:
312 intval = zpool_get_prop_int(zhp, prop, &src);
313
314 switch (prop) {
315 case ZPOOL_PROP_SIZE:
316 case ZPOOL_PROP_ALLOCATED:
317 case ZPOOL_PROP_FREE:
318 case ZPOOL_PROP_FREEING:
319 case ZPOOL_PROP_EXPANDSZ:
320 case ZPOOL_PROP_ASHIFT:
321 if (literal)
322 (void) snprintf(buf, len, "%llu",
323 (u_longlong_t)intval);
324 else
325 (void) zfs_nicenum(intval, buf, len);
326 break;
327
328 case ZPOOL_PROP_CAPACITY:
329 (void) snprintf(buf, len, "%llu%%",
330 (u_longlong_t)intval);
331 break;
332
333 case ZPOOL_PROP_DEDUPRATIO:
334 (void) snprintf(buf, len, "%llu.%02llux",
335 (u_longlong_t)(intval / 100),
336 (u_longlong_t)(intval % 100));
337 break;
338
339 case ZPOOL_PROP_HEALTH:
340 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
341 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
342 verify(nvlist_lookup_uint64_array(nvroot,
343 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
344 == 0);
345
346 (void) strlcpy(buf, zpool_state_to_name(intval,
347 vs->vs_aux), len);
348 break;
349 case ZPOOL_PROP_VERSION:
350 if (intval >= SPA_VERSION_FEATURES) {
351 (void) snprintf(buf, len, "-");
352 break;
353 }
354 /* FALLTHROUGH */
355 default:
356 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
357 }
358 break;
359
360 case PROP_TYPE_INDEX:
361 intval = zpool_get_prop_int(zhp, prop, &src);
362 if (zpool_prop_index_to_string(prop, intval, &strval)
363 != 0)
364 return (-1);
365 (void) strlcpy(buf, strval, len);
366 break;
367
368 default:
369 abort();
370 }
371
372 if (srctype)
373 *srctype = src;
374
375 return (0);
376 }
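
/*
 * Illustrative sketch (not upstream code): how a consumer might read pool
 * properties through the two entry points above. Assumes an already-open
 * zpool_handle_t; ZFS_MAXPROPLEN is the buffer size used elsewhere in
 * this library.
 */
static void
example_print_health_and_size(zpool_handle_t *zhp)
{
        char buf[ZFS_MAXPROPLEN];
        zprop_source_t src;

        /* Human-readable health string ("ONLINE", "DEGRADED", ...). */
        if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
            &src) == 0)
                (void) printf("health: %s\n", buf);

        /* Exact byte count, bypassing the zfs_nicenum() formatting. */
        if (zpool_get_prop_literal(zhp, ZPOOL_PROP_SIZE, buf, sizeof (buf),
            NULL, B_TRUE) == 0)
                (void) printf("size: %s bytes\n", buf);
}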
377
378 /*
379  * Check that the bootfs name refers to a dataset within the pool it is
380  * being set on. Assumes bootfs is already a valid dataset name.
381 */
382 static boolean_t
383 bootfs_name_valid(const char *pool, char *bootfs)
384 {
385 int len = strlen(pool);
386
387 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
388 return (B_FALSE);
389
390 if (strncmp(pool, bootfs, len) == 0 &&
391 (bootfs[len] == '/' || bootfs[len] == '\0'))
392 return (B_TRUE);
393
394 return (B_FALSE);
395 }
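
/*
 * Illustrative example of the boundary check above for a pool named "tank":
 * "tank" and "tank/ROOT/fs" are accepted, while "tankers/fs" (prefix match
 * without a '/' boundary) and "other/fs" (different pool) are rejected.
 */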
396
397 #if defined(__sun__) || defined(__sun)
398 /*
399 * Inspect the configuration to determine if any of the devices contain
400 * an EFI label.
401 */
402 static boolean_t
403 pool_uses_efi(nvlist_t *config)
404 {
405 nvlist_t **child;
406 uint_t c, children;
407
408 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
409 &child, &children) != 0)
410 return (read_efi_label(config, NULL) >= 0);
411
412 for (c = 0; c < children; c++) {
413 if (pool_uses_efi(child[c]))
414 return (B_TRUE);
415 }
416 return (B_FALSE);
417 }
418 #endif
419
420 boolean_t
421 zpool_is_bootable(zpool_handle_t *zhp)
422 {
423 char bootfs[ZPOOL_MAXNAMELEN];
424
425 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
426 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
427 sizeof (bootfs)) != 0);
428 }
429
430
431 /*
432 * Given an nvlist of zpool properties to be set, validate that they are
433 * correct, and parse any numeric properties (index, boolean, etc) if they are
434 * specified as strings.
435 */
436 static nvlist_t *
437 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
438 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
439 {
440 nvpair_t *elem;
441 nvlist_t *retprops;
442 zpool_prop_t prop;
443 char *strval;
444 uint64_t intval;
445 char *slash, *check;
446 struct stat64 statbuf;
447 zpool_handle_t *zhp;
448 nvlist_t *nvroot;
449
450 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
451 (void) no_memory(hdl);
452 return (NULL);
453 }
454
455 elem = NULL;
456 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
457 const char *propname = nvpair_name(elem);
458
459 prop = zpool_name_to_prop(propname);
460 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
461 int err;
462 char *fname = strchr(propname, '@') + 1;
463
464 err = zfeature_lookup_name(fname, NULL);
465 if (err != 0) {
466 ASSERT3U(err, ==, ENOENT);
467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
468 "invalid feature '%s'"), fname);
469 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
470 goto error;
471 }
472
473 if (nvpair_type(elem) != DATA_TYPE_STRING) {
474 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
475 "'%s' must be a string"), propname);
476 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
477 goto error;
478 }
479
480 (void) nvpair_value_string(elem, &strval);
481 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
482 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483 "property '%s' can only be set to "
484 "'enabled'"), propname);
485 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486 goto error;
487 }
488
489 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
490 (void) no_memory(hdl);
491 goto error;
492 }
493 continue;
494 }
495
496 /*
497 * Make sure this property is valid and applies to this type.
498 */
499 if (prop == ZPROP_INVAL) {
500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 "invalid property '%s'"), propname);
502 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
503 goto error;
504 }
505
506 if (zpool_prop_readonly(prop)) {
507 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
508 "is readonly"), propname);
509 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
510 goto error;
511 }
512
513 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
514 &strval, &intval, errbuf) != 0)
515 goto error;
516
517 /*
518 * Perform additional checking for specific properties.
519 */
520 switch (prop) {
521 default:
522 break;
523 case ZPOOL_PROP_VERSION:
524 if (intval < version ||
525 !SPA_VERSION_IS_SUPPORTED(intval)) {
526 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
527 "property '%s' number %d is invalid."),
528 propname, intval);
529 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
530 goto error;
531 }
532 break;
533
534 case ZPOOL_PROP_ASHIFT:
535 if (!flags.create) {
536 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
537 "property '%s' can only be set at "
538 "creation time"), propname);
539 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
540 goto error;
541 }
542
543 if (intval != 0 && (intval < 9 || intval > 13)) {
544 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
545 "property '%s' number %d is invalid."),
546 propname, intval);
547 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
548 goto error;
549 }
550 break;
551
552 case ZPOOL_PROP_BOOTFS:
553 if (flags.create || flags.import) {
554 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
555 "property '%s' cannot be set at creation "
556 "or import time"), propname);
557 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
558 goto error;
559 }
560
561 if (version < SPA_VERSION_BOOTFS) {
562 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
563 "pool must be upgraded to support "
564 "'%s' property"), propname);
565 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
566 goto error;
567 }
568
569 /*
570 * bootfs property value has to be a dataset name and
571  * the dataset has to be in the same pool the property is set on.
572 */
573 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
574 strval)) {
575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
576 "is an invalid name"), strval);
577 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
578 goto error;
579 }
580
581 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
582 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
583 "could not open pool '%s'"), poolname);
584 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
585 goto error;
586 }
587 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
588 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
589
590 #if defined(__sun__) || defined(__sun)
591 /*
592 * bootfs property cannot be set on a disk which has
593 * been EFI labeled.
594 */
595 if (pool_uses_efi(nvroot)) {
596 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
597 "property '%s' not supported on "
598 "EFI labeled devices"), propname);
599 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
600 zpool_close(zhp);
601 goto error;
602 }
603 #endif
604 zpool_close(zhp);
605 break;
606
607 case ZPOOL_PROP_ALTROOT:
608 if (!flags.create && !flags.import) {
609 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
610 "property '%s' can only be set during pool "
611 "creation or import"), propname);
612 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
613 goto error;
614 }
615
616 if (strval[0] != '/') {
617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
618 "bad alternate root '%s'"), strval);
619 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
620 goto error;
621 }
622 break;
623
624 case ZPOOL_PROP_CACHEFILE:
625 if (strval[0] == '\0')
626 break;
627
628 if (strcmp(strval, "none") == 0)
629 break;
630
631 if (strval[0] != '/') {
632 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
633 "property '%s' must be empty, an "
634 "absolute path, or 'none'"), propname);
635 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
636 goto error;
637 }
638
639 slash = strrchr(strval, '/');
640
641 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
642 strcmp(slash, "/..") == 0) {
643 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
644 "'%s' is not a valid file"), strval);
645 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
646 goto error;
647 }
648
649 *slash = '\0';
650
651 if (strval[0] != '\0' &&
652 (stat64(strval, &statbuf) != 0 ||
653 !S_ISDIR(statbuf.st_mode))) {
654 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
655 "'%s' is not a valid directory"),
656 strval);
657 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
658 goto error;
659 }
660
661 *slash = '/';
662 break;
663
664 case ZPOOL_PROP_COMMENT:
665 for (check = strval; *check != '\0'; check++) {
666 if (!isprint(*check)) {
667 zfs_error_aux(hdl,
668 dgettext(TEXT_DOMAIN,
669 "comment may only have printable "
670 "characters"));
671 (void) zfs_error(hdl, EZFS_BADPROP,
672 errbuf);
673 goto error;
674 }
675 }
676 if (strlen(strval) > ZPROP_MAX_COMMENT) {
677 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
678 "comment must not exceed %d characters"),
679 ZPROP_MAX_COMMENT);
680 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
681 goto error;
682 }
683 break;
684 case ZPOOL_PROP_READONLY:
685 if (!flags.import) {
686 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
687 "property '%s' can only be set at "
688 "import time"), propname);
689 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
690 goto error;
691 }
692 break;
693 }
694 }
695
696 return (retprops);
697 error:
698 nvlist_free(retprops);
699 return (NULL);
700 }
701
702 /*
703 * Set zpool property : propname=propval.
704 */
705 int
706 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
707 {
708 zfs_cmd_t zc = {"\0"};
709 int ret = -1;
710 char errbuf[1024];
711 nvlist_t *nvl = NULL;
712 nvlist_t *realprops;
713 uint64_t version;
714 prop_flags_t flags = { 0 };
715
716 (void) snprintf(errbuf, sizeof (errbuf),
717 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
718 zhp->zpool_name);
719
720 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
721 return (no_memory(zhp->zpool_hdl));
722
723 if (nvlist_add_string(nvl, propname, propval) != 0) {
724 nvlist_free(nvl);
725 return (no_memory(zhp->zpool_hdl));
726 }
727
728 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
729 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
730 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
731 nvlist_free(nvl);
732 return (-1);
733 }
734
735 nvlist_free(nvl);
736 nvl = realprops;
737
738 /*
739 * Execute the corresponding ioctl() to set this property.
740 */
741 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
742
743 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
744 nvlist_free(nvl);
745 return (-1);
746 }
747
748 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
749
750 zcmd_free_nvlists(&zc);
751 nvlist_free(nvl);
752
753 if (ret)
754 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
755 else
756 (void) zpool_props_refresh(zhp);
757
758 return (ret);
759 }
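
/*
 * Illustrative sketch (not upstream code): setting a single property
 * through the validated path above. The property name is the same string
 * accepted by 'zpool set'.
 */
static int
example_set_comment(zpool_handle_t *zhp)
{
        return (zpool_set_prop(zhp, zpool_prop_to_name(ZPOOL_PROP_COMMENT),
            "scratch pool, safe to destroy"));
}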
760
761 int
762 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
763 {
764 libzfs_handle_t *hdl = zhp->zpool_hdl;
765 zprop_list_t *entry;
766 char buf[ZFS_MAXPROPLEN];
767 nvlist_t *features = NULL;
768 nvpair_t *nvp;
769 zprop_list_t **last;
770 boolean_t firstexpand = (NULL == *plp);
771 int i;
772
773 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
774 return (-1);
775
776 last = plp;
777 while (*last != NULL)
778 last = &(*last)->pl_next;
779
780 if ((*plp)->pl_all)
781 features = zpool_get_features(zhp);
782
783 if ((*plp)->pl_all && firstexpand) {
784 for (i = 0; i < SPA_FEATURES; i++) {
785 zprop_list_t *entry = zfs_alloc(hdl,
786 sizeof (zprop_list_t));
787 entry->pl_prop = ZPROP_INVAL;
788 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
789 spa_feature_table[i].fi_uname);
790 entry->pl_width = strlen(entry->pl_user_prop);
791 entry->pl_all = B_TRUE;
792
793 *last = entry;
794 last = &entry->pl_next;
795 }
796 }
797
798 /* add any unsupported features */
799 for (nvp = nvlist_next_nvpair(features, NULL);
800 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
801 char *propname;
802 boolean_t found;
803 zprop_list_t *entry;
804
805 if (zfeature_is_supported(nvpair_name(nvp)))
806 continue;
807
808 propname = zfs_asprintf(hdl, "unsupported@%s",
809 nvpair_name(nvp));
810
811 /*
812  * Before adding the property to the list, make sure that no
813 * other pool already added the same property.
814 */
815 found = B_FALSE;
816 entry = *plp;
817 while (entry != NULL) {
818 if (entry->pl_user_prop != NULL &&
819 strcmp(propname, entry->pl_user_prop) == 0) {
820 found = B_TRUE;
821 break;
822 }
823 entry = entry->pl_next;
824 }
825 if (found) {
826 free(propname);
827 continue;
828 }
829
830 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
831 entry->pl_prop = ZPROP_INVAL;
832 entry->pl_user_prop = propname;
833 entry->pl_width = strlen(entry->pl_user_prop);
834 entry->pl_all = B_TRUE;
835
836 *last = entry;
837 last = &entry->pl_next;
838 }
839
840 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
841
842 if (entry->pl_fixed)
843 continue;
844
845 if (entry->pl_prop != ZPROP_INVAL &&
846 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
847 NULL) == 0) {
848 if (strlen(buf) > entry->pl_width)
849 entry->pl_width = strlen(buf);
850 }
851 }
852
853 return (0);
854 }
855
856 /*
857 * Get the state for the given feature on the given ZFS pool.
858 */
859 int
860 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
861 size_t len)
862 {
863 uint64_t refcount;
864 boolean_t found = B_FALSE;
865 nvlist_t *features = zpool_get_features(zhp);
866 boolean_t supported;
867 const char *feature = strchr(propname, '@') + 1;
868
869 supported = zpool_prop_feature(propname);
870 ASSERT(supported || zpool_prop_unsupported(propname));
871
872 /*
873 * Convert from feature name to feature guid. This conversion is
874  * unnecessary for unsupported@... properties because they already
875 * use guids.
876 */
877 if (supported) {
878 int ret;
879 spa_feature_t fid;
880
881 ret = zfeature_lookup_name(feature, &fid);
882 if (ret != 0) {
883 (void) strlcpy(buf, "-", len);
884 return (ENOTSUP);
885 }
886 feature = spa_feature_table[fid].fi_guid;
887 }
888
889 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
890 found = B_TRUE;
891
892 if (supported) {
893 if (!found) {
894 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
895 } else {
896 if (refcount == 0)
897 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
898 else
899 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
900 }
901 } else {
902 if (found) {
903 if (refcount == 0) {
904 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
905 } else {
906 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
907 }
908 } else {
909 (void) strlcpy(buf, "-", len);
910 return (ENOTSUP);
911 }
912 }
913
914 return (0);
915 }
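
/*
 * Illustrative sketch (not upstream code): querying a feature's state by
 * its "feature@..." property name. "async_destroy" is assumed to be a
 * feature known to this build's feature table.
 */
static void
example_print_feature_state(zpool_handle_t *zhp)
{
        char state[64];

        if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
            sizeof (state)) == 0)
                (void) printf("async_destroy: %s\n", state);
}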
916
917 /*
918 * Don't start the slice at the default block of 34; many storage
919  * devices will use a stripe width of 128k, while other vendors prefer a 1m
920 * alignment. It is best to play it safe and ensure a 1m alignment
921 * given 512B blocks. When the block size is larger by a power of 2
922 * we will still be 1m aligned. Some devices are sensitive to the
923 * partition ending alignment as well.
924 */
925 #define NEW_START_BLOCK 2048
926 #define PARTITION_END_ALIGNMENT 2048
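
/*
 * Illustrative arithmetic: with 512-byte sectors, NEW_START_BLOCK works out
 * to 2048 * 512 = 1 MiB, and an arbitrary start sector can be rounded up to
 * that boundary with plain integer math, e.g.:
 *
 *      start = ((start + NEW_START_BLOCK - 1) / NEW_START_BLOCK) *
 *          NEW_START_BLOCK;
 */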
927
928 /*
929  * Validate the given pool name, optionally reporting an extended error
930  * message through 'hdl'.
931 */
932 boolean_t
933 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
934 {
935 namecheck_err_t why;
936 char what;
937 int ret;
938
939 ret = pool_namecheck(pool, &why, &what);
940
941 /*
942 * The rules for reserved pool names were extended at a later point.
943 * But we need to support users with existing pools that may now be
944 * invalid. So we only check for this expanded set of names during a
945 * create (or import), and only in userland.
946 */
947 if (ret == 0 && !isopen &&
948 (strncmp(pool, "mirror", 6) == 0 ||
949 strncmp(pool, "raidz", 5) == 0 ||
950 strncmp(pool, "spare", 5) == 0 ||
951 strcmp(pool, "log") == 0)) {
952 if (hdl != NULL)
953 zfs_error_aux(hdl,
954 dgettext(TEXT_DOMAIN, "name is reserved"));
955 return (B_FALSE);
956 }
957
958
959 if (ret != 0) {
960 if (hdl != NULL) {
961 switch (why) {
962 case NAME_ERR_TOOLONG:
963 zfs_error_aux(hdl,
964 dgettext(TEXT_DOMAIN, "name is too long"));
965 break;
966
967 case NAME_ERR_INVALCHAR:
968 zfs_error_aux(hdl,
969 dgettext(TEXT_DOMAIN, "invalid character "
970 "'%c' in pool name"), what);
971 break;
972
973 case NAME_ERR_NOLETTER:
974 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
975 "name must begin with a letter"));
976 break;
977
978 case NAME_ERR_RESERVED:
979 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
980 "name is reserved"));
981 break;
982
983 case NAME_ERR_DISKLIKE:
984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
985 "pool name is reserved"));
986 break;
987
988 case NAME_ERR_LEADING_SLASH:
989 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
990 "leading slash in name"));
991 break;
992
993 case NAME_ERR_EMPTY_COMPONENT:
994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
995 "empty component in name"));
996 break;
997
998 case NAME_ERR_TRAILING_SLASH:
999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1000 "trailing slash in name"));
1001 break;
1002
1003 case NAME_ERR_MULTIPLE_AT:
1004 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1005 "multiple '@' delimiters in name"));
1006 break;
1007 case NAME_ERR_NO_AT:
1008 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1009 "permission set is missing '@'"));
1010 break;
1011 }
1012 }
1013 return (B_FALSE);
1014 }
1015
1016 return (B_TRUE);
1017 }
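
/*
 * Illustrative results of the checks above when called from a create-style
 * context (isopen == B_FALSE):
 *
 *      zpool_name_valid(hdl, B_FALSE, "tank")   -> B_TRUE
 *      zpool_name_valid(hdl, B_FALSE, "mirror") -> B_FALSE (reserved)
 *      zpool_name_valid(hdl, B_FALSE, "1tank")  -> B_FALSE (must begin
 *                                                  with a letter)
 */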
1018
1019 /*
1020 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1021 * state.
1022 */
1023 zpool_handle_t *
1024 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1025 {
1026 zpool_handle_t *zhp;
1027 boolean_t missing;
1028
1029 /*
1030 * Make sure the pool name is valid.
1031 */
1032 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1033 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1034 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1035 pool);
1036 return (NULL);
1037 }
1038
1039 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1040 return (NULL);
1041
1042 zhp->zpool_hdl = hdl;
1043 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1044
1045 if (zpool_refresh_stats(zhp, &missing) != 0) {
1046 zpool_close(zhp);
1047 return (NULL);
1048 }
1049
1050 if (missing) {
1051 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1052 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1053 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1054 zpool_close(zhp);
1055 return (NULL);
1056 }
1057
1058 return (zhp);
1059 }
1060
1061 /*
1062 * Like the above, but silent on error. Used when iterating over pools (because
1063 * the configuration cache may be out of date).
1064 */
1065 int
1066 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1067 {
1068 zpool_handle_t *zhp;
1069 boolean_t missing;
1070
1071 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1072 return (-1);
1073
1074 zhp->zpool_hdl = hdl;
1075 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1076
1077 if (zpool_refresh_stats(zhp, &missing) != 0) {
1078 zpool_close(zhp);
1079 return (-1);
1080 }
1081
1082 if (missing) {
1083 zpool_close(zhp);
1084 *ret = NULL;
1085 return (0);
1086 }
1087
1088 *ret = zhp;
1089 return (0);
1090 }
1091
1092 /*
1093 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1094 * state.
1095 */
1096 zpool_handle_t *
1097 zpool_open(libzfs_handle_t *hdl, const char *pool)
1098 {
1099 zpool_handle_t *zhp;
1100
1101 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1102 return (NULL);
1103
1104 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1105 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1106 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1107 zpool_close(zhp);
1108 return (NULL);
1109 }
1110
1111 return (zhp);
1112 }
1113
1114 /*
1115 * Close the handle. Simply frees the memory associated with the handle.
1116 */
1117 void
1118 zpool_close(zpool_handle_t *zhp)
1119 {
1120 if (zhp->zpool_config)
1121 nvlist_free(zhp->zpool_config);
1122 if (zhp->zpool_old_config)
1123 nvlist_free(zhp->zpool_old_config);
1124 if (zhp->zpool_props)
1125 nvlist_free(zhp->zpool_props);
1126 free(zhp);
1127 }
1128
1129 /*
1130 * Return the name of the pool.
1131 */
1132 const char *
1133 zpool_get_name(zpool_handle_t *zhp)
1134 {
1135 return (zhp->zpool_name);
1136 }
1137
1138
1139 /*
1140 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1141 */
1142 int
1143 zpool_get_state(zpool_handle_t *zhp)
1144 {
1145 return (zhp->zpool_state);
1146 }
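
/*
 * Illustrative sketch (not upstream code): the usual open/inspect/close
 * life cycle. libzfs_init() and libzfs_fini() are assumed to be the
 * library entry points declared in libzfs.h.
 */
static void
example_report_pool(const char *name)
{
        libzfs_handle_t *hdl;
        zpool_handle_t *zhp;

        if ((hdl = libzfs_init()) == NULL)
                return;

        if ((zhp = zpool_open(hdl, name)) != NULL) {
                (void) printf("%s: %s\n", zpool_get_name(zhp),
                    zpool_pool_state_to_name(zpool_get_state(zhp)));
                zpool_close(zhp);
        }

        libzfs_fini(hdl);
}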
1147
1148 /*
1149 * Create the named pool, using the provided vdev list. It is assumed
1150 * that the consumer has already validated the contents of the nvlist, so we
1151 * don't have to worry about error semantics.
1152 */
1153 int
1154 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1155 nvlist_t *props, nvlist_t *fsprops)
1156 {
1157 zfs_cmd_t zc = {"\0"};
1158 nvlist_t *zc_fsprops = NULL;
1159 nvlist_t *zc_props = NULL;
1160 char msg[1024];
1161 int ret = -1;
1162
1163 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1164 "cannot create '%s'"), pool);
1165
1166 if (!zpool_name_valid(hdl, B_FALSE, pool))
1167 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1168
1169 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1170 return (-1);
1171
1172 if (props) {
1173 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1174
1175 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1176 SPA_VERSION_1, flags, msg)) == NULL) {
1177 goto create_failed;
1178 }
1179 }
1180
1181 if (fsprops) {
1182 uint64_t zoned;
1183 char *zonestr;
1184
1185 zoned = ((nvlist_lookup_string(fsprops,
1186 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1187 strcmp(zonestr, "on") == 0);
1188
1189 if ((zc_fsprops = zfs_valid_proplist(hdl,
1190 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
1191 goto create_failed;
1192 }
1193 if (!zc_props &&
1194 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1195 goto create_failed;
1196 }
1197 if (nvlist_add_nvlist(zc_props,
1198 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1199 goto create_failed;
1200 }
1201 }
1202
1203 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1204 goto create_failed;
1205
1206 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1207
1208 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1209
1210 zcmd_free_nvlists(&zc);
1211 nvlist_free(zc_props);
1212 nvlist_free(zc_fsprops);
1213
1214 switch (errno) {
1215 case EBUSY:
1216 /*
1217 * This can happen if the user has specified the same
1218 * device multiple times. We can't reliably detect this
1219 * until we try to add it and see we already have a
1220  * label. This can also happen if the device is
1221 * part of an active md or lvm device.
1222 */
1223 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1224 "one or more vdevs refer to the same device, or "
1225 "one of\nthe devices is part of an active md or "
1226 "lvm device"));
1227 return (zfs_error(hdl, EZFS_BADDEV, msg));
1228
1229 case EOVERFLOW:
1230 /*
1231 * This occurs when one of the devices is below
1232 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1233 * device was the problem device since there's no
1234 * reliable way to determine device size from userland.
1235 */
1236 {
1237 char buf[64];
1238
1239 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1240
1241 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1242 "one or more devices is less than the "
1243 "minimum size (%s)"), buf);
1244 }
1245 return (zfs_error(hdl, EZFS_BADDEV, msg));
1246
1247 case ENOSPC:
1248 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1249 "one or more devices is out of space"));
1250 return (zfs_error(hdl, EZFS_BADDEV, msg));
1251
1252 case ENOTBLK:
1253 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1254 "cache device must be a disk or disk slice"));
1255 return (zfs_error(hdl, EZFS_BADDEV, msg));
1256
1257 default:
1258 return (zpool_standard_error(hdl, errno, msg));
1259 }
1260 }
1261
1262 create_failed:
1263 zcmd_free_nvlists(&zc);
1264 nvlist_free(zc_props);
1265 nvlist_free(zc_fsprops);
1266 return (ret);
1267 }
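
/*
 * Illustrative sketch (not upstream code): creating a pool from an
 * already-built vdev tree. Constructing 'nvroot' itself is non-trivial
 * (the zpool(8) command builds it from its command-line arguments), so it
 * is assumed to exist here. The props nvlist shows a create-time-only
 * property (ashift) flowing through zpool_valid_proplist() above.
 */
static int
example_create_pool(libzfs_handle_t *hdl, nvlist_t *nvroot)
{
        nvlist_t *props = NULL;
        int err;

        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
                return (-1);
        if (nvlist_add_string(props, zpool_prop_to_name(ZPOOL_PROP_ASHIFT),
            "12") != 0) {
                nvlist_free(props);
                return (-1);
        }

        err = zpool_create(hdl, "tank", nvroot, props, NULL);
        nvlist_free(props);
        return (err);
}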
1268
1269 /*
1270 * Destroy the given pool. It is up to the caller to ensure that there are no
1271 * datasets left in the pool.
1272 */
1273 int
1274 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1275 {
1276 zfs_cmd_t zc = {"\0"};
1277 zfs_handle_t *zfp = NULL;
1278 libzfs_handle_t *hdl = zhp->zpool_hdl;
1279 char msg[1024];
1280
1281 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1282 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1283 return (-1);
1284
1285 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1286 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1287
1288 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1289 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1290 "cannot destroy '%s'"), zhp->zpool_name);
1291
1292 if (errno == EROFS) {
1293 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1294 "one or more devices is read only"));
1295 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1296 } else {
1297 (void) zpool_standard_error(hdl, errno, msg);
1298 }
1299
1300 if (zfp)
1301 zfs_close(zfp);
1302 return (-1);
1303 }
1304
1305 if (zfp) {
1306 remove_mountpoint(zfp);
1307 zfs_close(zfp);
1308 }
1309
1310 return (0);
1311 }
1312
1313 /*
1314 * Add the given vdevs to the pool. The caller must have already performed the
1315 * necessary verification to ensure that the vdev specification is well-formed.
1316 */
1317 int
1318 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1319 {
1320 zfs_cmd_t zc = {"\0"};
1321 int ret;
1322 libzfs_handle_t *hdl = zhp->zpool_hdl;
1323 char msg[1024];
1324 nvlist_t **spares, **l2cache;
1325 uint_t nspares, nl2cache;
1326
1327 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1328 "cannot add to '%s'"), zhp->zpool_name);
1329
1330 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1331 SPA_VERSION_SPARES &&
1332 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1333 &spares, &nspares) == 0) {
1334 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1335 "upgraded to add hot spares"));
1336 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1337 }
1338
1339 #if defined(__sun__) || defined(__sun)
1340 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1341 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1342 uint64_t s;
1343
1344 for (s = 0; s < nspares; s++) {
1345 char *path;
1346
1347 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1348 &path) == 0 && pool_uses_efi(spares[s])) {
1349 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1350 "device '%s' contains an EFI label and "
1351 "cannot be used on root pools."),
1352 zpool_vdev_name(hdl, NULL, spares[s],
1353 B_FALSE));
1354 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1355 }
1356 }
1357 }
1358 #endif
1359
1360 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1361 SPA_VERSION_L2CACHE &&
1362 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1363 &l2cache, &nl2cache) == 0) {
1364 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1365 "upgraded to add cache devices"));
1366 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1367 }
1368
1369 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1370 return (-1);
1371 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1372
1373 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1374 switch (errno) {
1375 case EBUSY:
1376 /*
1377 * This can happen if the user has specified the same
1378 * device multiple times. We can't reliably detect this
1379 * until we try to add it and see we already have a
1380 * label.
1381 */
1382 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1383 "one or more vdevs refer to the same device"));
1384 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1385 break;
1386
1387 case EOVERFLOW:
1388 /*
1389  * This occurs when one of the devices is below
1390 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1391 * device was the problem device since there's no
1392 * reliable way to determine device size from userland.
1393 */
1394 {
1395 char buf[64];
1396
1397 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1398
1399 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1400 "device is less than the minimum "
1401 "size (%s)"), buf);
1402 }
1403 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1404 break;
1405
1406 case ENOTSUP:
1407 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1408 "pool must be upgraded to add these vdevs"));
1409 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1410 break;
1411
1412 case ENOTBLK:
1413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1414 "cache device must be a disk or disk slice"));
1415 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1416 break;
1417
1418 default:
1419 (void) zpool_standard_error(hdl, errno, msg);
1420 }
1421
1422 ret = -1;
1423 } else {
1424 ret = 0;
1425 }
1426
1427 zcmd_free_nvlists(&zc);
1428
1429 return (ret);
1430 }
1431
1432 /*
1433 * Exports the pool from the system. The caller must ensure that there are no
1434 * mounted datasets in the pool.
1435 */
1436 static int
1437 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1438 const char *log_str)
1439 {
1440 zfs_cmd_t zc = {"\0"};
1441 char msg[1024];
1442
1443 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1444 "cannot export '%s'"), zhp->zpool_name);
1445
1446 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1447 zc.zc_cookie = force;
1448 zc.zc_guid = hardforce;
1449 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1450
1451 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1452 switch (errno) {
1453 case EXDEV:
1454 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1455 "use '-f' to override the following errors:\n"
1456 "'%s' has an active shared spare which could be"
1457 " used by other pools once '%s' is exported."),
1458 zhp->zpool_name, zhp->zpool_name);
1459 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1460 msg));
1461 default:
1462 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1463 msg));
1464 }
1465 }
1466
1467 return (0);
1468 }
1469
1470 int
1471 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1472 {
1473 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1474 }
1475
1476 int
1477 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1478 {
1479 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1480 }
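
/*
 * Illustrative sketch (not upstream code): a plain export, recording the
 * issuing command in the pool history via 'log_str' the same way the
 * zpool(8) utility does.
 */
static int
example_export(zpool_handle_t *zhp)
{
        return (zpool_export(zhp, B_FALSE, "zpool export tank"));
}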
1481
1482 static void
1483 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1484 nvlist_t *config)
1485 {
1486 nvlist_t *nv = NULL;
1487 uint64_t rewindto;
1488 int64_t loss = -1;
1489 struct tm t;
1490 char timestr[128];
1491
1492 if (!hdl->libzfs_printerr || config == NULL)
1493 return;
1494
1495 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1496 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1497 return;
1498 }
1499
1500 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1501 return;
1502 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1503
1504 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1505 strftime(timestr, 128, "%c", &t) != 0) {
1506 if (dryrun) {
1507 (void) printf(dgettext(TEXT_DOMAIN,
1508 "Would be able to return %s "
1509 "to its state as of %s.\n"),
1510 name, timestr);
1511 } else {
1512 (void) printf(dgettext(TEXT_DOMAIN,
1513 "Pool %s returned to its state as of %s.\n"),
1514 name, timestr);
1515 }
1516 if (loss > 120) {
1517 (void) printf(dgettext(TEXT_DOMAIN,
1518 "%s approximately %lld "),
1519 dryrun ? "Would discard" : "Discarded",
1520 ((longlong_t)loss + 30) / 60);
1521 (void) printf(dgettext(TEXT_DOMAIN,
1522 "minutes of transactions.\n"));
1523 } else if (loss > 0) {
1524 (void) printf(dgettext(TEXT_DOMAIN,
1525 "%s approximately %lld "),
1526 dryrun ? "Would discard" : "Discarded",
1527 (longlong_t)loss);
1528 (void) printf(dgettext(TEXT_DOMAIN,
1529 "seconds of transactions.\n"));
1530 }
1531 }
1532 }
1533
1534 void
1535 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1536 nvlist_t *config)
1537 {
1538 nvlist_t *nv = NULL;
1539 int64_t loss = -1;
1540 uint64_t edata = UINT64_MAX;
1541 uint64_t rewindto;
1542 struct tm t;
1543 char timestr[128];
1544
1545 if (!hdl->libzfs_printerr)
1546 return;
1547
1548 if (reason >= 0)
1549 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1550 else
1551 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1552
1553 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1554 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1555 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1556 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1557 goto no_info;
1558
1559 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1560 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1561 &edata);
1562
1563 (void) printf(dgettext(TEXT_DOMAIN,
1564 "Recovery is possible, but will result in some data loss.\n"));
1565
1566 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1567 strftime(timestr, 128, "%c", &t) != 0) {
1568 (void) printf(dgettext(TEXT_DOMAIN,
1569 "\tReturning the pool to its state as of %s\n"
1570 "\tshould correct the problem. "),
1571 timestr);
1572 } else {
1573 (void) printf(dgettext(TEXT_DOMAIN,
1574 "\tReverting the pool to an earlier state "
1575 "should correct the problem.\n\t"));
1576 }
1577
1578 if (loss > 120) {
1579 (void) printf(dgettext(TEXT_DOMAIN,
1580 "Approximately %lld minutes of data\n"
1581 "\tmust be discarded, irreversibly. "),
1582 ((longlong_t)loss + 30) / 60);
1583 } else if (loss > 0) {
1584 (void) printf(dgettext(TEXT_DOMAIN,
1585 "Approximately %lld seconds of data\n"
1586 "\tmust be discarded, irreversibly. "),
1587 (longlong_t)loss);
1588 }
1589 if (edata != 0 && edata != UINT64_MAX) {
1590 if (edata == 1) {
1591 (void) printf(dgettext(TEXT_DOMAIN,
1592 "After rewind, at least\n"
1593 "\tone persistent user-data error will remain. "));
1594 } else {
1595 (void) printf(dgettext(TEXT_DOMAIN,
1596 "After rewind, several\n"
1597 "\tpersistent user-data errors will remain. "));
1598 }
1599 }
1600 (void) printf(dgettext(TEXT_DOMAIN,
1601 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1602 reason >= 0 ? "clear" : "import", name);
1603
1604 (void) printf(dgettext(TEXT_DOMAIN,
1605 "A scrub of the pool\n"
1606 "\tis strongly recommended after recovery.\n"));
1607 return;
1608
1609 no_info:
1610 (void) printf(dgettext(TEXT_DOMAIN,
1611 "Destroy and re-create the pool from\n\ta backup source.\n"));
1612 }
1613
1614 /*
1615 * zpool_import() is a contracted interface. Should be kept the same
1616 * if possible.
1617 *
1618 * Applications should use zpool_import_props() to import a pool with
1619  * new property values to be set.
1620 */
1621 int
1622 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1623 char *altroot)
1624 {
1625 nvlist_t *props = NULL;
1626 int ret;
1627
1628 if (altroot != NULL) {
1629 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1630 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1631 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1632 newname));
1633 }
1634
1635 if (nvlist_add_string(props,
1636 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1637 nvlist_add_string(props,
1638 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1639 nvlist_free(props);
1640 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1641 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1642 newname));
1643 }
1644 }
1645
1646 ret = zpool_import_props(hdl, config, newname, props,
1647 ZFS_IMPORT_NORMAL);
1648 if (props)
1649 nvlist_free(props);
1650 return (ret);
1651 }
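
/*
 * Illustrative sketch (not upstream code): importing a discovered pool
 * under an alternate root. 'config' is assumed to have come from
 * zpool_find_import(); passing NULL for 'newname' keeps the pool's
 * original name.
 */
static int
example_import_at_altroot(libzfs_handle_t *hdl, nvlist_t *config)
{
        return (zpool_import(hdl, config, NULL, "/sysroot"));
}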
1652
1653 static void
1654 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1655 int indent)
1656 {
1657 nvlist_t **child;
1658 uint_t c, children;
1659 char *vname;
1660 uint64_t is_log = 0;
1661
1662 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1663 &is_log);
1664
1665 if (name != NULL)
1666 (void) printf("\t%*s%s%s\n", indent, "", name,
1667 is_log ? " [log]" : "");
1668
1669 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1670 &child, &children) != 0)
1671 return;
1672
1673 for (c = 0; c < children; c++) {
1674 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1675 print_vdev_tree(hdl, vname, child[c], indent + 2);
1676 free(vname);
1677 }
1678 }
1679
1680 void
1681 zpool_print_unsup_feat(nvlist_t *config)
1682 {
1683 nvlist_t *nvinfo, *unsup_feat;
1684 nvpair_t *nvp;
1685
1686 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1687 0);
1688 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1689 &unsup_feat) == 0);
1690
1691 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1692 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1693 char *desc;
1694
1695 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1696 verify(nvpair_value_string(nvp, &desc) == 0);
1697
1698 if (strlen(desc) > 0)
1699 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1700 else
1701 (void) printf("\t%s\n", nvpair_name(nvp));
1702 }
1703 }
1704
1705 /*
1706 * Import the given pool using the known configuration and a list of
1707 * properties to be set. The configuration should have come from
1708  * zpool_find_import(). The 'newname' parameter controls whether the pool
1709 * is imported with a different name.
1710 */
1711 int
1712 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1713 nvlist_t *props, int flags)
1714 {
1715 zfs_cmd_t zc = {"\0"};
1716 zpool_rewind_policy_t policy;
1717 nvlist_t *nv = NULL;
1718 nvlist_t *nvinfo = NULL;
1719 nvlist_t *missing = NULL;
1720 char *thename;
1721 char *origname;
1722 int ret;
1723 int error = 0;
1724 char errbuf[1024];
1725
1726 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1727 &origname) == 0);
1728
1729 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1730 "cannot import pool '%s'"), origname);
1731
1732 if (newname != NULL) {
1733 if (!zpool_name_valid(hdl, B_FALSE, newname))
1734 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1735 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1736 newname));
1737 thename = (char *)newname;
1738 } else {
1739 thename = origname;
1740 }
1741
1742 if (props) {
1743 uint64_t version;
1744 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1745
1746 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1747 &version) == 0);
1748
1749 if ((props = zpool_valid_proplist(hdl, origname,
1750 props, version, flags, errbuf)) == NULL) {
1751 return (-1);
1752 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1753 nvlist_free(props);
1754 return (-1);
1755 }
1756 }
1757
1758 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1759
1760 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1761 &zc.zc_guid) == 0);
1762
1763 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1764 nvlist_free(props);
1765 return (-1);
1766 }
1767 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1768 nvlist_free(props);
1769 return (-1);
1770 }
1771
1772 zc.zc_cookie = flags;
1773 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1774 errno == ENOMEM) {
1775 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1776 zcmd_free_nvlists(&zc);
1777 return (-1);
1778 }
1779 }
1780 if (ret != 0)
1781 error = errno;
1782
1783 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1784 zpool_get_rewind_policy(config, &policy);
1785
1786 if (error) {
1787 char desc[1024];
1788
1789 /*
1790 * Dry-run failed, but we print out what success
1791  * looks like if we found a best txg.
1792 */
1793 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1794 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1795 B_TRUE, nv);
1796 nvlist_free(nv);
1797 return (-1);
1798 }
1799
1800 if (newname == NULL)
1801 (void) snprintf(desc, sizeof (desc),
1802 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1803 thename);
1804 else
1805 (void) snprintf(desc, sizeof (desc),
1806 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1807 origname, thename);
1808
1809 switch (error) {
1810 case ENOTSUP:
1811 if (nv != NULL && nvlist_lookup_nvlist(nv,
1812 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1813 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1814 (void) printf(dgettext(TEXT_DOMAIN, "This "
1815 "pool uses the following feature(s) not "
1816 "supported by this system:\n"));
1817 zpool_print_unsup_feat(nv);
1818 if (nvlist_exists(nvinfo,
1819 ZPOOL_CONFIG_CAN_RDONLY)) {
1820 (void) printf(dgettext(TEXT_DOMAIN,
1821 "All unsupported features are only "
1822 "required for writing to the pool."
1823 "\nThe pool can be imported using "
1824 "'-o readonly=on'.\n"));
1825 }
1826 }
1827 /*
1828 * Unsupported version.
1829 */
1830 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1831 break;
1832
1833 case EINVAL:
1834 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1835 break;
1836
1837 case EROFS:
1838 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1839 "one or more devices is read only"));
1840 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1841 break;
1842
1843 case ENXIO:
1844 if (nv && nvlist_lookup_nvlist(nv,
1845 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1846 nvlist_lookup_nvlist(nvinfo,
1847 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1848 (void) printf(dgettext(TEXT_DOMAIN,
1849 "The devices below are missing, use "
1850 "'-m' to import the pool anyway:\n"));
1851 print_vdev_tree(hdl, NULL, missing, 2);
1852 (void) printf("\n");
1853 }
1854 (void) zpool_standard_error(hdl, error, desc);
1855 break;
1856
1857 case EEXIST:
1858 (void) zpool_standard_error(hdl, error, desc);
1859 break;
1860
1861 case EBUSY:
1862 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1863 "one or more devices are already in use\n"));
1864 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1865 break;
1866
1867 default:
1868 (void) zpool_standard_error(hdl, error, desc);
1869 zpool_explain_recover(hdl,
1870 newname ? origname : thename, -error, nv);
1871 break;
1872 }
1873
1874 nvlist_free(nv);
1875 ret = -1;
1876 } else {
1877 zpool_handle_t *zhp;
1878
1879 /*
1880 * This should never fail, but play it safe anyway.
1881 */
1882 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1883 ret = -1;
1884 else if (zhp != NULL)
1885 zpool_close(zhp);
1886 if (policy.zrp_request &
1887 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1888 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1889 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1890 }
1891 nvlist_free(nv);
1892 return (0);
1893 }
1894
1895 zcmd_free_nvlists(&zc);
1896 nvlist_free(props);
1897
1898 return (ret);
1899 }
1900
1901 /*
1902 * Scan the pool.
1903 */
1904 int
1905 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1906 {
1907 zfs_cmd_t zc = {"\0"};
1908 char msg[1024];
1909 libzfs_handle_t *hdl = zhp->zpool_hdl;
1910
1911 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1912 zc.zc_cookie = func;
1913
1914 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1915 (errno == ENOENT && func != POOL_SCAN_NONE))
1916 return (0);
1917
1918 if (func == POOL_SCAN_SCRUB) {
1919 (void) snprintf(msg, sizeof (msg),
1920 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1921 } else if (func == POOL_SCAN_NONE) {
1922 (void) snprintf(msg, sizeof (msg),
1923 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1924 zc.zc_name);
1925 } else {
1926 assert(!"unexpected result");
1927 }
1928
1929 if (errno == EBUSY) {
1930 nvlist_t *nvroot;
1931 pool_scan_stat_t *ps = NULL;
1932 uint_t psc;
1933
1934 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1935 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1936 (void) nvlist_lookup_uint64_array(nvroot,
1937 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1938 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1939 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1940 else
1941 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1942 } else if (errno == ENOENT) {
1943 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1944 } else {
1945 return (zpool_standard_error(hdl, errno, msg));
1946 }
1947 }
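
/*
 * Illustrative sketch (not upstream code): starting a scrub, or cancelling
 * one already in progress; a pool that is already scrubbing reports
 * EZFS_SCRUBBING through the error path above.
 */
static int
example_scrub(zpool_handle_t *zhp, boolean_t cancel)
{
        return (zpool_scan(zhp, cancel ? POOL_SCAN_NONE : POOL_SCAN_SCRUB));
}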
1948
1949 /*
1950 * Find a vdev that matches the search criteria specified. We use the
1951  * nvpair name to determine how we should look for the device.
1952 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1953  * spare; but FALSE if it's an INUSE spare.
1954 */
1955 static nvlist_t *
1956 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1957 boolean_t *l2cache, boolean_t *log)
1958 {
1959 uint_t c, children;
1960 nvlist_t **child;
1961 nvlist_t *ret;
1962 uint64_t is_log;
1963 char *srchkey;
1964 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1965
1966 /* Nothing to look for */
1967 if (search == NULL || pair == NULL)
1968 return (NULL);
1969
1970 /* Obtain the key we will use to search */
1971 srchkey = nvpair_name(pair);
1972
1973 switch (nvpair_type(pair)) {
1974 case DATA_TYPE_UINT64:
1975 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1976 uint64_t srchval, theguid;
1977
1978 verify(nvpair_value_uint64(pair, &srchval) == 0);
1979 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1980 &theguid) == 0);
1981 if (theguid == srchval)
1982 return (nv);
1983 }
1984 break;
1985
1986 case DATA_TYPE_STRING: {
1987 char *srchval, *val;
1988
1989 verify(nvpair_value_string(pair, &srchval) == 0);
1990 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1991 break;
1992
1993 /*
1994 * Search for the requested value. Special cases:
1995 *
1996 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1997 * "-part1", or "p1". The suffix is hidden from the user,
1998 * but included in the string, so this matches around it.
1999  * - ZPOOL_CONFIG_PATH for short names, zfs_strcmp_shortname()
2000 * is used to check all possible expanded paths.
2001 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2002 *
2003 * Otherwise, all other searches are simple string compares.
2004 */
2005 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2006 uint64_t wholedisk = 0;
2007
2008 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2009 &wholedisk);
2010 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2011 return (nv);
2012
2013 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2014 char *type, *idx, *end, *p;
2015 uint64_t id, vdev_id;
2016
2017 /*
2018 * Determine our vdev type, keeping in mind
2019 * that the srchval is composed of a type and
2020 * vdev id pair (i.e. mirror-4).
2021 */
2022 if ((type = strdup(srchval)) == NULL)
2023 return (NULL);
2024
2025 if ((p = strrchr(type, '-')) == NULL) {
2026 free(type);
2027 break;
2028 }
2029 idx = p + 1;
2030 *p = '\0';
2031
2032 /*
2033 * If the types don't match then keep looking.
2034 */
2035 if (strncmp(val, type, strlen(val)) != 0) {
2036 free(type);
2037 break;
2038 }
2039
2040 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2041 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2042 strncmp(type, VDEV_TYPE_MIRROR,
2043 strlen(VDEV_TYPE_MIRROR)) == 0);
2044 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2045 &id) == 0);
2046
2047 errno = 0;
2048 vdev_id = strtoull(idx, &end, 10);
2049
2050 free(type);
2051 if (errno != 0)
2052 return (NULL);
2053
2054 /*
2055 * Now verify that we have the correct vdev id.
2056 */
2057 if (vdev_id == id)
2058 return (nv);
2059 }
2060
2061 /*
2062 * Common case
2063 */
2064 if (strcmp(srchval, val) == 0)
2065 return (nv);
2066 break;
2067 }
2068
2069 default:
2070 break;
2071 }
2072
2073 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2074 &child, &children) != 0)
2075 return (NULL);
2076
2077 for (c = 0; c < children; c++) {
2078 if ((ret = vdev_to_nvlist_iter(child[c], search,
2079 avail_spare, l2cache, NULL)) != NULL) {
2080 /*
2081 * The 'is_log' value is only set for the toplevel
2082 * vdev, not the leaf vdevs. So we always lookup the
2083 * log device from the root of the vdev tree (where
2084 * 'log' is non-NULL).
2085 */
2086 if (log != NULL &&
2087 nvlist_lookup_uint64(child[c],
2088 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2089 is_log) {
2090 *log = B_TRUE;
2091 }
2092 return (ret);
2093 }
2094 }
2095
2096 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2097 &child, &children) == 0) {
2098 for (c = 0; c < children; c++) {
2099 if ((ret = vdev_to_nvlist_iter(child[c], search,
2100 avail_spare, l2cache, NULL)) != NULL) {
2101 *avail_spare = B_TRUE;
2102 return (ret);
2103 }
2104 }
2105 }
2106
2107 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2108 &child, &children) == 0) {
2109 for (c = 0; c < children; c++) {
2110 if ((ret = vdev_to_nvlist_iter(child[c], search,
2111 avail_spare, l2cache, NULL)) != NULL) {
2112 *l2cache = B_TRUE;
2113 return (ret);
2114 }
2115 }
2116 }
2117
2118 return (NULL);
2119 }
2120
2121 /*
2122 * Given a physical path (minus the "/devices" prefix), find the
2123 * associated vdev.
2124 */
2125 nvlist_t *
2126 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2127 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2128 {
2129 nvlist_t *search, *nvroot, *ret;
2130
2131 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2132 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2133
2134 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2135 &nvroot) == 0);
2136
2137 *avail_spare = B_FALSE;
2138 *l2cache = B_FALSE;
2139 if (log != NULL)
2140 *log = B_FALSE;
2141 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2142 nvlist_free(search);
2143
2144 return (ret);
2145 }
2146
2147 /*
2148 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2149 */
2150 boolean_t
2151 zpool_vdev_is_interior(const char *name)
2152 {
2153 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2154 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2155 return (B_TRUE);
2156 return (B_FALSE);
2157 }
2158
2159 nvlist_t *
2160 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2161 boolean_t *l2cache, boolean_t *log)
2162 {
2163 char *end;
2164 nvlist_t *nvroot, *search, *ret;
2165 uint64_t guid;
2166
2167 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2168
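/* A purely numeric name is interpreted as a vdev GUID. */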
2169 guid = strtoull(path, &end, 0);
2170 if (guid != 0 && *end == '\0') {
2171 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2172 } else if (zpool_vdev_is_interior(path)) {
2173 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2174 } else {
2175 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2176 }
2177
2178 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2179 &nvroot) == 0);
2180
2181 *avail_spare = B_FALSE;
2182 *l2cache = B_FALSE;
2183 if (log != NULL)
2184 *log = B_FALSE;
2185 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2186 nvlist_free(search);
2187
2188 return (ret);
2189 }
2190
2191 static int
2192 vdev_online(nvlist_t *nv)
2193 {
2194 uint64_t ival;
2195
2196 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2197 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2198 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2199 return (0);
2200
2201 return (1);
2202 }
2203
2204 /*
2205 * Helper function for zpool_get_physpath().
2206 */
2207 static int
2208 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2209 size_t *bytes_written)
2210 {
2211 size_t bytes_left, pos, rsz;
2212 char *tmppath;
2213 const char *format;
2214
2215 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2216 &tmppath) != 0)
2217 return (EZFS_NODEVICE);
2218
2219 pos = *bytes_written;
2220 bytes_left = physpath_size - pos;
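/* Multiple physical paths are emitted as a space-separated list. */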
2221 format = (pos == 0) ? "%s" : " %s";
2222
2223 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2224 *bytes_written += rsz;
2225
2226 if (rsz >= bytes_left) {
2227 /* if physpath was not copied properly, clear it */
2228 if (bytes_left != 0) {
2229 physpath[pos] = 0;
2230 }
2231 return (EZFS_NOSPC);
2232 }
2233 return (0);
2234 }
2235
2236 static int
2237 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2238 size_t *rsz, boolean_t is_spare)
2239 {
2240 char *type;
2241 int ret;
2242
2243 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2244 return (EZFS_INVALCONFIG);
2245
2246 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2247 /*
2248 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2249 * For a spare vdev, we only want to boot from the active
2250 * spare device.
2251 */
2252 if (is_spare) {
2253 uint64_t spare = 0;
2254 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2255 &spare);
2256 if (!spare)
2257 return (EZFS_INVALCONFIG);
2258 }
2259
2260 if (vdev_online(nv)) {
2261 if ((ret = vdev_get_one_physpath(nv, physpath,
2262 phypath_size, rsz)) != 0)
2263 return (ret);
2264 }
2265 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2266 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2267 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
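/*
 * The condition above sets 'is_spare' when this vdev is a spare
 * group, so that only the active spare's physical path is
 * collected from its children.
 */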
2268 nvlist_t **child;
2269 uint_t count;
2270 int i, ret;
2271
2272 if (nvlist_lookup_nvlist_array(nv,
2273 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2274 return (EZFS_INVALCONFIG);
2275
2276 for (i = 0; i < count; i++) {
2277 ret = vdev_get_physpaths(child[i], physpath,
2278 phypath_size, rsz, is_spare);
2279 if (ret == EZFS_NOSPC)
2280 return (ret);
2281 }
2282 }
2283
2284 return (EZFS_POOL_INVALARG);
2285 }
2286
2287 /*
2288 * Get phys_path for a root pool config.
2289 * Return 0 on success; non-zero on failure.
2290 */
2291 static int
2292 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2293 {
2294 size_t rsz;
2295 nvlist_t *vdev_root;
2296 nvlist_t **child;
2297 uint_t count;
2298 char *type;
2299
2300 rsz = 0;
2301
2302 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2303 &vdev_root) != 0)
2304 return (EZFS_INVALCONFIG);
2305
2306 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2307 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2308 &child, &count) != 0)
2309 return (EZFS_INVALCONFIG);
2310
2311 #if defined(__sun__) || defined(__sun)
2312 /*
2313 * root pool can not have EFI labeled disks and can only have
2314 * a single top-level vdev.
2315 */
2316 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2317 pool_uses_efi(vdev_root))
2318 return (EZFS_POOL_INVALARG);
2319 #endif
2320
2321 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2322 B_FALSE);
2323
2324 /* No online devices */
2325 if (rsz == 0)
2326 return (EZFS_NODEVICE);
2327
2328 return (0);
2329 }
2330
2331 /*
2332 * Get phys_path for a root pool
2333 * Return 0 on success; non-zero on failure.
2334 */
2335 int
2336 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2337 {
2338 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2339 phypath_size));
2340 }
2341
2342 /*
2343 * If the device has been dynamically expanded then we need to relabel
2344 * the disk to use the new unallocated space.
2345 */
2346 static int
2347 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2348 {
2349 int fd, error;
2350
2351 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2352 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2353 "relabel '%s': unable to open device: %d"), path, errno);
2354 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2355 }
2356
2357 /*
2358 * It's possible that we might encounter an error if the device
2359 * does not have any unallocated space left. If so, we simply
2360 * ignore that error and continue on.
2361 *
2362 * Also, we don't call efi_rescan() - that would just return EBUSY.
2363 * The module will do it for us in vdev_disk_open().
2364 */
2365 error = efi_use_whole_disk(fd);
2366 (void) close(fd);
2367 if (error && error != VT_ENOSPC) {
2368 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2369 "relabel '%s': unable to read disk capacity"), path);
2370 return (zfs_error(hdl, EZFS_NOCAP, msg));
2371 }
2372 return (0);
2373 }
2374
2375 /*
2376 * Bring the specified vdev online. The 'flags' parameter is a set of the
2377 * ZFS_ONLINE_* flags.
2378 */
2379 int
2380 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2381 vdev_state_t *newstate)
2382 {
2383 zfs_cmd_t zc = {"\0"};
2384 char msg[1024];
2385 nvlist_t *tgt;
2386 boolean_t avail_spare, l2cache, islog;
2387 libzfs_handle_t *hdl = zhp->zpool_hdl;
2388 int error;
2389
2390 if (flags & ZFS_ONLINE_EXPAND) {
2391 (void) snprintf(msg, sizeof (msg),
2392 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2393 } else {
2394 (void) snprintf(msg, sizeof (msg),
2395 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2396 }
2397
2398 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2399 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2400 &islog)) == NULL)
2401 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2402
2403 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2404
2405 if (avail_spare)
2406 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2407
2408 if (flags & ZFS_ONLINE_EXPAND ||
2409 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2410 uint64_t wholedisk = 0;
2411
2412 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2413 &wholedisk);
2414
2415 /*
2416 * XXX - L2ARC 1.0 devices can't support expansion.
2417 */
2418 if (l2cache) {
2419 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2420 "cannot expand cache devices"));
2421 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2422 }
2423
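/*
 * A whole-disk vdev must be relabeled before the newly added
 * capacity becomes visible to the pool.
 */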
2424 if (wholedisk) {
2425 const char *fullpath = path;
2426 char buf[MAXPATHLEN];
2427
2428 if (path[0] != '/') {
2429 error = zfs_resolve_shortname(path, buf,
2430 sizeof (buf));
2431 if (error != 0)
2432 return (zfs_error(hdl, EZFS_NODEVICE,
2433 msg));
2434
2435 fullpath = buf;
2436 }
2437
2438 error = zpool_relabel_disk(hdl, fullpath, msg);
2439 if (error != 0)
2440 return (error);
2441 }
2442 }
2443
2444 zc.zc_cookie = VDEV_STATE_ONLINE;
2445 zc.zc_obj = flags;
2446
2447 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2448 if (errno == EINVAL) {
2449 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2450 "from this pool into a new one. Use '%s' "
2451 "instead"), "zpool detach");
2452 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2453 }
2454 return (zpool_standard_error(hdl, errno, msg));
2455 }
2456
2457 *newstate = zc.zc_cookie;
2458 return (0);
2459 }
2460
2461 /*
2462 * Take the specified vdev offline
2463 */
2464 int
2465 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2466 {
2467 zfs_cmd_t zc = {"\0"};
2468 char msg[1024];
2469 nvlist_t *tgt;
2470 boolean_t avail_spare, l2cache;
2471 libzfs_handle_t *hdl = zhp->zpool_hdl;
2472
2473 (void) snprintf(msg, sizeof (msg),
2474 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2475
2476 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2477 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2478 NULL)) == NULL)
2479 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2480
2481 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2482
2483 if (avail_spare)
2484 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2485
2486 zc.zc_cookie = VDEV_STATE_OFFLINE;
2487 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2488
2489 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2490 return (0);
2491
2492 switch (errno) {
2493 case EBUSY:
2494
2495 /*
2496 * There are no other replicas of this device.
2497 */
2498 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2499
2500 case EEXIST:
2501 /*
2502 * The log device has unplayed logs
2503 * The log device has unplayed logs.
2504 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2505
2506 default:
2507 return (zpool_standard_error(hdl, errno, msg));
2508 }
2509 }
2510
2511 /*
2512 * Mark the given vdev faulted.
2513 */
2514 int
2515 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2516 {
2517 zfs_cmd_t zc = {"\0"};
2518 char msg[1024];
2519 libzfs_handle_t *hdl = zhp->zpool_hdl;
2520
2521 (void) snprintf(msg, sizeof (msg),
2522 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2523
2524 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2525 zc.zc_guid = guid;
2526 zc.zc_cookie = VDEV_STATE_FAULTED;
2527 zc.zc_obj = aux;
2528
2529 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2530 return (0);
2531
2532 switch (errno) {
2533 case EBUSY:
2534
2535 /*
2536 * There are no other replicas of this device.
2537 */
2538 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2539
2540 default:
2541 return (zpool_standard_error(hdl, errno, msg));
2542 }
2543
2544 }
2545
2546 /*
2547 * Mark the given vdev degraded.
2548 */
2549 int
2550 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2551 {
2552 zfs_cmd_t zc = {"\0"};
2553 char msg[1024];
2554 libzfs_handle_t *hdl = zhp->zpool_hdl;
2555
2556 (void) snprintf(msg, sizeof (msg),
2557 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2558
2559 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2560 zc.zc_guid = guid;
2561 zc.zc_cookie = VDEV_STATE_DEGRADED;
2562 zc.zc_obj = aux;
2563
2564 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2565 return (0);
2566
2567 return (zpool_standard_error(hdl, errno, msg));
2568 }
2569
2570 /*
2571 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2572 * a hot spare.
2573 */
2574 static boolean_t
2575 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2576 {
2577 nvlist_t **child;
2578 uint_t c, children;
2579 char *type;
2580
2581 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2582 &children) == 0) {
2583 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2584 &type) == 0);
2585
2586 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2587 children == 2 && child[which] == tgt)
2588 return (B_TRUE);
2589
2590 for (c = 0; c < children; c++)
2591 if (is_replacing_spare(child[c], tgt, which))
2592 return (B_TRUE);
2593 }
2594
2595 return (B_FALSE);
2596 }
2597
2598 /*
2599 * Attach new_disk (fully described by nvroot) to old_disk.
2600 * If 'replacing' is specified, the new disk will replace the old one.
2601 */
2602 int
2603 zpool_vdev_attach(zpool_handle_t *zhp,
2604 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2605 {
2606 zfs_cmd_t zc = {"\0"};
2607 char msg[1024];
2608 int ret;
2609 nvlist_t *tgt;
2610 boolean_t avail_spare, l2cache, islog;
2611 uint64_t val;
2612 char *newname;
2613 nvlist_t **child;
2614 uint_t children;
2615 nvlist_t *config_root;
2616 libzfs_handle_t *hdl = zhp->zpool_hdl;
2617 boolean_t rootpool = zpool_is_bootable(zhp);
2618
2619 if (replacing)
2620 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2621 "cannot replace %s with %s"), old_disk, new_disk);
2622 else
2623 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2624 "cannot attach %s to %s"), new_disk, old_disk);
2625
2626 #if defined(__sun__) || defined(__sun)
2627 /*
2628 * If this is a root pool, make sure that we're not attaching an
2629 * EFI labeled device.
2630 */
2631 if (rootpool && pool_uses_efi(nvroot)) {
2632 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2633 "EFI labeled devices are not supported on root pools."));
2634 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2635 }
2636 #endif
2637
2638 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2639 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2640 &islog)) == 0)
2641 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2642
2643 if (avail_spare)
2644 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2645
2646 if (l2cache)
2647 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2648
2649 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2650 zc.zc_cookie = replacing;
2651
2652 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2653 &child, &children) != 0 || children != 1) {
2654 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2655 "new device must be a single disk"));
2656 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2657 }
2658
2659 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2660 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2661
2662 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2663 return (-1);
2664
2665 /*
2666 * If the target is a hot spare that has been swapped in, we can only
2667 * replace it with another hot spare.
2668 */
2669 if (replacing &&
2670 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2671 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2672 NULL) == NULL || !avail_spare) &&
2673 is_replacing_spare(config_root, tgt, 1)) {
2674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2675 "can only be replaced by another hot spare"));
2676 free(newname);
2677 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2678 }
2679
2680 free(newname);
2681
2682 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2683 return (-1);
2684
2685 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2686
2687 zcmd_free_nvlists(&zc);
2688
2689 if (ret == 0) {
2690 if (rootpool) {
2691 /*
2692 * XXX need a better way to prevent user from
2693 * booting up a half-baked vdev.
2694 */
2695 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2696 "sure to wait until resilver is done "
2697 "before rebooting.\n"));
2698 }
2699 return (0);
2700 }
2701
2702 switch (errno) {
2703 case ENOTSUP:
2704 /*
2705 * Can't attach to or replace this type of vdev.
2706 */
2707 if (replacing) {
2708 uint64_t version = zpool_get_prop_int(zhp,
2709 ZPOOL_PROP_VERSION, NULL);
2710
2711 if (islog)
2712 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2713 "cannot replace a log with a spare"));
2714 else if (version >= SPA_VERSION_MULTI_REPLACE)
2715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2716 "already in replacing/spare config; wait "
2717 "for completion or use 'zpool detach'"));
2718 else
2719 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2720 "cannot replace a replacing device"));
2721 } else {
2722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2723 "can only attach to mirrors and top-level "
2724 "disks"));
2725 }
2726 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2727 break;
2728
2729 case EINVAL:
2730 /*
2731 * The new device must be a single disk.
2732 */
2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2734 "new device must be a single disk"));
2735 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2736 break;
2737
2738 case EBUSY:
2739 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2740 new_disk);
2741 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2742 break;
2743
2744 case EOVERFLOW:
2745 /*
2746 * The new device is too small.
2747 */
2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2749 "device is too small"));
2750 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2751 break;
2752
2753 case EDOM:
2754 /*
2755 * The new device has a different optimal sector size.
2756 */
2757 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2758 "new device has a different optimal sector size; use the "
2759 "option '-o ashift=N' to override the optimal size"));
2760 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2761 break;
2762
2763 case ENAMETOOLONG:
2764 /*
2765 * The resulting top-level vdev spec won't fit in the label.
2766 */
2767 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2768 break;
2769
2770 default:
2771 (void) zpool_standard_error(hdl, errno, msg);
2772 }
2773
2774 return (-1);
2775 }
2776
2777 /*
2778 * Detach the specified device.
2779 */
2780 int
2781 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2782 {
2783 zfs_cmd_t zc = {"\0"};
2784 char msg[1024];
2785 nvlist_t *tgt;
2786 boolean_t avail_spare, l2cache;
2787 libzfs_handle_t *hdl = zhp->zpool_hdl;
2788
2789 (void) snprintf(msg, sizeof (msg),
2790 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2791
2792 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2793 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2794 NULL)) == 0)
2795 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2796
2797 if (avail_spare)
2798 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2799
2800 if (l2cache)
2801 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2802
2803 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2804
2805 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2806 return (0);
2807
2808 switch (errno) {
2809
2810 case ENOTSUP:
2811 /*
2812 * Can't detach from this type of vdev.
2813 */
2814 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2815 "applicable to mirror and replacing vdevs"));
2816 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2817 break;
2818
2819 case EBUSY:
2820 /*
2821 * There are no other replicas of this device.
2822 */
2823 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2824 break;
2825
2826 default:
2827 (void) zpool_standard_error(hdl, errno, msg);
2828 }
2829
2830 return (-1);
2831 }
2832
2833 /*
2834 * Find a mirror vdev in the source nvlist.
2835 *
2836 * The mchild array contains a list of disks in one of the top-level mirrors
2837 * of the source pool. The schild array contains a list of disks that the
2838 * user specified on the command line. We loop over the mchild array to
2839 * see if any entry in the schild array matches.
2840 *
2841 * If a disk in the mchild array is found in the schild array, we return
2842 * the index of that entry. Otherwise we return -1.
2843 */
2844 static int
2845 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2846 nvlist_t **schild, uint_t schildren)
2847 {
2848 uint_t mc;
2849
2850 for (mc = 0; mc < mchildren; mc++) {
2851 uint_t sc;
2852 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2853 mchild[mc], B_FALSE);
2854
2855 for (sc = 0; sc < schildren; sc++) {
2856 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2857 schild[sc], B_FALSE);
2858 boolean_t result = (strcmp(mpath, spath) == 0);
2859
2860 free(spath);
2861 if (result) {
2862 free(mpath);
2863 return (mc);
2864 }
2865 }
2866
2867 free(mpath);
2868 }
2869
2870 return (-1);
2871 }
2872
2873 /*
2874 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2875 * is generated and it is the responsibility of the caller to free it.
2876 */
2877 int
2878 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2879 nvlist_t *props, splitflags_t flags)
2880 {
2881 zfs_cmd_t zc = {"\0"};
2882 char msg[1024];
2883 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2884 nvlist_t **varray = NULL, *zc_props = NULL;
2885 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2886 libzfs_handle_t *hdl = zhp->zpool_hdl;
2887 uint64_t vers;
2888 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2889 int retval = 0;
2890
2891 (void) snprintf(msg, sizeof (msg),
2892 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2893
2894 if (!zpool_name_valid(hdl, B_FALSE, newname))
2895 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2896
2897 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2898 (void) fprintf(stderr, gettext("Internal error: unable to "
2899 "retrieve pool configuration\n"));
2900 return (-1);
2901 }
2902
2903 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2904 == 0);
2905 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2906
2907 if (props) {
2908 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2909 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2910 props, vers, flags, msg)) == NULL)
2911 return (-1);
2912 }
2913
2914 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2915 &children) != 0) {
2916 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2917 "Source pool is missing vdev tree"));
2918 if (zc_props)
2919 nvlist_free(zc_props);
2920 return (-1);
2921 }
2922
2923 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2924 vcount = 0;
2925
2926 if (*newroot == NULL ||
2927 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2928 &newchild, &newchildren) != 0)
2929 newchildren = 0;
2930
2931 for (c = 0; c < children; c++) {
2932 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2933 char *type;
2934 nvlist_t **mchild, *vdev;
2935 uint_t mchildren;
2936 int entry;
2937
2938 /*
2939 * Unlike cache & spares, slogs are stored in the
2940 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2941 */
2942 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2943 &is_log);
2944 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2945 &is_hole);
2946 if (is_log || is_hole) {
2947 /*
2948 * Create a hole vdev and put it in the config.
2949 */
2950 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2951 goto out;
2952 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2953 VDEV_TYPE_HOLE) != 0)
2954 goto out;
2955 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2956 1) != 0)
2957 goto out;
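/* Remember where the current run of log/hole vdevs began. */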
2958 if (lastlog == 0)
2959 lastlog = vcount;
2960 varray[vcount++] = vdev;
2961 continue;
2962 }
2963 lastlog = 0;
2964 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2965 == 0);
2966 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2967 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2968 "Source pool must be composed only of mirrors\n"));
2969 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2970 goto out;
2971 }
2972
2973 verify(nvlist_lookup_nvlist_array(child[c],
2974 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2975
2976 /* find or add an entry for this top-level vdev */
2977 if (newchildren > 0 &&
2978 (entry = find_vdev_entry(zhp, mchild, mchildren,
2979 newchild, newchildren)) >= 0) {
2980 /* We found a disk that the user specified. */
2981 vdev = mchild[entry];
2982 ++found;
2983 } else {
2984 /* User didn't specify a disk for this vdev. */
2985 vdev = mchild[mchildren - 1];
2986 }
2987
2988 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2989 goto out;
2990 }
2991
2992 /* did we find every disk the user specified? */
2993 if (found != newchildren) {
2994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2995 "include at most one disk from each mirror"));
2996 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2997 goto out;
2998 }
2999
3000 /* Prepare the nvlist for populating. */
3001 if (*newroot == NULL) {
3002 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3003 goto out;
3004 freelist = B_TRUE;
3005 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3006 VDEV_TYPE_ROOT) != 0)
3007 goto out;
3008 } else {
3009 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3010 }
3011
3012 /* Add the children we found, trimming any trailing log/hole vdevs. */
3013 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3014 lastlog == 0 ? vcount : lastlog) != 0)
3015 goto out;
3016
3017 /*
3018 * If we're just doing a dry run, exit now with success.
3019 */
3020 if (flags.dryrun) {
3021 memory_err = B_FALSE;
3022 freelist = B_FALSE;
3023 goto out;
3024 }
3025
3026 /* now build up the config list & call the ioctl */
3027 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3028 goto out;
3029
3030 if (nvlist_add_nvlist(newconfig,
3031 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3032 nvlist_add_string(newconfig,
3033 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3034 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3035 goto out;
3036
3037 /*
3038 * The new pool is automatically part of the namespace unless we
3039 * explicitly export it.
3040 */
3041 if (!flags.import)
3042 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3043 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3044 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3045 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3046 goto out;
3047 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3048 goto out;
3049
3050 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3051 retval = zpool_standard_error(hdl, errno, msg);
3052 goto out;
3053 }
3054
3055 freelist = B_FALSE;
3056 memory_err = B_FALSE;
3057
3058 out:
3059 if (varray != NULL) {
3060 int v;
3061
3062 for (v = 0; v < vcount; v++)
3063 nvlist_free(varray[v]);
3064 free(varray);
3065 }
3066 zcmd_free_nvlists(&zc);
3067 if (zc_props)
3068 nvlist_free(zc_props);
3069 if (newconfig)
3070 nvlist_free(newconfig);
3071 if (freelist) {
3072 nvlist_free(*newroot);
3073 *newroot = NULL;
3074 }
3075
3076 if (retval != 0)
3077 return (retval);
3078
3079 if (memory_err)
3080 return (no_memory(hdl));
3081
3082 return (0);
3083 }
3084
3085 /*
3086 * Remove the given device. Currently, this is supported only for hot spares
3087 * and level 2 cache devices.
3088 */
3089 int
3090 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3091 {
3092 zfs_cmd_t zc = {"\0"};
3093 char msg[1024];
3094 nvlist_t *tgt;
3095 boolean_t avail_spare, l2cache, islog;
3096 libzfs_handle_t *hdl = zhp->zpool_hdl;
3097 uint64_t version;
3098
3099 (void) snprintf(msg, sizeof (msg),
3100 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3101
3102 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3103 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3104 &islog)) == 0)
3105 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3106 /*
3107 * XXX - this should just go away.
3108 */
3109 if (!avail_spare && !l2cache && !islog) {
3110 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3111 "only inactive hot spares, cache, top-level, "
3112 "or log devices can be removed"));
3113 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3114 }
3115
3116 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3117 if (islog && version < SPA_VERSION_HOLES) {
3118 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3119 "pool must be upgraded to support log removal"));
3120 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3121 }
3122
3123 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3124
3125 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3126 return (0);
3127
3128 return (zpool_standard_error(hdl, errno, msg));
3129 }
3130
3131 /*
3132 * Clear the errors for the pool, or the particular device if specified.
3133 */
3134 int
3135 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3136 {
3137 zfs_cmd_t zc = {"\0"};
3138 char msg[1024];
3139 nvlist_t *tgt;
3140 zpool_rewind_policy_t policy;
3141 boolean_t avail_spare, l2cache;
3142 libzfs_handle_t *hdl = zhp->zpool_hdl;
3143 nvlist_t *nvi = NULL;
3144 int error;
3145
3146 if (path)
3147 (void) snprintf(msg, sizeof (msg),
3148 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3149 path);
3150 else
3151 (void) snprintf(msg, sizeof (msg),
3152 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3153 zhp->zpool_name);
3154
3155 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3156 if (path) {
3157 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3158 &l2cache, NULL)) == 0)
3159 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3160
3161 /*
3162 * Don't allow error clearing for hot spares. Do allow
3163 * error clearing for l2cache devices.
3164 */
3165 if (avail_spare)
3166 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3167
3168 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3169 &zc.zc_guid) == 0);
3170 }
3171
3172 zpool_get_rewind_policy(rewindnvl, &policy);
3173 zc.zc_cookie = policy.zrp_request;
3174
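/*
 * Size the result buffer from the cached config so the rewind
 * information returned by the kernel is likely to fit on the
 * first attempt.
 */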
3175 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3176 return (-1);
3177
3178 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3179 return (-1);
3180
3181 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3182 errno == ENOMEM) {
3183 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3184 zcmd_free_nvlists(&zc);
3185 return (-1);
3186 }
3187 }
3188
3189 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3190 errno != EPERM && errno != EACCES)) {
3191 if (policy.zrp_request &
3192 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3193 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3194 zpool_rewind_exclaim(hdl, zc.zc_name,
3195 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3196 nvi);
3197 nvlist_free(nvi);
3198 }
3199 zcmd_free_nvlists(&zc);
3200 return (0);
3201 }
3202
3203 zcmd_free_nvlists(&zc);
3204 return (zpool_standard_error(hdl, errno, msg));
3205 }
3206
3207 /*
3208 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3209 */
3210 int
3211 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3212 {
3213 zfs_cmd_t zc = {"\0"};
3214 char msg[1024];
3215 libzfs_handle_t *hdl = zhp->zpool_hdl;
3216
3217 (void) snprintf(msg, sizeof (msg),
3218 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3219 (u_longlong_t)guid);
3220
3221 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3222 zc.zc_guid = guid;
3223 zc.zc_cookie = ZPOOL_NO_REWIND;
3224
3225 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3226 return (0);
3227
3228 return (zpool_standard_error(hdl, errno, msg));
3229 }
3230
3231 /*
3232 * Change the GUID for a pool.
3233 */
3234 int
3235 zpool_reguid(zpool_handle_t *zhp)
3236 {
3237 char msg[1024];
3238 libzfs_handle_t *hdl = zhp->zpool_hdl;
3239 zfs_cmd_t zc = {"\0"};
3240
3241 (void) snprintf(msg, sizeof (msg),
3242 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3243
3244 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3245 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3246 return (0);
3247
3248 return (zpool_standard_error(hdl, errno, msg));
3249 }
3250
3251 /*
3252 * Reopen the pool.
3253 */
3254 int
3255 zpool_reopen(zpool_handle_t *zhp)
3256 {
3257 zfs_cmd_t zc = {"\0"};
3258 char msg[1024];
3259 libzfs_handle_t *hdl = zhp->zpool_hdl;
3260
3261 (void) snprintf(msg, sizeof (msg),
3262 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3263 zhp->zpool_name);
3264
3265 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3266 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3267 return (0);
3268 return (zpool_standard_error(hdl, errno, msg));
3269 }
3270
3271 /*
3272 * Convert from a devid string to a path.
3273 */
3274 static char *
3275 devid_to_path(char *devid_str)
3276 {
3277 ddi_devid_t devid;
3278 char *minor;
3279 char *path;
3280 devid_nmlist_t *list = NULL;
3281 int ret;
3282
3283 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3284 return (NULL);
3285
3286 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3287
3288 devid_str_free(minor);
3289 devid_free(devid);
3290
3291 if (ret != 0)
3292 return (NULL);
3293
3294 if ((path = strdup(list[0].devname)) == NULL)
3295 return (NULL);
3296
3297 devid_free_nmlist(list);
3298
3299 return (path);
3300 }
3301
3302 /*
3303 * Convert from a path to a devid string.
3304 */
3305 static char *
3306 path_to_devid(const char *path)
3307 {
3308 int fd;
3309 ddi_devid_t devid;
3310 char *minor, *ret;
3311
3312 if ((fd = open(path, O_RDONLY)) < 0)
3313 return (NULL);
3314
3315 minor = NULL;
3316 ret = NULL;
3317 if (devid_get(fd, &devid) == 0) {
3318 if (devid_get_minor_name(fd, &minor) == 0)
3319 ret = devid_str_encode(devid, minor);
3320 if (minor != NULL)
3321 devid_str_free(minor);
3322 devid_free(devid);
3323 }
3324 (void) close(fd);
3325
3326 return (ret);
3327 }
3328
3329 /*
3330 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3331 * ignore any failure here, since a common case is for an unprivileged user to
3332 * type 'zpool status', and we'll display the correct information anyway.
3333 */
3334 static void
3335 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3336 {
3337 zfs_cmd_t zc = {"\0"};
3338
3339 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3340 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3341 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3342 &zc.zc_guid) == 0);
3343
3344 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3345 }
3346
3347 /*
3348 * Remove partition suffix from a vdev path. Partition suffixes may take three
3349 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3350 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3351 * third case only occurs when preceded by a string matching the regular
3352 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
3353 */
3354 static char *
3355 strip_partition(libzfs_handle_t *hdl, char *path)
3356 {
3357 char *tmp = zfs_strdup(hdl, path);
3358 char *part = NULL, *d = NULL;
3359
3360 if ((part = strstr(tmp, "-part")) && part != tmp) {
3361 d = part + 5;
3362 } else if ((part = strrchr(tmp, 'p')) &&
3363 part > tmp + 1 && isdigit(*(part-1))) {
3364 d = part + 1;
3365 } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3366 for (d = &tmp[2]; isalpha(*d); part = ++d);
3367 }
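/* Only strip the suffix when the remaining characters are all digits. */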
3368 if (part && d && *d != '\0') {
3369 for (; isdigit(*d); d++);
3370 if (*d == '\0')
3371 *part = '\0';
3372 }
3373 return (tmp);
3374 }
3375
3376 #define PATH_BUF_LEN 64
3377
3378 /*
3379 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3380 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3381 * We also check if this is a whole disk, in which case we strip off the
3382 * trailing 's0' slice name.
3383 *
3384 * This routine is also responsible for identifying when disks have been
3385 * reconfigured in a new location. The kernel will have opened the device by
3386 * devid, but the path will still refer to the old location. To catch this, we
3387 * first do a path -> devid translation (which is fast for the common case). If
3388 * the devid matches, we're done. If not, we do a reverse devid -> path
3389 * translation and issue the appropriate ioctl() to update the path of the vdev.
3390 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3391 * of these checks.
3392 */
3393 char *
3394 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3395 boolean_t verbose)
3396 {
3397 char *path, *devid, *type;
3398 uint64_t value;
3399 char buf[PATH_BUF_LEN];
3400 char tmpbuf[PATH_BUF_LEN];
3401 vdev_stat_t *vs;
3402 uint_t vsc;
3403
3404 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3405 &value) == 0) {
3406 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3407 &value) == 0);
3408 (void) snprintf(buf, sizeof (buf), "%llu",
3409 (u_longlong_t)value);
3410 path = buf;
3411 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3412 /*
3413 * If the device is dead (faulted, offline, etc) then don't
3414 * bother opening it. Otherwise we may be forcing the user to
3415 * open a misbehaving device, which can have undesirable
3416 * effects.
3417 */
3418 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3419 (uint64_t **)&vs, &vsc) != 0 ||
3420 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3421 zhp != NULL &&
3422 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3423 /*
3424 * Determine if the current path is correct.
3425 */
3426 char *newdevid = path_to_devid(path);
3427
3428 if (newdevid == NULL ||
3429 strcmp(devid, newdevid) != 0) {
3430 char *newpath;
3431
3432 if ((newpath = devid_to_path(devid)) != NULL) {
3433 /*
3434 * Update the path appropriately.
3435 */
3436 set_path(zhp, nv, newpath);
3437 if (nvlist_add_string(nv,
3438 ZPOOL_CONFIG_PATH, newpath) == 0)
3439 verify(nvlist_lookup_string(nv,
3440 ZPOOL_CONFIG_PATH,
3441 &path) == 0);
3442 free(newpath);
3443 }
3444 }
3445
3446 if (newdevid)
3447 devid_str_free(newdevid);
3448 }
3449
3450 /*
3451 * For a block device only use the name.
3452 */
3453 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3454 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3455 path = strrchr(path, '/');
3456 path++;
3457 }
3458
3459 /*
3460 * Remove the partition from the path if this is a whole disk.
3461 */
3462 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3463 &value) == 0 && value) {
3464 return (strip_partition(hdl, path));
3465 }
3466 } else {
3467 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3468
3469 /*
3470 * If it's a raidz device, we need to stick in the parity level.
3471 */
3472 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3473
3474 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3475 &value) == 0);
3476 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3477 (u_longlong_t)value);
3478 path = buf;
3479 }
3480
3481 /*
3482 * We identify each top-level vdev by using a <type-id>
3483 * naming convention.
3484 */
3485 if (verbose) {
3486 uint64_t id;
3487
3488 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3489 &id) == 0);
3490 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3491 path, (u_longlong_t)id);
3492 path = tmpbuf;
3493 }
3494 }
3495
3496 return (zfs_strdup(hdl, path));
3497 }
3498
3499 static int
3500 zbookmark_compare(const void *a, const void *b)
3501 {
3502 return (memcmp(a, b, sizeof (zbookmark_t)));
3503 }
3504
3505 /*
3506 * Retrieve the persistent error log, uniquify the members, and return to the
3507 * caller.
3508 */
3509 int
3510 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3511 {
3512 zfs_cmd_t zc = {"\0"};
3513 uint64_t count;
3514 zbookmark_t *zb = NULL;
3515 int i;
3516
3517 /*
3518 * Retrieve the raw error list from the kernel. If the number of errors
3519 * has increased, allocate more space and continue until we get the
3520 * entire list.
3521 */
3522 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3523 &count) == 0);
3524 if (count == 0)
3525 return (0);
3526 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3527 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3528 return (-1);
3529 zc.zc_nvlist_dst_size = count;
3530 (void) strcpy(zc.zc_name, zhp->zpool_name);
3531 for (;;) {
3532 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3533 &zc) != 0) {
3534 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3535 if (errno == ENOMEM) {
3536 count = zc.zc_nvlist_dst_size;
3537 if ((zc.zc_nvlist_dst = (uintptr_t)
3538 zfs_alloc(zhp->zpool_hdl, count *
3539 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3540 return (-1);
3541 } else {
3542 return (-1);
3543 }
3544 } else {
3545 break;
3546 }
3547 }
3548
3549 /*
3550 * Sort the resulting bookmarks. This is a little confusing due to the
3551 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3552 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3553 * _not_ copied as part of the process. So we point the start of our
3554 * array appropriately and decrement the total number of elements.
3555 */
3556 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3557 zc.zc_nvlist_dst_size;
3558 count -= zc.zc_nvlist_dst_size;
3559
3560 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3561
3562 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3563
3564 /*
3565 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3566 */
3567 for (i = 0; i < count; i++) {
3568 nvlist_t *nv;
3569
3570 /* ignoring zb_blkid and zb_level for now */
3571 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3572 zb[i-1].zb_object == zb[i].zb_object)
3573 continue;
3574
3575 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3576 goto nomem;
3577 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3578 zb[i].zb_objset) != 0) {
3579 nvlist_free(nv);
3580 goto nomem;
3581 }
3582 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3583 zb[i].zb_object) != 0) {
3584 nvlist_free(nv);
3585 goto nomem;
3586 }
3587 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3588 nvlist_free(nv);
3589 goto nomem;
3590 }
3591 nvlist_free(nv);
3592 }
3593
3594 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3595 return (0);
3596
3597 nomem:
3598 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3599 return (no_memory(zhp->zpool_hdl));
3600 }
3601
3602 /*
3603 * Upgrade a ZFS pool to the latest on-disk version.
3604 */
3605 int
3606 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3607 {
3608 zfs_cmd_t zc = {"\0"};
3609 libzfs_handle_t *hdl = zhp->zpool_hdl;
3610
3611 (void) strcpy(zc.zc_name, zhp->zpool_name);
3612 zc.zc_cookie = new_version;
3613
3614 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3615 return (zpool_standard_error_fmt(hdl, errno,
3616 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3617 zhp->zpool_name));
3618 return (0);
3619 }
3620
3621 void
3622 zfs_save_arguments(int argc, char **argv, char *string, int len)
3623 {
3624 int i;
3625
3626 (void) strlcpy(string, basename(argv[0]), len);
3627 for (i = 1; i < argc; i++) {
3628 (void) strlcat(string, " ", len);
3629 (void) strlcat(string, argv[i], len);
3630 }
3631 }
3632
3633 int
3634 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3635 {
3636 zfs_cmd_t zc = {"\0"};
3637 nvlist_t *args;
3638 int err;
3639
3640 args = fnvlist_alloc();
3641 fnvlist_add_string(args, "message", message);
3642 err = zcmd_write_src_nvlist(hdl, &zc, args);
3643 if (err == 0)
3644 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3645 nvlist_free(args);
3646 zcmd_free_nvlists(&zc);
3647 return (err);
3648 }
3649
3650 /*
3651 * Perform ioctl to get some command history of a pool.
3652 *
3653 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3654 * logical offset of the history buffer to start reading from.
3655 *
3656 * Upon return, 'off' is the next logical offset to read from and
3657 * 'len' is the actual amount of bytes read into 'buf'.
3658 */
3659 static int
3660 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3661 {
3662 zfs_cmd_t zc = {"\0"};
3663 libzfs_handle_t *hdl = zhp->zpool_hdl;
3664
3665 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3666
3667 zc.zc_history = (uint64_t)(uintptr_t)buf;
3668 zc.zc_history_len = *len;
3669 zc.zc_history_offset = *off;
3670
3671 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3672 switch (errno) {
3673 case EPERM:
3674 return (zfs_error_fmt(hdl, EZFS_PERM,
3675 dgettext(TEXT_DOMAIN,
3676 "cannot show history for pool '%s'"),
3677 zhp->zpool_name));
3678 case ENOENT:
3679 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3680 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3681 "'%s'"), zhp->zpool_name));
3682 case ENOTSUP:
3683 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3684 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3685 "'%s', pool must be upgraded"), zhp->zpool_name));
3686 default:
3687 return (zpool_standard_error_fmt(hdl, errno,
3688 dgettext(TEXT_DOMAIN,
3689 "cannot get history for '%s'"), zhp->zpool_name));
3690 }
3691 }
3692
3693 *len = zc.zc_history_len;
3694 *off = zc.zc_history_offset;
3695
3696 return (0);
3697 }
3698
3699 /*
3700 * Process the buffer of nvlists, unpacking and storing each nvlist record
3701 * into 'records'. 'leftover' is set to the number of bytes that weren't
3702 * processed as there wasn't a complete record.
3703 */
3704 int
3705 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3706 nvlist_t ***records, uint_t *numrecords)
3707 {
3708 uint64_t reclen;
3709 nvlist_t *nv;
3710 int i;
3711
3712 while (bytes_read > sizeof (reclen)) {
3713
3714 /* get length of packed record (stored as little endian) */
3715 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3716 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3717
3718 if (bytes_read < sizeof (reclen) + reclen)
3719 break;
3720
3721 /* unpack record */
3722 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3723 return (ENOMEM);
3724 bytes_read -= sizeof (reclen) + reclen;
3725 buf += sizeof (reclen) + reclen;
3726
3727 /* add record to nvlist array */
3728 (*numrecords)++;
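/* Grow the records array geometrically to amortize realloc() calls. */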
3729 if (ISP2(*numrecords + 1)) {
3730 *records = realloc(*records,
3731 *numrecords * 2 * sizeof (nvlist_t *));
3732 }
3733 (*records)[*numrecords - 1] = nv;
3734 }
3735
3736 *leftover = bytes_read;
3737 return (0);
3738 }
3739
3740 #define HIS_BUF_LEN (128*1024)
3741
3742 /*
3743 * Retrieve the command history of a pool.
3744 */
3745 int
3746 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3747 {
3748 char buf[HIS_BUF_LEN];
3749 uint64_t off = 0;
3750 nvlist_t **records = NULL;
3751 uint_t numrecords = 0;
3752 int err, i;
3753
3754 do {
3755 uint64_t bytes_read = sizeof (buf);
3756 uint64_t leftover;
3757
3758 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3759 break;
3760
3761 /* if nothing else was read in, we're at EOF, just return */
3762 if (!bytes_read)
3763 break;
3764
3765 if ((err = zpool_history_unpack(buf, bytes_read,
3766 &leftover, &records, &numrecords)) != 0)
3767 break;
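/* Back up over any partial record left at the end of the buffer. */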
3768 off -= leftover;
3769
3770 /* CONSTCOND */
3771 } while (1);
3772
3773 if (!err) {
3774 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3775 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3776 records, numrecords) == 0);
3777 }
3778 for (i = 0; i < numrecords; i++)
3779 nvlist_free(records[i]);
3780 free(records);
3781
3782 return (err);
3783 }
3784
3785 /*
3786 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3787 * If there is a new event available 'nvp' will contain a newly allocated
3788 * nvlist and 'dropped' will be set to the number of missed events since
3789 * the last call to this function. When 'nvp' is set to NULL it indicates
3790 * no new events are available. In either case the function returns 0 and
3791 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3792 * function will return a non-zero value. When the function is called in
3793 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3794 * it will not return until a new event is available.
3795 */
3796 int
3797 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3798 int *dropped, unsigned flags, int zevent_fd)
3799 {
3800 zfs_cmd_t zc = {"\0"};
3801 int error = 0;
3802
3803 *nvp = NULL;
3804 *dropped = 0;
3805 zc.zc_cleanup_fd = zevent_fd;
3806
3807 if (flags & ZEVENT_NONBLOCK)
3808 zc.zc_guid = ZEVENT_NONBLOCK;
3809
3810 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3811 return (-1);
3812
3813 retry:
3814 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3815 switch (errno) {
3816 case ESHUTDOWN:
3817 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3818 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3819 goto out;
3820 case ENOENT:
3821 /* Blocking error case should not occur */
3822 if (!(flags & ZEVENT_NONBLOCK))
3823 error = zpool_standard_error_fmt(hdl, errno,
3824 dgettext(TEXT_DOMAIN, "cannot get event"));
3825
3826 goto out;
3827 case ENOMEM:
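/* The event did not fit; grow the buffer and retry the ioctl. */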
3828 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3829 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3830 dgettext(TEXT_DOMAIN, "cannot get event"));
3831 goto out;
3832 } else {
3833 goto retry;
3834 }
3835 default:
3836 error = zpool_standard_error_fmt(hdl, errno,
3837 dgettext(TEXT_DOMAIN, "cannot get event"));
3838 goto out;
3839 }
3840 }
3841
3842 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3843 if (error != 0)
3844 goto out;
3845
3846 *dropped = (int)zc.zc_cookie;
3847 out:
3848 zcmd_free_nvlists(&zc);
3849
3850 return (error);
3851 }
3852
3853 /*
3854 * Clear all events.
3855 */
3856 int
3857 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3858 {
3859 zfs_cmd_t zc = {"\0"};
3860 char msg[1024];
3861
3862 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3863 "cannot clear events"));
3864
3865 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3866 return (zpool_standard_error_fmt(hdl, errno, msg));
3867
3868 if (count != NULL)
3869 *count = (int)zc.zc_cookie; /* # of events cleared */
3870
3871 return (0);
3872 }
3873
3874 /*
3875 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3876 * the passed zevent_fd file handle. On success zero is returned,
3877 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3878 */
3879 int
3880 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3881 {
3882 zfs_cmd_t zc = {"\0"};
3883 int error = 0;
3884
3885 zc.zc_guid = eid;
3886 zc.zc_cleanup_fd = zevent_fd;
3887
3888 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3889 switch (errno) {
3890 case ENOENT:
3891 error = zfs_error_fmt(hdl, EZFS_NOENT,
3892 dgettext(TEXT_DOMAIN, "cannot get event"));
3893 break;
3894
3895 case ENOMEM:
3896 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3897 dgettext(TEXT_DOMAIN, "cannot get event"));
3898 break;
3899
3900 default:
3901 error = zpool_standard_error_fmt(hdl, errno,
3902 dgettext(TEXT_DOMAIN, "cannot get event"));
3903 break;
3904 }
3905 }
3906
3907 return (error);
3908 }
3909
3910 void
3911 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3912 char *pathname, size_t len)
3913 {
3914 zfs_cmd_t zc = {"\0"};
3915 boolean_t mounted = B_FALSE;
3916 char *mntpnt = NULL;
3917 char dsname[MAXNAMELEN];
3918
3919 if (dsobj == 0) {
3920 /* special case for the MOS */
3921 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
3922 (longlong_t)obj);
3923 return;
3924 }
3925
3926 /* get the dataset's name */
3927 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3928 zc.zc_obj = dsobj;
3929 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3930 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3931 /* just write out a path of two object numbers */
3932 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3933 (longlong_t)dsobj, (longlong_t)obj);
3934 return;
3935 }
3936 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3937
3938 /* find out if the dataset is mounted */
3939 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3940
3941 /* get the corrupted object's path */
3942 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3943 zc.zc_obj = obj;
3944 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3945 &zc) == 0) {
3946 if (mounted) {
3947 (void) snprintf(pathname, len, "%s%s", mntpnt,
3948 zc.zc_value);
3949 } else {
3950 (void) snprintf(pathname, len, "%s:%s",
3951 dsname, zc.zc_value);
3952 }
3953 } else {
3954 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
3955 (longlong_t)obj);
3956 }
3957 free(mntpnt);
3958 }
3959
3960 /*
3961 * Read the EFI label from the config; if a label does not exist, then
3962 * pass back the error to the caller. If the caller has passed a non-NULL
3963 * diskaddr argument then we set it to the starting address of the EFI
3964 * partition.
3965 */
3966 static int
3967 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3968 {
3969 char *path;
3970 int fd;
3971 char diskname[MAXPATHLEN];
3972 int err = -1;
3973
3974 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3975 return (err);
3976
3977 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
3978 strrchr(path, '/'));
3979 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
3980 struct dk_gpt *vtoc;
3981
3982 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3983 if (sb != NULL)
3984 *sb = vtoc->efi_parts[0].p_start;
3985 efi_free(vtoc);
3986 }
3987 (void) close(fd);
3988 }
3989 return (err);
3990 }
3991
3992 /*
3993 * Determine where a partition starts on a disk in the current
3994 * configuration.
3995 */
3996 static diskaddr_t
3997 find_start_block(nvlist_t *config)
3998 {
3999 nvlist_t **child;
4000 uint_t c, children;
4001 diskaddr_t sb = MAXOFFSET_T;
4002 uint64_t wholedisk;
4003
4004 if (nvlist_lookup_nvlist_array(config,
4005 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4006 if (nvlist_lookup_uint64(config,
4007 ZPOOL_CONFIG_WHOLE_DISK,
4008 &wholedisk) != 0 || !wholedisk) {
4009 return (MAXOFFSET_T);
4010 }
4011 if (read_efi_label(config, &sb) < 0)
4012 sb = MAXOFFSET_T;
4013 return (sb);
4014 }
4015
4016 for (c = 0; c < children; c++) {
4017 sb = find_start_block(child[c]);
4018 if (sb != MAXOFFSET_T) {
4019 return (sb);
4020 }
4021 }
4022 return (MAXOFFSET_T);
4023 }
4024
4025 int
4026 zpool_label_disk_wait(char *path, int timeout)
4027 {
4028 struct stat64 statbuf;
4029 int i;
4030
4031 /*
4032 * Wait timeout milliseconds for a newly created device to be available
4033 * from the given path. There is a small window when a /dev/ device
4034 * will exist and the udev link will not, so we must wait for the
4035 * symlink. Depending on the udev rules this may take a few seconds.
4036 */
4037 for (i = 0; i < timeout; i++) {
4038 usleep(1000);
4039
4040 errno = 0;
4041 if ((stat64(path, &statbuf) == 0) && (errno == 0))
4042 return (0);
4043 }
4044
4045 return (ENOENT);
4046 }
4047
4048 int
4049 zpool_label_disk_check(char *path)
4050 {
4051 struct dk_gpt *vtoc;
4052 int fd, err;
4053
4054 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4055 return (errno);
4056
4057 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4058 (void) close(fd);
4059 return (err);
4060 }
4061
4062 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4063 efi_free(vtoc);
4064 (void) close(fd);
4065 return (EIDRM);
4066 }
4067
4068 efi_free(vtoc);
4069 (void) close(fd);
4070 return (0);
4071 }
4072
4073 /*
4074 * Label an individual disk. The name provided is the short name,
4075 * stripped of any leading /dev path.
4076 */
4077 int
4078 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4079 {
4080 char path[MAXPATHLEN];
4081 struct dk_gpt *vtoc;
4082 int rval, fd;
4083 size_t resv = EFI_MIN_RESV_SIZE;
4084 uint64_t slice_size;
4085 diskaddr_t start_block;
4086 char errbuf[1024];
4087
4088 /* prepare an error message just in case */
4089 (void) snprintf(errbuf, sizeof (errbuf),
4090 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4091
4092 if (zhp) {
4093 nvlist_t *nvroot;
4094
4095 #if defined(__sun__) || defined(__sun)
4096 if (zpool_is_bootable(zhp)) {
4097 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4098 "EFI labeled devices are not supported on root "
4099 "pools."));
4100 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
4101 }
4102 #endif
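/* On non-Solaris builds the bootable-pool restriction above is compiled out. */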
4103
4104 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4105 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4106
4107 if (zhp->zpool_start_block == 0)
4108 start_block = find_start_block(nvroot);
4109 else
4110 start_block = zhp->zpool_start_block;
4111 zhp->zpool_start_block = start_block;
4112 } else {
4113 /* new pool */
4114 start_block = NEW_START_BLOCK;
4115 }
4116
4117 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4118
4119 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
4120 /*
4121 * This shouldn't happen. We've long since verified that this
4122 * is a valid device.
4123 */
4124 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4125 "label '%s': unable to open device: %d"), path, errno);
4126 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4127 }
4128
4129 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4130 /*
4131 * The only way this can fail is if we run out of memory or are
4132 * unable to read the disk's capacity.
4133 */
4134 if (errno == ENOMEM)
4135 (void) no_memory(hdl);
4136
4137 (void) close(fd);
4138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4139 "label '%s': unable to read disk capacity"), path);
4140
4141 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4142 }
4143
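/*
 * Size the data slice to span the usable disk minus the starting offset
 * and the EFI_MIN_RESV_SIZE sectors reserved at the end, aligning its
 * end down to PARTITION_END_ALIGNMENT.
 */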
4144 slice_size = vtoc->efi_last_u_lba + 1;
4145 slice_size -= EFI_MIN_RESV_SIZE;
4146 if (start_block == MAXOFFSET_T)
4147 start_block = NEW_START_BLOCK;
4148 slice_size -= start_block;
4149 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4150
4151 vtoc->efi_parts[0].p_start = start_block;
4152 vtoc->efi_parts[0].p_size = slice_size;
4153
4154 /*
4155 * Why we use V_USR: V_BACKUP confuses users, and is considered
4156 * disposable by some EFI utilities (since EFI doesn't have a backup
4157 * slice). V_UNASSIGNED is supposed to be used only for zero size
4158 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4159 * etc. were all pretty specific. V_USR is as close to reality as we
4160 * can get, in the absence of V_OTHER.
4161 */
4162 vtoc->efi_parts[0].p_tag = V_USR;
4163 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4164
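/* The final slice (index 8) is the small reserved partition at the end of the disk. */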
4165 vtoc->efi_parts[8].p_start = slice_size + start_block;
4166 vtoc->efi_parts[8].p_size = resv;
4167 vtoc->efi_parts[8].p_tag = V_RESERVED;
4168
4169 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4170 /*
4171 * Some block drivers (like pcata) may not support EFI
4172 * GPT labels. Print out a helpful error message directing the
4173 * user to manually label the disk and give a specific
4174 * slice.
4175 */
4176 (void) close(fd);
4177 efi_free(vtoc);
4178
4179 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4180 "parted(8) and then provide a specific slice: %d"), rval);
4181 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4182 }
4183
4184 (void) close(fd);
4185 efi_free(vtoc);
4186
4187 /* Wait for the first expected partition to appear. */
4188
4189 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
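/*
 * zfs_append_partition() adds the first-partition suffix to 'path'; the
 * exact suffix depends on how the underlying device is named.
 */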
4190 (void) zfs_append_partition(path, MAXPATHLEN);
4191
4192 rval = zpool_label_disk_wait(path, 3000);
4193 if (rval) {
4194 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4195 "detect device partitions on '%s': %d"), path, rval);
4196 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4197 }
4198
4199 /* We can't be too paranoid. Read the label back and verify it. */
4200 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4201 rval = zpool_label_disk_check(path);
4202 if (rval) {
4203 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4204 "EFI label on '%s' is damaged. Ensure\nthis device "
4205 "is not in in use, and is functioning properly: %d"),
4206 path, rval);
4207 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4208 }
4209
4210 return (0);
4211 }
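/*
 * Illustrative sketch only, not part of libzfs: a caller that wants to
 * label a bare disk before building a vdev nvlist might proceed roughly
 * as follows, assuming 'hdl' and 'zhp' are valid handles and "sdb" is
 * the short device name:
 *
 *	char part[MAXPATHLEN];
 *
 *	if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *		return (-1);	(the error has already been reported)
 *	(void) snprintf(part, sizeof (part), "%s/%s", DISK_ROOT, "sdb");
 *	(void) zfs_append_partition(part, sizeof (part));
 *
 * at which point 'part' names the data partition to reference in the
 * vdev tree handed to zpool_create() or zpool_add().
 */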