lib/libzfs/libzfs_pool.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
26 */
27
28 #include <ctype.h>
29 #include <errno.h>
30 #include <devid.h>
31 #include <fcntl.h>
32 #include <libintl.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <strings.h>
36 #include <unistd.h>
37 #include <libgen.h>
38 #include <zone.h>
39 #include <sys/stat.h>
40 #include <sys/efi_partition.h>
41 #include <sys/vtoc.h>
42 #include <sys/zfs_ioctl.h>
43 #include <dlfcn.h>
44
45 #include "zfs_namecheck.h"
46 #include "zfs_prop.h"
47 #include "libzfs_impl.h"
48 #include "zfs_comutil.h"
49 #include "zfeature_common.h"
50
51 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
52
53 typedef struct prop_flags {
54 int create:1; /* Validate property on creation */
55 int import:1; /* Validate property on import */
56 } prop_flags_t;
57
58 /*
59 * ====================================================================
60 * zpool property functions
61 * ====================================================================
62 */
63
64 static int
65 zpool_get_all_props(zpool_handle_t *zhp)
66 {
67 zfs_cmd_t zc = {"\0"};
68 libzfs_handle_t *hdl = zhp->zpool_hdl;
69
70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
71
72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
73 return (-1);
74
75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
76 if (errno == ENOMEM) {
77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
78 zcmd_free_nvlists(&zc);
79 return (-1);
80 }
81 } else {
82 zcmd_free_nvlists(&zc);
83 return (-1);
84 }
85 }
86
87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
88 zcmd_free_nvlists(&zc);
89 return (-1);
90 }
91
92 zcmd_free_nvlists(&zc);
93
94 return (0);
95 }
96
97 static int
98 zpool_props_refresh(zpool_handle_t *zhp)
99 {
100 nvlist_t *old_props;
101
102 old_props = zhp->zpool_props;
103
104 if (zpool_get_all_props(zhp) != 0)
105 return (-1);
106
107 nvlist_free(old_props);
108 return (0);
109 }
110
111 static char *
112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
113 zprop_source_t *src)
114 {
115 nvlist_t *nv, *nvl;
116 uint64_t ival;
117 char *value;
118 zprop_source_t source;
119
120 nvl = zhp->zpool_props;
121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
123 source = ival;
124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
125 } else {
126 source = ZPROP_SRC_DEFAULT;
127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
128 value = "-";
129 }
130
131 if (src)
132 *src = source;
133
134 return (value);
135 }
136
137 uint64_t
138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
139 {
140 nvlist_t *nv, *nvl;
141 uint64_t value;
142 zprop_source_t source;
143
144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
145 /*
146 * zpool_get_all_props() has most likely failed because
147 * the pool is faulted, but if all we need is the top level
148 * vdev's guid then get it from the zhp config nvlist.
149 */
150 if ((prop == ZPOOL_PROP_GUID) &&
151 (nvlist_lookup_nvlist(zhp->zpool_config,
152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
154 == 0)) {
155 return (value);
156 }
157 return (zpool_prop_default_numeric(prop));
158 }
159
160 nvl = zhp->zpool_props;
161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
163 source = value;
164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
165 } else {
166 source = ZPROP_SRC_DEFAULT;
167 value = zpool_prop_default_numeric(prop);
168 }
169
170 if (src)
171 *src = source;
172
173 return (value);
174 }
175
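/*
 * For reference (annotation, not part of the original source): the
 * zpool_props nvlist consulted by the two accessors above maps each
 * property name to a nested nvlist of the form
 *
 *	"<propname>" -> { ZPROP_SOURCE: uint64 (zprop_source_t),
 *	                  ZPROP_VALUE:  string or uint64 }
 *
 * which is why both functions look up ZPROP_SOURCE and ZPROP_VALUE,
 * falling back to the compiled-in default when the pair is absent.
 */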
176 /*
177 * Map VDEV STATE to printed strings.
178 */
179 char *
180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
181 {
182 switch (state) {
183 default:
184 break;
185 case VDEV_STATE_CLOSED:
186 case VDEV_STATE_OFFLINE:
187 return (gettext("OFFLINE"));
188 case VDEV_STATE_REMOVED:
189 return (gettext("REMOVED"));
190 case VDEV_STATE_CANT_OPEN:
191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
192 return (gettext("FAULTED"));
193 else if (aux == VDEV_AUX_SPLIT_POOL)
194 return (gettext("SPLIT"));
195 else
196 return (gettext("UNAVAIL"));
197 case VDEV_STATE_FAULTED:
198 return (gettext("FAULTED"));
199 case VDEV_STATE_DEGRADED:
200 return (gettext("DEGRADED"));
201 case VDEV_STATE_HEALTHY:
202 return (gettext("ONLINE"));
203 }
204
205 return (gettext("UNKNOWN"));
206 }
207
208 /*
209 * Map POOL STATE to printed strings.
210 */
211 const char *
212 zpool_pool_state_to_name(pool_state_t state)
213 {
214 switch (state) {
215 default:
216 break;
217 case POOL_STATE_ACTIVE:
218 return (gettext("ACTIVE"));
219 case POOL_STATE_EXPORTED:
220 return (gettext("EXPORTED"));
221 case POOL_STATE_DESTROYED:
222 return (gettext("DESTROYED"));
223 case POOL_STATE_SPARE:
224 return (gettext("SPARE"));
225 case POOL_STATE_L2CACHE:
226 return (gettext("L2CACHE"));
227 case POOL_STATE_UNINITIALIZED:
228 return (gettext("UNINITIALIZED"));
229 case POOL_STATE_UNAVAIL:
230 return (gettext("UNAVAIL"));
231 case POOL_STATE_POTENTIALLY_ACTIVE:
232 return (gettext("POTENTIALLY_ACTIVE"));
233 }
234
235 return (gettext("UNKNOWN"));
236 }
237
238 /*
239 * Get a zpool property value for 'prop' and return the value in
240 * a pre-allocated buffer.
241 */
242 int
243 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
244 size_t len, zprop_source_t *srctype, boolean_t literal)
245 {
246 uint64_t intval;
247 const char *strval;
248 zprop_source_t src = ZPROP_SRC_NONE;
249 nvlist_t *nvroot;
250 vdev_stat_t *vs;
251 uint_t vsc;
252
253 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
254 switch (prop) {
255 case ZPOOL_PROP_NAME:
256 (void) strlcpy(buf, zpool_get_name(zhp), len);
257 break;
258
259 case ZPOOL_PROP_HEALTH:
260 (void) strlcpy(buf, "FAULTED", len);
261 break;
262
263 case ZPOOL_PROP_GUID:
264 intval = zpool_get_prop_int(zhp, prop, &src);
265 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
266 break;
267
268 case ZPOOL_PROP_ALTROOT:
269 case ZPOOL_PROP_CACHEFILE:
270 case ZPOOL_PROP_COMMENT:
271 if (zhp->zpool_props != NULL ||
272 zpool_get_all_props(zhp) == 0) {
273 (void) strlcpy(buf,
274 zpool_get_prop_string(zhp, prop, &src),
275 len);
276 break;
277 }
278 /* FALLTHROUGH */
279 default:
280 (void) strlcpy(buf, "-", len);
281 break;
282 }
283
284 if (srctype != NULL)
285 *srctype = src;
286 return (0);
287 }
288
289 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
290 prop != ZPOOL_PROP_NAME)
291 return (-1);
292
293 switch (zpool_prop_get_type(prop)) {
294 case PROP_TYPE_STRING:
295 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
296 len);
297 break;
298
299 case PROP_TYPE_NUMBER:
300 intval = zpool_get_prop_int(zhp, prop, &src);
301
302 switch (prop) {
303 case ZPOOL_PROP_SIZE:
304 case ZPOOL_PROP_ALLOCATED:
305 case ZPOOL_PROP_FREE:
306 case ZPOOL_PROP_FREEING:
307 case ZPOOL_PROP_LEAKED:
308 case ZPOOL_PROP_ASHIFT:
309 if (literal)
310 (void) snprintf(buf, len, "%llu",
311 (u_longlong_t)intval);
312 else
313 (void) zfs_nicenum(intval, buf, len);
314 break;
315
316 case ZPOOL_PROP_EXPANDSZ:
317 if (intval == 0) {
318 (void) strlcpy(buf, "-", len);
319 } else if (literal) {
320 (void) snprintf(buf, len, "%llu",
321 (u_longlong_t)intval);
322 } else {
323 (void) zfs_nicenum(intval, buf, len);
324 }
325 break;
326
327 case ZPOOL_PROP_CAPACITY:
328 if (literal) {
329 (void) snprintf(buf, len, "%llu",
330 (u_longlong_t)intval);
331 } else {
332 (void) snprintf(buf, len, "%llu%%",
333 (u_longlong_t)intval);
334 }
335 break;
336
337 case ZPOOL_PROP_FRAGMENTATION:
338 if (intval == UINT64_MAX) {
339 (void) strlcpy(buf, "-", len);
340 } else if (literal) {
341 (void) snprintf(buf, len, "%llu",
342 (u_longlong_t)intval);
343 } else {
344 (void) snprintf(buf, len, "%llu%%",
345 (u_longlong_t)intval);
346 }
347 break;
348
349 case ZPOOL_PROP_DEDUPRATIO:
350 if (literal)
351 (void) snprintf(buf, len, "%llu.%02llu",
352 (u_longlong_t)(intval / 100),
353 (u_longlong_t)(intval % 100));
354 else
355 (void) snprintf(buf, len, "%llu.%02llux",
356 (u_longlong_t)(intval / 100),
357 (u_longlong_t)(intval % 100));
358 break;
359
360 case ZPOOL_PROP_HEALTH:
361 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
362 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
363 verify(nvlist_lookup_uint64_array(nvroot,
364 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
365 == 0);
366
367 (void) strlcpy(buf, zpool_state_to_name(intval,
368 vs->vs_aux), len);
369 break;
370 case ZPOOL_PROP_VERSION:
371 if (intval >= SPA_VERSION_FEATURES) {
372 (void) snprintf(buf, len, "-");
373 break;
374 }
375 /* FALLTHROUGH */
376 default:
377 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
378 }
379 break;
380
381 case PROP_TYPE_INDEX:
382 intval = zpool_get_prop_int(zhp, prop, &src);
383 if (zpool_prop_index_to_string(prop, intval, &strval)
384 != 0)
385 return (-1);
386 (void) strlcpy(buf, strval, len);
387 break;
388
389 default:
390 abort();
391 }
392
393 if (srctype)
394 *srctype = src;
395
396 return (0);
397 }
398
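/*
 * Illustrative usage of zpool_get_prop() (a sketch, not from the
 * original source; assumes 'zhp' came from zpool_open()):
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf,
 *	    sizeof (buf), &src, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 *
 * With literal == B_FALSE the value is formatted for display (e.g.
 * "42%"); with literal == B_TRUE the raw number is returned.
 */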
399 /*
400  * Check that the bootfs name begins with the name of the pool it is
401  * being set on. Assumes bootfs is a valid dataset name.
402 */
403 static boolean_t
404 bootfs_name_valid(const char *pool, char *bootfs)
405 {
406 int len = strlen(pool);
407
408 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
409 return (B_FALSE);
410
411 if (strncmp(pool, bootfs, len) == 0 &&
412 (bootfs[len] == '/' || bootfs[len] == '\0'))
413 return (B_TRUE);
414
415 return (B_FALSE);
416 }
417
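/*
 * Examples (illustrative): for pool "tank", both "tank" and
 * "tank/ROOT/default" pass bootfs_name_valid(), while "tankx/fs"
 * does not -- the pool name must be followed by '/' or the end of
 * the string.
 */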
418 boolean_t
419 zpool_is_bootable(zpool_handle_t *zhp)
420 {
421 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
422
423 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
424 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
425 sizeof (bootfs)) != 0);
426 }
427
428
429 /*
430 * Given an nvlist of zpool properties to be set, validate that they are
431 * correct, and parse any numeric properties (index, boolean, etc) if they are
432 * specified as strings.
433 */
434 static nvlist_t *
435 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
436 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
437 {
438 nvpair_t *elem;
439 nvlist_t *retprops;
440 zpool_prop_t prop;
441 char *strval;
442 uint64_t intval;
443 char *slash, *check;
444 struct stat64 statbuf;
445 zpool_handle_t *zhp;
446
447 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
448 (void) no_memory(hdl);
449 return (NULL);
450 }
451
452 elem = NULL;
453 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
454 const char *propname = nvpair_name(elem);
455
456 prop = zpool_name_to_prop(propname);
457 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
458 int err;
459 char *fname = strchr(propname, '@') + 1;
460
461 err = zfeature_lookup_name(fname, NULL);
462 if (err != 0) {
463 ASSERT3U(err, ==, ENOENT);
464 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
465 "invalid feature '%s'"), fname);
466 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
467 goto error;
468 }
469
470 if (nvpair_type(elem) != DATA_TYPE_STRING) {
471 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
472 "'%s' must be a string"), propname);
473 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
474 goto error;
475 }
476
477 (void) nvpair_value_string(elem, &strval);
478 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
479 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
480 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
481 "property '%s' can only be set to "
482 "'enabled' or 'disabled'"), propname);
483 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
484 goto error;
485 }
486
487 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
488 (void) no_memory(hdl);
489 goto error;
490 }
491 continue;
492 }
493
494 /*
495 * Make sure this property is valid and applies to this type.
496 */
497 if (prop == ZPROP_INVAL) {
498 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
499 "invalid property '%s'"), propname);
500 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
501 goto error;
502 }
503
504 if (zpool_prop_readonly(prop)) {
505 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
506 "is readonly"), propname);
507 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
508 goto error;
509 }
510
511 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
512 &strval, &intval, errbuf) != 0)
513 goto error;
514
515 /*
516 * Perform additional checking for specific properties.
517 */
518 switch (prop) {
519 default:
520 break;
521 case ZPOOL_PROP_VERSION:
522 if (intval < version ||
523 !SPA_VERSION_IS_SUPPORTED(intval)) {
524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525  "property '%s' number %llu is invalid."),
526  propname, (u_longlong_t)intval);
527 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
528 goto error;
529 }
530 break;
531
532 case ZPOOL_PROP_ASHIFT:
533 if (!flags.create) {
534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 "property '%s' can only be set at "
536 "creation time"), propname);
537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
538 goto error;
539 }
540
541 if (intval != 0 && (intval < 9 || intval > 13)) {
542 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
543  "property '%s' number %llu is invalid."),
544  propname, (u_longlong_t)intval);
545 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
546 goto error;
547 }
548 break;
549
550 case ZPOOL_PROP_BOOTFS:
551 if (flags.create || flags.import) {
552 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
553 "property '%s' cannot be set at creation "
554 "or import time"), propname);
555 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
556 goto error;
557 }
558
559 if (version < SPA_VERSION_BOOTFS) {
560 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
561 "pool must be upgraded to support "
562 "'%s' property"), propname);
563 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
564 goto error;
565 }
566
567 /*
568  * The bootfs property value has to be a dataset name, and
569  * the dataset has to reside in the pool the property is set on.
570 */
571 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
572 strval)) {
573 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
574 "is an invalid name"), strval);
575 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
576 goto error;
577 }
578
579 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
581 "could not open pool '%s'"), poolname);
582 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
583 goto error;
584 }
585 zpool_close(zhp);
586 break;
587
588 case ZPOOL_PROP_ALTROOT:
589 if (!flags.create && !flags.import) {
590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
591 "property '%s' can only be set during pool "
592 "creation or import"), propname);
593 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
594 goto error;
595 }
596
597 if (strval[0] != '/') {
598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
599 "bad alternate root '%s'"), strval);
600 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
601 goto error;
602 }
603 break;
604
605 case ZPOOL_PROP_CACHEFILE:
606 if (strval[0] == '\0')
607 break;
608
609 if (strcmp(strval, "none") == 0)
610 break;
611
612 if (strval[0] != '/') {
613 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
614 "property '%s' must be empty, an "
615 "absolute path, or 'none'"), propname);
616 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
617 goto error;
618 }
619
620 slash = strrchr(strval, '/');
621
622 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
623 strcmp(slash, "/..") == 0) {
624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
625 "'%s' is not a valid file"), strval);
626 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
627 goto error;
628 }
629
630 *slash = '\0';
631
632 if (strval[0] != '\0' &&
633 (stat64(strval, &statbuf) != 0 ||
634 !S_ISDIR(statbuf.st_mode))) {
635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
636 "'%s' is not a valid directory"),
637 strval);
638 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
639 goto error;
640 }
641
642 *slash = '/';
643 break;
644
645 case ZPOOL_PROP_COMMENT:
646 for (check = strval; *check != '\0'; check++) {
647 if (!isprint(*check)) {
648 zfs_error_aux(hdl,
649 dgettext(TEXT_DOMAIN,
650 "comment may only have printable "
651 "characters"));
652 (void) zfs_error(hdl, EZFS_BADPROP,
653 errbuf);
654 goto error;
655 }
656 }
657 if (strlen(strval) > ZPROP_MAX_COMMENT) {
658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
659 "comment must not exceed %d characters"),
660 ZPROP_MAX_COMMENT);
661 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
662 goto error;
663 }
664 break;
665 case ZPOOL_PROP_READONLY:
666 if (!flags.import) {
667 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
668 "property '%s' can only be set at "
669 "import time"), propname);
670 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
671 goto error;
672 }
673 break;
674 case ZPOOL_PROP_TNAME:
675 if (!flags.create) {
676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
677 "property '%s' can only be set at "
678 "creation time"), propname);
679 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
680 goto error;
681 }
682 break;
683 }
684 }
685
686 return (retprops);
687 error:
688 nvlist_free(retprops);
689 return (NULL);
690 }
691
692 /*
693 * Set zpool property : propname=propval.
694 */
695 int
696 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
697 {
698 zfs_cmd_t zc = {"\0"};
699 int ret = -1;
700 char errbuf[1024];
701 nvlist_t *nvl = NULL;
702 nvlist_t *realprops;
703 uint64_t version;
704 prop_flags_t flags = { 0 };
705
706 (void) snprintf(errbuf, sizeof (errbuf),
707 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
708 zhp->zpool_name);
709
710 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
711 return (no_memory(zhp->zpool_hdl));
712
713 if (nvlist_add_string(nvl, propname, propval) != 0) {
714 nvlist_free(nvl);
715 return (no_memory(zhp->zpool_hdl));
716 }
717
718 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
719 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
720 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
721 nvlist_free(nvl);
722 return (-1);
723 }
724
725 nvlist_free(nvl);
726 nvl = realprops;
727
728 /*
729 * Execute the corresponding ioctl() to set this property.
730 */
731 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
732
733 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
734 nvlist_free(nvl);
735 return (-1);
736 }
737
738 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
739
740 zcmd_free_nvlists(&zc);
741 nvlist_free(nvl);
742
743 if (ret)
744 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
745 else
746 (void) zpool_props_refresh(zhp);
747
748 return (ret);
749 }
750
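/*
 * Illustrative usage of zpool_set_prop() (a sketch, not from the
 * original source; 'zhp' is assumed to come from zpool_open()).
 * Both the name and the value are plain strings; parsing and
 * validation are handled by zpool_valid_proplist() above:
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "failed to set comment\n");
 */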
751 int
752 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
753 {
754 libzfs_handle_t *hdl = zhp->zpool_hdl;
755 zprop_list_t *entry;
756 char buf[ZFS_MAXPROPLEN];
757 nvlist_t *features = NULL;
758 nvpair_t *nvp;
759 zprop_list_t **last;
760 boolean_t firstexpand = (NULL == *plp);
761 int i;
762
763 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
764 return (-1);
765
766 last = plp;
767 while (*last != NULL)
768 last = &(*last)->pl_next;
769
770 if ((*plp)->pl_all)
771 features = zpool_get_features(zhp);
772
773 if ((*plp)->pl_all && firstexpand) {
774 for (i = 0; i < SPA_FEATURES; i++) {
775 zprop_list_t *entry = zfs_alloc(hdl,
776 sizeof (zprop_list_t));
777 entry->pl_prop = ZPROP_INVAL;
778 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
779 spa_feature_table[i].fi_uname);
780 entry->pl_width = strlen(entry->pl_user_prop);
781 entry->pl_all = B_TRUE;
782
783 *last = entry;
784 last = &entry->pl_next;
785 }
786 }
787
788 /* add any unsupported features */
789 for (nvp = nvlist_next_nvpair(features, NULL);
790 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
791 char *propname;
792 boolean_t found;
793 zprop_list_t *entry;
794
795 if (zfeature_is_supported(nvpair_name(nvp)))
796 continue;
797
798 propname = zfs_asprintf(hdl, "unsupported@%s",
799 nvpair_name(nvp));
800
801 /*
802 * Before adding the property to the list make sure that no
803 * other pool already added the same property.
804 */
805 found = B_FALSE;
806 entry = *plp;
807 while (entry != NULL) {
808 if (entry->pl_user_prop != NULL &&
809 strcmp(propname, entry->pl_user_prop) == 0) {
810 found = B_TRUE;
811 break;
812 }
813 entry = entry->pl_next;
814 }
815 if (found) {
816 free(propname);
817 continue;
818 }
819
820 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
821 entry->pl_prop = ZPROP_INVAL;
822 entry->pl_user_prop = propname;
823 entry->pl_width = strlen(entry->pl_user_prop);
824 entry->pl_all = B_TRUE;
825
826 *last = entry;
827 last = &entry->pl_next;
828 }
829
830 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
831
832 if (entry->pl_fixed)
833 continue;
834
835 if (entry->pl_prop != ZPROP_INVAL &&
836 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
837 NULL, B_FALSE) == 0) {
838 if (strlen(buf) > entry->pl_width)
839 entry->pl_width = strlen(buf);
840 }
841 }
842
843 return (0);
844 }
845
846 /*
847 * Get the state for the given feature on the given ZFS pool.
848 */
849 int
850 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
851 size_t len)
852 {
853 uint64_t refcount;
854 boolean_t found = B_FALSE;
855 nvlist_t *features = zpool_get_features(zhp);
856 boolean_t supported;
857 const char *feature = strchr(propname, '@') + 1;
858
859 supported = zpool_prop_feature(propname);
860 ASSERT(supported || zpool_prop_unsupported(propname));
861
862 /*
863 * Convert from feature name to feature guid. This conversion is
864 * unnecessary for unsupported@... properties because they already
865 * use guids.
866 */
867 if (supported) {
868 int ret;
869 spa_feature_t fid;
870
871 ret = zfeature_lookup_name(feature, &fid);
872 if (ret != 0) {
873 (void) strlcpy(buf, "-", len);
874 return (ENOTSUP);
875 }
876 feature = spa_feature_table[fid].fi_guid;
877 }
878
879 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
880 found = B_TRUE;
881
882 if (supported) {
883 if (!found) {
884 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
885 } else {
886 if (refcount == 0)
887 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
888 else
889 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
890 }
891 } else {
892 if (found) {
893 if (refcount == 0) {
894  (void) strlcpy(buf, ZFS_UNSUPPORTED_INACTIVE, len);
895  } else {
896  (void) strlcpy(buf, ZFS_UNSUPPORTED_READONLY, len);
897 }
898 } else {
899 (void) strlcpy(buf, "-", len);
900 return (ENOTSUP);
901 }
902 }
903
904 return (0);
905 }
906
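/*
 * Summary of the mapping above (annotation): a supported
 * "feature@<name>" property reads back as "disabled" (no refcount
 * entry), "enabled" (refcount == 0) or "active" (refcount > 0); an
 * "unsupported@<guid>" property reads back as "inactive" or
 * "readonly". Unknown or absent features yield "-" and ENOTSUP.
 */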
907 /*
908 * Don't start the slice at the default block of 34; many storage
909  * devices will use a stripe width of 128k, while other vendors prefer a 1m
910 * alignment. It is best to play it safe and ensure a 1m alignment
911 * given 512B blocks. When the block size is larger by a power of 2
912 * we will still be 1m aligned. Some devices are sensitive to the
913 * partition ending alignment as well.
914 */
915 #define NEW_START_BLOCK 2048
916 #define PARTITION_END_ALIGNMENT 2048
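/* 2048 sectors x 512 bytes == 1 MiB, giving the alignment described above */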
917
918 /*
919  * Validate the given pool name, optionally reporting an extended error
920  * message through 'hdl'.
921 */
922 boolean_t
923 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
924 {
925 namecheck_err_t why;
926 char what;
927 int ret;
928
929 ret = pool_namecheck(pool, &why, &what);
930
931 /*
932 * The rules for reserved pool names were extended at a later point.
933 * But we need to support users with existing pools that may now be
934 * invalid. So we only check for this expanded set of names during a
935 * create (or import), and only in userland.
936 */
937 if (ret == 0 && !isopen &&
938 (strncmp(pool, "mirror", 6) == 0 ||
939 strncmp(pool, "raidz", 5) == 0 ||
940 strncmp(pool, "spare", 5) == 0 ||
941 strcmp(pool, "log") == 0)) {
942 if (hdl != NULL)
943 zfs_error_aux(hdl,
944 dgettext(TEXT_DOMAIN, "name is reserved"));
945 return (B_FALSE);
946 }
947
948
949 if (ret != 0) {
950 if (hdl != NULL) {
951 switch (why) {
952 case NAME_ERR_TOOLONG:
953 zfs_error_aux(hdl,
954 dgettext(TEXT_DOMAIN, "name is too long"));
955 break;
956
957 case NAME_ERR_INVALCHAR:
958 zfs_error_aux(hdl,
959 dgettext(TEXT_DOMAIN, "invalid character "
960 "'%c' in pool name"), what);
961 break;
962
963 case NAME_ERR_NOLETTER:
964 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
965 "name must begin with a letter"));
966 break;
967
968 case NAME_ERR_RESERVED:
969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
970 "name is reserved"));
971 break;
972
973 case NAME_ERR_DISKLIKE:
974 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
975 "pool name is reserved"));
976 break;
977
978 case NAME_ERR_LEADING_SLASH:
979 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
980 "leading slash in name"));
981 break;
982
983 case NAME_ERR_EMPTY_COMPONENT:
984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
985 "empty component in name"));
986 break;
987
988 case NAME_ERR_TRAILING_SLASH:
989 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
990 "trailing slash in name"));
991 break;
992
993 case NAME_ERR_MULTIPLE_DELIMITERS:
994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
995 "multiple '@' and/or '#' delimiters in "
996 "name"));
997 break;
998 case NAME_ERR_NO_AT:
999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1000 "permission set is missing '@'"));
1001 break;
1002 }
1003 }
1004 return (B_FALSE);
1005 }
1006
1007 return (B_TRUE);
1008 }
1009
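/*
 * Examples (illustrative): "tank" is accepted, while "mirror",
 * "raidz1", "spare0" and "log" are rejected as reserved during
 * create/import (isopen == B_FALSE). The reserved check matches on
 * the prefix, so a name such as "mirror2" is rejected as well.
 */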
1010 /*
1011 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1012 * state.
1013 */
1014 zpool_handle_t *
1015 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1016 {
1017 zpool_handle_t *zhp;
1018 boolean_t missing;
1019
1020 /*
1021 * Make sure the pool name is valid.
1022 */
1023 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1024 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1025 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1026 pool);
1027 return (NULL);
1028 }
1029
1030 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1031 return (NULL);
1032
1033 zhp->zpool_hdl = hdl;
1034 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1035
1036 if (zpool_refresh_stats(zhp, &missing) != 0) {
1037 zpool_close(zhp);
1038 return (NULL);
1039 }
1040
1041 if (missing) {
1042 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1043 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1044 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1045 zpool_close(zhp);
1046 return (NULL);
1047 }
1048
1049 return (zhp);
1050 }
1051
1052 /*
1053 * Like the above, but silent on error. Used when iterating over pools (because
1054 * the configuration cache may be out of date).
1055 */
1056 int
1057 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1058 {
1059 zpool_handle_t *zhp;
1060 boolean_t missing;
1061
1062 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1063 return (-1);
1064
1065 zhp->zpool_hdl = hdl;
1066 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1067
1068 if (zpool_refresh_stats(zhp, &missing) != 0) {
1069 zpool_close(zhp);
1070 return (-1);
1071 }
1072
1073 if (missing) {
1074 zpool_close(zhp);
1075 *ret = NULL;
1076 return (0);
1077 }
1078
1079 *ret = zhp;
1080 return (0);
1081 }
1082
1083 /*
1084 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1085 * state.
1086 */
1087 zpool_handle_t *
1088 zpool_open(libzfs_handle_t *hdl, const char *pool)
1089 {
1090 zpool_handle_t *zhp;
1091
1092 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1093 return (NULL);
1094
1095 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1096 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1097 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1098 zpool_close(zhp);
1099 return (NULL);
1100 }
1101
1102 return (zhp);
1103 }
1104
1105 /*
1106 * Close the handle. Simply frees the memory associated with the handle.
1107 */
1108 void
1109 zpool_close(zpool_handle_t *zhp)
1110 {
1111 nvlist_free(zhp->zpool_config);
1112 nvlist_free(zhp->zpool_old_config);
1113 nvlist_free(zhp->zpool_props);
1114 free(zhp);
1115 }
1116
1117 /*
1118 * Return the name of the pool.
1119 */
1120 const char *
1121 zpool_get_name(zpool_handle_t *zhp)
1122 {
1123 return (zhp->zpool_name);
1124 }
1125
1126
1127 /*
1128 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1129 */
1130 int
1131 zpool_get_state(zpool_handle_t *zhp)
1132 {
1133 return (zhp->zpool_state);
1134 }
1135
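/*
 * Typical handle lifecycle (a sketch, not part of the original
 * source):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *
 *	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("%s: state %d\n", zpool_get_name(zhp),
 *		    zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 *	if (hdl != NULL)
 *		libzfs_fini(hdl);
 */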
1136 /*
1137 * Create the named pool, using the provided vdev list. It is assumed
1138 * that the consumer has already validated the contents of the nvlist, so we
1139 * don't have to worry about error semantics.
1140 */
1141 int
1142 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1143 nvlist_t *props, nvlist_t *fsprops)
1144 {
1145 zfs_cmd_t zc = {"\0"};
1146 nvlist_t *zc_fsprops = NULL;
1147 nvlist_t *zc_props = NULL;
1148 char msg[1024];
1149 int ret = -1;
1150
1151 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1152 "cannot create '%s'"), pool);
1153
1154 if (!zpool_name_valid(hdl, B_FALSE, pool))
1155 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1156
1157 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1158 return (-1);
1159
1160 if (props) {
1161 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1162
1163 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1164 SPA_VERSION_1, flags, msg)) == NULL) {
1165 goto create_failed;
1166 }
1167 }
1168
1169 if (fsprops) {
1170 uint64_t zoned;
1171 char *zonestr;
1172
1173 zoned = ((nvlist_lookup_string(fsprops,
1174 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1175 strcmp(zonestr, "on") == 0);
1176
1177 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1178 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1179 goto create_failed;
1180 }
1181 if (!zc_props &&
1182 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1183 goto create_failed;
1184 }
1185 if (nvlist_add_nvlist(zc_props,
1186 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1187 goto create_failed;
1188 }
1189 }
1190
1191 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1192 goto create_failed;
1193
1194 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1195
1196 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1197
1198 zcmd_free_nvlists(&zc);
1199 nvlist_free(zc_props);
1200 nvlist_free(zc_fsprops);
1201
1202 switch (errno) {
1203 case EBUSY:
1204 /*
1205 * This can happen if the user has specified the same
1206 * device multiple times. We can't reliably detect this
1207 * until we try to add it and see we already have a
1208  * label. This can also happen if the device is
1209 * part of an active md or lvm device.
1210 */
1211 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1212 "one or more vdevs refer to the same device, or "
1213 "one of\nthe devices is part of an active md or "
1214 "lvm device"));
1215 return (zfs_error(hdl, EZFS_BADDEV, msg));
1216
1217 case ERANGE:
1218 /*
1219  * This happens if the record size is outside the allowed
1220  * size range, or is not a power of 2.
1221 *
1222 * NOTE: although zfs_valid_proplist is called earlier,
1223 * this case may have slipped through since the
1224 * pool does not exist yet and it is therefore
1225 * impossible to read properties e.g. max blocksize
1226 * from the pool.
1227 */
1228 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1229 "record size invalid"));
1230 return (zfs_error(hdl, EZFS_BADPROP, msg));
1231
1232 case EOVERFLOW:
1233 /*
1234 * This occurs when one of the devices is below
1235 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1236 * device was the problem device since there's no
1237 * reliable way to determine device size from userland.
1238 */
1239 {
1240 char buf[64];
1241
1242 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1243
1244 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1245 "one or more devices is less than the "
1246 "minimum size (%s)"), buf);
1247 }
1248 return (zfs_error(hdl, EZFS_BADDEV, msg));
1249
1250 case ENOSPC:
1251 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1252 "one or more devices is out of space"));
1253 return (zfs_error(hdl, EZFS_BADDEV, msg));
1254
1255 case ENOTBLK:
1256 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1257 "cache device must be a disk or disk slice"));
1258 return (zfs_error(hdl, EZFS_BADDEV, msg));
1259
1260 default:
1261 return (zpool_standard_error(hdl, errno, msg));
1262 }
1263 }
1264
1265 create_failed:
1266 zcmd_free_nvlists(&zc);
1267 nvlist_free(zc_props);
1268 nvlist_free(zc_fsprops);
1269 return (ret);
1270 }
1271
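/*
 * The 'nvroot' passed to zpool_create() is a vdev tree: a root
 * nvlist of type "root" with leaf vdevs in a ZPOOL_CONFIG_CHILDREN
 * array. A minimal single-disk sketch (illustrative only; the zpool
 * command performs additional setup, and error handling is omitted):
 *
 *	nvlist_t *root, *disk;
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *	ret = zpool_create(hdl, "tank", root, NULL, NULL);
 */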
1272 /*
1273 * Destroy the given pool. It is up to the caller to ensure that there are no
1274 * datasets left in the pool.
1275 */
1276 int
1277 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1278 {
1279 zfs_cmd_t zc = {"\0"};
1280 zfs_handle_t *zfp = NULL;
1281 libzfs_handle_t *hdl = zhp->zpool_hdl;
1282 char msg[1024];
1283
1284 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1285 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1286 return (-1);
1287
1288 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1289 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1290
1291 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1292 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1293 "cannot destroy '%s'"), zhp->zpool_name);
1294
1295 if (errno == EROFS) {
1296 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1297 "one or more devices is read only"));
1298 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1299 } else {
1300 (void) zpool_standard_error(hdl, errno, msg);
1301 }
1302
1303 if (zfp)
1304 zfs_close(zfp);
1305 return (-1);
1306 }
1307
1308 if (zfp) {
1309 remove_mountpoint(zfp);
1310 zfs_close(zfp);
1311 }
1312
1313 return (0);
1314 }
1315
1316 /*
1317 * Add the given vdevs to the pool. The caller must have already performed the
1318 * necessary verification to ensure that the vdev specification is well-formed.
1319 */
1320 int
1321 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1322 {
1323 zfs_cmd_t zc = {"\0"};
1324 int ret;
1325 libzfs_handle_t *hdl = zhp->zpool_hdl;
1326 char msg[1024];
1327 nvlist_t **spares, **l2cache;
1328 uint_t nspares, nl2cache;
1329
1330 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1331 "cannot add to '%s'"), zhp->zpool_name);
1332
1333 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1334 SPA_VERSION_SPARES &&
1335 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1336 &spares, &nspares) == 0) {
1337 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1338 "upgraded to add hot spares"));
1339 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1340 }
1341
1342 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1343 SPA_VERSION_L2CACHE &&
1344 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1345 &l2cache, &nl2cache) == 0) {
1346 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1347 "upgraded to add cache devices"));
1348 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1349 }
1350
1351 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1352 return (-1);
1353 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1354
1355 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1356 switch (errno) {
1357 case EBUSY:
1358 /*
1359 * This can happen if the user has specified the same
1360 * device multiple times. We can't reliably detect this
1361 * until we try to add it and see we already have a
1362 * label.
1363 */
1364 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1365 "one or more vdevs refer to the same device"));
1366 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1367 break;
1368
1369 case EOVERFLOW:
1370 /*
1371  * This occurs when one of the devices is below
1372 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1373 * device was the problem device since there's no
1374 * reliable way to determine device size from userland.
1375 */
1376 {
1377 char buf[64];
1378
1379 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1380
1381 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1382 "device is less than the minimum "
1383 "size (%s)"), buf);
1384 }
1385 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1386 break;
1387
1388 case ENOTSUP:
1389 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1390 "pool must be upgraded to add these vdevs"));
1391 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1392 break;
1393
1394 case ENOTBLK:
1395 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1396 "cache device must be a disk or disk slice"));
1397 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1398 break;
1399
1400 default:
1401 (void) zpool_standard_error(hdl, errno, msg);
1402 }
1403
1404 ret = -1;
1405 } else {
1406 ret = 0;
1407 }
1408
1409 zcmd_free_nvlists(&zc);
1410
1411 return (ret);
1412 }
1413
1414 /*
1415  * Export the pool from the system. The caller must ensure that there are no
1416 * mounted datasets in the pool.
1417 */
1418 static int
1419 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1420 const char *log_str)
1421 {
1422 zfs_cmd_t zc = {"\0"};
1423 char msg[1024];
1424
1425 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1426 "cannot export '%s'"), zhp->zpool_name);
1427
1428 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1429 zc.zc_cookie = force;
1430 zc.zc_guid = hardforce;
1431 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1432
1433 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1434 switch (errno) {
1435 case EXDEV:
1436 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1437 "use '-f' to override the following errors:\n"
1438 "'%s' has an active shared spare which could be"
1439 " used by other pools once '%s' is exported."),
1440 zhp->zpool_name, zhp->zpool_name);
1441 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1442 msg));
1443 default:
1444 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1445 msg));
1446 }
1447 }
1448
1449 return (0);
1450 }
1451
1452 int
1453 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1454 {
1455 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1456 }
1457
1458 int
1459 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1460 {
1461 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1462 }
1463
1464 static void
1465 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1466 nvlist_t *config)
1467 {
1468 nvlist_t *nv = NULL;
1469 uint64_t rewindto;
1470 int64_t loss = -1;
1471 struct tm t;
1472 char timestr[128];
1473
1474 if (!hdl->libzfs_printerr || config == NULL)
1475 return;
1476
1477 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1478 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1479 return;
1480 }
1481
1482 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1483 return;
1484 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1485
1486 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1487 strftime(timestr, 128, "%c", &t) != 0) {
1488 if (dryrun) {
1489 (void) printf(dgettext(TEXT_DOMAIN,
1490 "Would be able to return %s "
1491 "to its state as of %s.\n"),
1492 name, timestr);
1493 } else {
1494 (void) printf(dgettext(TEXT_DOMAIN,
1495 "Pool %s returned to its state as of %s.\n"),
1496 name, timestr);
1497 }
1498 if (loss > 120) {
1499 (void) printf(dgettext(TEXT_DOMAIN,
1500 "%s approximately %lld "),
1501 dryrun ? "Would discard" : "Discarded",
1502 ((longlong_t)loss + 30) / 60);
1503 (void) printf(dgettext(TEXT_DOMAIN,
1504 "minutes of transactions.\n"));
1505 } else if (loss > 0) {
1506 (void) printf(dgettext(TEXT_DOMAIN,
1507 "%s approximately %lld "),
1508 dryrun ? "Would discard" : "Discarded",
1509 (longlong_t)loss);
1510 (void) printf(dgettext(TEXT_DOMAIN,
1511 "seconds of transactions.\n"));
1512 }
1513 }
1514 }
1515
1516 void
1517 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1518 nvlist_t *config)
1519 {
1520 nvlist_t *nv = NULL;
1521 int64_t loss = -1;
1522 uint64_t edata = UINT64_MAX;
1523 uint64_t rewindto;
1524 struct tm t;
1525 char timestr[128];
1526
1527 if (!hdl->libzfs_printerr)
1528 return;
1529
1530 if (reason >= 0)
1531 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1532 else
1533 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1534
1535 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1536 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1537 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1538 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1539 goto no_info;
1540
1541 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1542 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1543 &edata);
1544
1545 (void) printf(dgettext(TEXT_DOMAIN,
1546 "Recovery is possible, but will result in some data loss.\n"));
1547
1548 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1549 strftime(timestr, 128, "%c", &t) != 0) {
1550 (void) printf(dgettext(TEXT_DOMAIN,
1551 "\tReturning the pool to its state as of %s\n"
1552 "\tshould correct the problem. "),
1553 timestr);
1554 } else {
1555 (void) printf(dgettext(TEXT_DOMAIN,
1556 "\tReverting the pool to an earlier state "
1557 "should correct the problem.\n\t"));
1558 }
1559
1560 if (loss > 120) {
1561 (void) printf(dgettext(TEXT_DOMAIN,
1562 "Approximately %lld minutes of data\n"
1563 "\tmust be discarded, irreversibly. "),
1564 ((longlong_t)loss + 30) / 60);
1565 } else if (loss > 0) {
1566 (void) printf(dgettext(TEXT_DOMAIN,
1567 "Approximately %lld seconds of data\n"
1568 "\tmust be discarded, irreversibly. "),
1569 (longlong_t)loss);
1570 }
1571 if (edata != 0 && edata != UINT64_MAX) {
1572 if (edata == 1) {
1573 (void) printf(dgettext(TEXT_DOMAIN,
1574 "After rewind, at least\n"
1575 "\tone persistent user-data error will remain. "));
1576 } else {
1577 (void) printf(dgettext(TEXT_DOMAIN,
1578 "After rewind, several\n"
1579 "\tpersistent user-data errors will remain. "));
1580 }
1581 }
1582 (void) printf(dgettext(TEXT_DOMAIN,
1583 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1584 reason >= 0 ? "clear" : "import", name);
1585
1586 (void) printf(dgettext(TEXT_DOMAIN,
1587 "A scrub of the pool\n"
1588 "\tis strongly recommended after recovery.\n"));
1589 return;
1590
1591 no_info:
1592 (void) printf(dgettext(TEXT_DOMAIN,
1593 "Destroy and re-create the pool from\n\ta backup source.\n"));
1594 }
1595
1596 /*
1597 * zpool_import() is a contracted interface. Should be kept the same
1598 * if possible.
1599 *
1600 * Applications should use zpool_import_props() to import a pool with
1601  * new property values to be set.
1602 */
1603 int
1604 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1605 char *altroot)
1606 {
1607 nvlist_t *props = NULL;
1608 int ret;
1609
1610 if (altroot != NULL) {
1611 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1612 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1613 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1614 newname));
1615 }
1616
1617 if (nvlist_add_string(props,
1618 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1619 nvlist_add_string(props,
1620 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1621 nvlist_free(props);
1622 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1623 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1624 newname));
1625 }
1626 }
1627
1628 ret = zpool_import_props(hdl, config, newname, props,
1629 ZFS_IMPORT_NORMAL);
1630 nvlist_free(props);
1631 return (ret);
1632 }
1633
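/*
 * Illustrative usage (a sketch): given a 'config' returned by
 * zpool_find_import(), re-import the pool under its original name
 * with an alternate root:
 *
 *	ret = zpool_import(hdl, config, NULL, "/mnt/recovery");
 *
 * Passing a non-NULL 'newname' renames the pool on import.
 */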
1634 static void
1635 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1636 int indent)
1637 {
1638 nvlist_t **child;
1639 uint_t c, children;
1640 char *vname;
1641 uint64_t is_log = 0;
1642
1643 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1644 &is_log);
1645
1646 if (name != NULL)
1647 (void) printf("\t%*s%s%s\n", indent, "", name,
1648 is_log ? " [log]" : "");
1649
1650 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1651 &child, &children) != 0)
1652 return;
1653
1654 for (c = 0; c < children; c++) {
1655 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1656 print_vdev_tree(hdl, vname, child[c], indent + 2);
1657 free(vname);
1658 }
1659 }
1660
1661 void
1662 zpool_print_unsup_feat(nvlist_t *config)
1663 {
1664 nvlist_t *nvinfo, *unsup_feat;
1665 nvpair_t *nvp;
1666
1667 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1668 0);
1669 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1670 &unsup_feat) == 0);
1671
1672 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1673 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1674 char *desc;
1675
1676 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1677 verify(nvpair_value_string(nvp, &desc) == 0);
1678
1679 if (strlen(desc) > 0)
1680 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1681 else
1682 (void) printf("\t%s\n", nvpair_name(nvp));
1683 }
1684 }
1685
1686 /*
1687 * Import the given pool using the known configuration and a list of
1688 * properties to be set. The configuration should have come from
1689  * zpool_find_import(). The 'newname' parameter controls whether the pool
1690 * is imported with a different name.
1691 */
1692 int
1693 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1694 nvlist_t *props, int flags)
1695 {
1696 zfs_cmd_t zc = {"\0"};
1697 zpool_rewind_policy_t policy;
1698 nvlist_t *nv = NULL;
1699 nvlist_t *nvinfo = NULL;
1700 nvlist_t *missing = NULL;
1701 char *thename;
1702 char *origname;
1703 int ret;
1704 int error = 0;
1705 char errbuf[1024];
1706
1707 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1708 &origname) == 0);
1709
1710 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1711 "cannot import pool '%s'"), origname);
1712
1713 if (newname != NULL) {
1714 if (!zpool_name_valid(hdl, B_FALSE, newname))
1715 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1716 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1717 newname));
1718 thename = (char *)newname;
1719 } else {
1720 thename = origname;
1721 }
1722
1723 if (props != NULL) {
1724 uint64_t version;
1725 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1726
1727 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1728 &version) == 0);
1729
1730 if ((props = zpool_valid_proplist(hdl, origname,
1731 props, version, flags, errbuf)) == NULL)
1732 return (-1);
1733 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1734 nvlist_free(props);
1735 return (-1);
1736 }
1737 nvlist_free(props);
1738 }
1739
1740 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1741
1742 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1743 &zc.zc_guid) == 0);
1744
1745 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1746 zcmd_free_nvlists(&zc);
1747 return (-1);
1748 }
1749 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1750 zcmd_free_nvlists(&zc);
1751 return (-1);
1752 }
1753
1754 zc.zc_cookie = flags;
1755 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1756 errno == ENOMEM) {
1757 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1758 zcmd_free_nvlists(&zc);
1759 return (-1);
1760 }
1761 }
1762 if (ret != 0)
1763 error = errno;
1764
1765 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1766
1767 zcmd_free_nvlists(&zc);
1768
1769 zpool_get_rewind_policy(config, &policy);
1770
1771 if (error) {
1772 char desc[1024];
1773
1774 /*
1775 * Dry-run failed, but we print out what success
1776 * looks like if we found a best txg
1777 */
1778 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1779 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1780 B_TRUE, nv);
1781 nvlist_free(nv);
1782 return (-1);
1783 }
1784
1785 if (newname == NULL)
1786 (void) snprintf(desc, sizeof (desc),
1787 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1788 thename);
1789 else
1790 (void) snprintf(desc, sizeof (desc),
1791 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1792 origname, thename);
1793
1794 switch (error) {
1795 case ENOTSUP:
1796 if (nv != NULL && nvlist_lookup_nvlist(nv,
1797 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1798 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1799 (void) printf(dgettext(TEXT_DOMAIN, "This "
1800 "pool uses the following feature(s) not "
1801 "supported by this system:\n"));
1802 zpool_print_unsup_feat(nv);
1803 if (nvlist_exists(nvinfo,
1804 ZPOOL_CONFIG_CAN_RDONLY)) {
1805 (void) printf(dgettext(TEXT_DOMAIN,
1806 "All unsupported features are only "
1807 "required for writing to the pool."
1808 "\nThe pool can be imported using "
1809 "'-o readonly=on'.\n"));
1810 }
1811 }
1812 /*
1813 * Unsupported version.
1814 */
1815 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1816 break;
1817
1818 case EINVAL:
1819 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1820 break;
1821
1822 case EROFS:
1823 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1824 "one or more devices is read only"));
1825 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1826 break;
1827
1828 case ENXIO:
1829 if (nv && nvlist_lookup_nvlist(nv,
1830 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1831 nvlist_lookup_nvlist(nvinfo,
1832 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1833 (void) printf(dgettext(TEXT_DOMAIN,
1834 "The devices below are missing, use "
1835 "'-m' to import the pool anyway:\n"));
1836 print_vdev_tree(hdl, NULL, missing, 2);
1837 (void) printf("\n");
1838 }
1839 (void) zpool_standard_error(hdl, error, desc);
1840 break;
1841
1842 case EEXIST:
1843 (void) zpool_standard_error(hdl, error, desc);
1844 break;
1845
1846 case EBUSY:
1847 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1848 "one or more devices are already in use\n"));
1849 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1850 break;
1851 case ENAMETOOLONG:
1852 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1853 "new name of at least one dataset is longer than "
1854 "the maximum allowable length"));
1855 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1856 break;
1857 default:
1858 (void) zpool_standard_error(hdl, error, desc);
1859 zpool_explain_recover(hdl,
1860 newname ? origname : thename, -error, nv);
1861 break;
1862 }
1863
1864 nvlist_free(nv);
1865 ret = -1;
1866 } else {
1867 zpool_handle_t *zhp;
1868
1869 /*
1870 * This should never fail, but play it safe anyway.
1871 */
1872 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1873 ret = -1;
1874 else if (zhp != NULL)
1875 zpool_close(zhp);
1876 if (policy.zrp_request &
1877 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1878 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1879 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1880 }
1881 nvlist_free(nv);
1882 return (0);
1883 }
1884
1885 return (ret);
1886 }
1887
1888 /*
1889 * Scan the pool.
1890 */
1891 int
1892 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1893 {
1894 zfs_cmd_t zc = {"\0"};
1895 char msg[1024];
1896 libzfs_handle_t *hdl = zhp->zpool_hdl;
1897
1898 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1899 zc.zc_cookie = func;
1900
1901 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1902 (errno == ENOENT && func != POOL_SCAN_NONE))
1903 return (0);
1904
1905 if (func == POOL_SCAN_SCRUB) {
1906 (void) snprintf(msg, sizeof (msg),
1907 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1908 } else if (func == POOL_SCAN_NONE) {
1909 (void) snprintf(msg, sizeof (msg),
1910 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1911 zc.zc_name);
1912 } else {
1913 assert(!"unexpected result");
1914 }
1915
1916 if (errno == EBUSY) {
1917 nvlist_t *nvroot;
1918 pool_scan_stat_t *ps = NULL;
1919 uint_t psc;
1920
1921 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1922 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1923 (void) nvlist_lookup_uint64_array(nvroot,
1924 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1925 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1926 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1927 else
1928 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1929 } else if (errno == ENOENT) {
1930 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1931 } else {
1932 return (zpool_standard_error(hdl, errno, msg));
1933 }
1934 }
1935
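/*
 * Illustrative usage (a sketch): start a scrub, or cancel one that
 * is in progress:
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);
 *
 * An EBUSY from the ioctl is mapped to EZFS_SCRUBBING or
 * EZFS_RESILVERING above, depending on the operation in flight.
 */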
1936 /*
1937  * Find a vdev that matches the search criteria specified. We use the
1938  * nvpair name to determine how we should look for the device.
1939  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1940  * spare; but FALSE if it is an INUSE spare.
1941 */
1942 static nvlist_t *
1943 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1944 boolean_t *l2cache, boolean_t *log)
1945 {
1946 uint_t c, children;
1947 nvlist_t **child;
1948 nvlist_t *ret;
1949 uint64_t is_log;
1950 char *srchkey;
1951 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1952
1953 /* Nothing to look for */
1954 if (search == NULL || pair == NULL)
1955 return (NULL);
1956
1957 /* Obtain the key we will use to search */
1958 srchkey = nvpair_name(pair);
1959
1960 switch (nvpair_type(pair)) {
1961 case DATA_TYPE_UINT64:
1962 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1963 uint64_t srchval, theguid;
1964
1965 verify(nvpair_value_uint64(pair, &srchval) == 0);
1966 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1967 &theguid) == 0);
1968 if (theguid == srchval)
1969 return (nv);
1970 }
1971 break;
1972
1973 case DATA_TYPE_STRING: {
1974 char *srchval, *val;
1975
1976 verify(nvpair_value_string(pair, &srchval) == 0);
1977 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1978 break;
1979
1980 /*
1981 * Search for the requested value. Special cases:
1982 *
1983 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1984 * "-part1", or "p1". The suffix is hidden from the user,
1985 * but included in the string, so this matches around it.
1986  * - ZPOOL_CONFIG_PATH for short names; zfs_strcmp_shortname()
1987 * is used to check all possible expanded paths.
1988 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1989 *
1990 * Otherwise, all other searches are simple string compares.
1991 */
1992 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
1993 uint64_t wholedisk = 0;
1994
1995 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1996 &wholedisk);
1997 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
1998 return (nv);
1999
2000 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2001 char *type, *idx, *end, *p;
2002 uint64_t id, vdev_id;
2003
2004 /*
2005 * Determine our vdev type, keeping in mind
2006 * that the srchval is composed of a type and
2007  * vdev id pair (e.g. mirror-4).
2008 */
2009 if ((type = strdup(srchval)) == NULL)
2010 return (NULL);
2011
2012 if ((p = strrchr(type, '-')) == NULL) {
2013 free(type);
2014 break;
2015 }
2016 idx = p + 1;
2017 *p = '\0';
2018
2019 /*
2020 * If the types don't match then keep looking.
2021 */
2022 if (strncmp(val, type, strlen(val)) != 0) {
2023 free(type);
2024 break;
2025 }
2026
2027 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2028 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2029 strncmp(type, VDEV_TYPE_MIRROR,
2030 strlen(VDEV_TYPE_MIRROR)) == 0);
2031 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2032 &id) == 0);
2033
2034 errno = 0;
2035 vdev_id = strtoull(idx, &end, 10);
2036
2037 free(type);
2038 if (errno != 0)
2039 return (NULL);
2040
2041 /*
2042 * Now verify that we have the correct vdev id.
2043 */
2044 if (vdev_id == id)
2045 return (nv);
2046 }
2047
2048 /*
2049 * Common case
2050 */
2051 if (strcmp(srchval, val) == 0)
2052 return (nv);
2053 break;
2054 }
2055
2056 default:
2057 break;
2058 }
2059
2060 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2061 &child, &children) != 0)
2062 return (NULL);
2063
2064 for (c = 0; c < children; c++) {
2065 if ((ret = vdev_to_nvlist_iter(child[c], search,
2066 avail_spare, l2cache, NULL)) != NULL) {
2067 /*
2068 * The 'is_log' value is only set for the top-level
2069 * vdev, not the leaf vdevs. So we always look up the
2070 * log device from the root of the vdev tree (where
2071 * 'log' is non-NULL).
2072 */
2073 if (log != NULL &&
2074 nvlist_lookup_uint64(child[c],
2075 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2076 is_log) {
2077 *log = B_TRUE;
2078 }
2079 return (ret);
2080 }
2081 }
2082
2083 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2084 &child, &children) == 0) {
2085 for (c = 0; c < children; c++) {
2086 if ((ret = vdev_to_nvlist_iter(child[c], search,
2087 avail_spare, l2cache, NULL)) != NULL) {
2088 *avail_spare = B_TRUE;
2089 return (ret);
2090 }
2091 }
2092 }
2093
2094 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2095 &child, &children) == 0) {
2096 for (c = 0; c < children; c++) {
2097 if ((ret = vdev_to_nvlist_iter(child[c], search,
2098 avail_spare, l2cache, NULL)) != NULL) {
2099 *l2cache = B_TRUE;
2100 return (ret);
2101 }
2102 }
2103 }
2104
2105 return (NULL);
2106 }
2107
2108 /*
2109 * Given a physical path (minus the "/devices" prefix), find the
2110 * associated vdev.
2111 */
2112 nvlist_t *
2113 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2114 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2115 {
2116 nvlist_t *search, *nvroot, *ret;
2117
2118 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2119 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2120
2121 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2122 &nvroot) == 0);
2123
2124 *avail_spare = B_FALSE;
2125 *l2cache = B_FALSE;
2126 if (log != NULL)
2127 *log = B_FALSE;
2128 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2129 nvlist_free(search);
2130
2131 return (ret);
2132 }
2133
2134 /*
2135 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2136 */
2137 boolean_t
2138 zpool_vdev_is_interior(const char *name)
2139 {
2140 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2141 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2142 return (B_TRUE);
2143 return (B_FALSE);
2144 }
2145
2146 nvlist_t *
2147 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2148 boolean_t *l2cache, boolean_t *log)
2149 {
2150 char *end;
2151 nvlist_t *nvroot, *search, *ret;
2152 uint64_t guid;
2153
2154 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2155
2156 guid = strtoull(path, &end, 0);
2157 if (guid != 0 && *end == '\0') {
2158 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2159 } else if (zpool_vdev_is_interior(path)) {
2160 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2161 } else {
2162 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2163 }
2164
2165 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2166 &nvroot) == 0);
2167
2168 *avail_spare = B_FALSE;
2169 *l2cache = B_FALSE;
2170 if (log != NULL)
2171 *log = B_FALSE;
2172 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2173 nvlist_free(search);
2174
2175 return (ret);
2176 }
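
/*
 * Usage sketch (illustrative only; the vdev name below is hypothetical):
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "mirror-0", &spare, &l2cache, &log);
 *	if (tgt != NULL && !spare && !l2cache)
 *		... operate on the returned vdev nvlist ...
 *
 * 'path' may be a vdev guid, an interior vdev name such as "mirror-0",
 * or a device path/short name such as "sda".
 */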
2177
2178 static int
2179 vdev_online(nvlist_t *nv)
2180 {
2181 uint64_t ival;
2182
2183 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2184 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2185 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2186 return (0);
2187
2188 return (1);
2189 }
2190
2191 /*
2192 * Helper function for zpool_get_physpath().
2193 */
2194 static int
2195 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2196 size_t *bytes_written)
2197 {
2198 size_t bytes_left, pos, rsz;
2199 char *tmppath;
2200 const char *format;
2201
2202 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2203 &tmppath) != 0)
2204 return (EZFS_NODEVICE);
2205
2206 pos = *bytes_written;
2207 bytes_left = physpath_size - pos;
2208 format = (pos == 0) ? "%s" : " %s";
2209
2210 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2211 *bytes_written += rsz;
2212
2213 if (rsz >= bytes_left) {
2214 /* if physpath was not copied properly, clear it */
2215 if (bytes_left != 0) {
2216 physpath[pos] = 0;
2217 }
2218 return (EZFS_NOSPC);
2219 }
2220 return (0);
2221 }
2222
2223 static int
2224 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2225 size_t *rsz, boolean_t is_spare)
2226 {
2227 char *type;
2228 int ret;
2229
2230 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2231 return (EZFS_INVALCONFIG);
2232
2233 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2234 /*
2235 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2236 * For a spare vdev, we only want to boot from the active
2237 * spare device.
2238 */
2239 if (is_spare) {
2240 uint64_t spare = 0;
2241 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2242 &spare);
2243 if (!spare)
2244 return (EZFS_INVALCONFIG);
2245 }
2246
2247 if (vdev_online(nv)) {
2248 if ((ret = vdev_get_one_physpath(nv, physpath,
2249 phypath_size, rsz)) != 0)
2250 return (ret);
2251 }
2252 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2253 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2254 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2255 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2256 nvlist_t **child;
2257 uint_t count;
2258 int i, ret;
2259
2260 if (nvlist_lookup_nvlist_array(nv,
2261 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2262 return (EZFS_INVALCONFIG);
2263
2264 for (i = 0; i < count; i++) {
2265 ret = vdev_get_physpaths(child[i], physpath,
2266 phypath_size, rsz, is_spare);
2267 if (ret == EZFS_NOSPC)
2268 return (ret);
2269 }
2270 }
2271
2272 return (EZFS_POOL_INVALARG);
2273 }
2274
2275 /*
2276 * Get phys_path for a root pool config.
2277 * Return 0 on success; non-zero on failure.
2278 */
2279 static int
2280 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2281 {
2282 size_t rsz;
2283 nvlist_t *vdev_root;
2284 nvlist_t **child;
2285 uint_t count;
2286 char *type;
2287
2288 rsz = 0;
2289
2290 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2291 &vdev_root) != 0)
2292 return (EZFS_INVALCONFIG);
2293
2294 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2295 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2296 &child, &count) != 0)
2297 return (EZFS_INVALCONFIG);
2298
2299 /*
2300 * A root pool can only have a single top-level vdev.
2301 */
2302 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2303 return (EZFS_POOL_INVALARG);
2304
2305 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2306 B_FALSE);
2307
2308 /* No online devices */
2309 if (rsz == 0)
2310 return (EZFS_NODEVICE);
2311
2312 return (0);
2313 }
2314
2315 /*
2316 * Get phys_path for a root pool
2317 * Return 0 on success; non-zero on failure.
2318 */
2319 int
2320 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2321 {
2322 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2323 phypath_size));
2324 }
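
/*
 * Usage sketch (buffer size is arbitrary; errors elided):
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot path(s): %s\n", physpath);
 *
 * On success 'physpath' holds a space-separated list of the phys_path
 * values of the online disks in the pool's single top-level vdev.
 */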
2325
2326 /*
2327 * If the device has been dynamically expanded then we need to relabel
2328 * the disk to use the new unallocated space.
2329 */
2330 static int
2331 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2332 {
2333 int fd, error;
2334
2335 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2336 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2337 "relabel '%s': unable to open device: %d"), path, errno);
2338 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2339 }
2340
2341 /*
2342 * It's possible that we might encounter an error if the device
2343 * does not have any unallocated space left. If so, we simply
2344 * ignore that error and continue on.
2345 *
2346 * Also, we don't call efi_rescan() - that would just return EBUSY.
2347 * The module will do it for us in vdev_disk_open().
2348 */
2349 error = efi_use_whole_disk(fd);
2350 (void) close(fd);
2351 if (error && error != VT_ENOSPC) {
2352 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2353 "relabel '%s': unable to read disk capacity"), path);
2354 return (zfs_error(hdl, EZFS_NOCAP, msg));
2355 }
2356 return (0);
2357 }
2358
2359 /*
2360 * Bring the specified vdev online. The 'flags' parameter is a set of the
2361 * ZFS_ONLINE_* flags.
2362 */
2363 int
2364 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2365 vdev_state_t *newstate)
2366 {
2367 zfs_cmd_t zc = {"\0"};
2368 char msg[1024];
2369 nvlist_t *tgt;
2370 boolean_t avail_spare, l2cache, islog;
2371 libzfs_handle_t *hdl = zhp->zpool_hdl;
2372 int error;
2373
2374 if (flags & ZFS_ONLINE_EXPAND) {
2375 (void) snprintf(msg, sizeof (msg),
2376 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2377 } else {
2378 (void) snprintf(msg, sizeof (msg),
2379 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2380 }
2381
2382 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2383 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2384 &islog)) == NULL)
2385 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2386
2387 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2388
2389 if (avail_spare)
2390 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2391
2392 if (flags & ZFS_ONLINE_EXPAND ||
2393 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2394 uint64_t wholedisk = 0;
2395
2396 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2397 &wholedisk);
2398
2399 /*
2400 * XXX - L2ARC 1.0 devices can't support expansion.
2401 */
2402 if (l2cache) {
2403 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2404 "cannot expand cache devices"));
2405 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2406 }
2407
2408 if (wholedisk) {
2409 const char *fullpath = path;
2410 char buf[MAXPATHLEN];
2411
2412 if (path[0] != '/') {
2413 error = zfs_resolve_shortname(path, buf,
2414 sizeof (buf));
2415 if (error != 0)
2416 return (zfs_error(hdl, EZFS_NODEVICE,
2417 msg));
2418
2419 fullpath = buf;
2420 }
2421
2422 error = zpool_relabel_disk(hdl, fullpath, msg);
2423 if (error != 0)
2424 return (error);
2425 }
2426 }
2427
2428 zc.zc_cookie = VDEV_STATE_ONLINE;
2429 zc.zc_obj = flags;
2430
2431 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2432 if (errno == EINVAL) {
2433 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2434 "from this pool into a new one. Use '%s' "
2435 "instead"), "zpool detach");
2436 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2437 }
2438 return (zpool_standard_error(hdl, errno, msg));
2439 }
2440
2441 *newstate = zc.zc_cookie;
2442 return (0);
2443 }
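
/*
 * Usage sketch (the device name is hypothetical):
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate) == 0 &&
 *	    newstate == VDEV_STATE_HEALTHY)
 *		... the device is online and any new capacity was claimed ...
 */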
2444
2445 /*
2446 * Take the specified vdev offline
2447 */
2448 int
2449 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2450 {
2451 zfs_cmd_t zc = {"\0"};
2452 char msg[1024];
2453 nvlist_t *tgt;
2454 boolean_t avail_spare, l2cache;
2455 libzfs_handle_t *hdl = zhp->zpool_hdl;
2456
2457 (void) snprintf(msg, sizeof (msg),
2458 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2459
2460 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2461 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2462 NULL)) == NULL)
2463 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2464
2465 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2466
2467 if (avail_spare)
2468 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2469
2470 zc.zc_cookie = VDEV_STATE_OFFLINE;
2471 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2472
2473 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2474 return (0);
2475
2476 switch (errno) {
2477 case EBUSY:
2478
2479 /*
2480 * There are no other replicas of this device.
2481 */
2482 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2483
2484 case EEXIST:
2485 /*
2486 * The log device has unplayed logs.
2487 */
2488 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2489
2490 default:
2491 return (zpool_standard_error(hdl, errno, msg));
2492 }
2493 }
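
/*
 * Usage sketch: take a device offline only until the next import or
 * reboot, as "zpool offline -t" does (the device name is hypothetical):
 *
 *	if (zpool_vdev_offline(zhp, "sda", B_TRUE) != 0)
 *		... the error has already been reported through 'hdl' ...
 */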
2494
2495 /*
2496 * Mark the given vdev faulted.
2497 */
2498 int
2499 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2500 {
2501 zfs_cmd_t zc = {"\0"};
2502 char msg[1024];
2503 libzfs_handle_t *hdl = zhp->zpool_hdl;
2504
2505 (void) snprintf(msg, sizeof (msg),
2506 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2507
2508 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2509 zc.zc_guid = guid;
2510 zc.zc_cookie = VDEV_STATE_FAULTED;
2511 zc.zc_obj = aux;
2512
2513 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2514 return (0);
2515
2516 switch (errno) {
2517 case EBUSY:
2518
2519 /*
2520 * There are no other replicas of this device.
2521 */
2522 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2523
2524 default:
2525 return (zpool_standard_error(hdl, errno, msg));
2526 }
2527
2528 }
2529
2530 /*
2531 * Mark the given vdev degraded.
2532 */
2533 int
2534 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2535 {
2536 zfs_cmd_t zc = {"\0"};
2537 char msg[1024];
2538 libzfs_handle_t *hdl = zhp->zpool_hdl;
2539
2540 (void) snprintf(msg, sizeof (msg),
2541 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2542
2543 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2544 zc.zc_guid = guid;
2545 zc.zc_cookie = VDEV_STATE_DEGRADED;
2546 zc.zc_obj = aux;
2547
2548 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2549 return (0);
2550
2551 return (zpool_standard_error(hdl, errno, msg));
2552 }
2553
2554 /*
2555 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2556 * a hot spare.
2557 */
2558 static boolean_t
2559 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2560 {
2561 nvlist_t **child;
2562 uint_t c, children;
2563 char *type;
2564
2565 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2566 &children) == 0) {
2567 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2568 &type) == 0);
2569
2570 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2571 children == 2 && child[which] == tgt)
2572 return (B_TRUE);
2573
2574 for (c = 0; c < children; c++)
2575 if (is_replacing_spare(child[c], tgt, which))
2576 return (B_TRUE);
2577 }
2578
2579 return (B_FALSE);
2580 }
2581
2582 /*
2583 * Attach new_disk (fully described by nvroot) to old_disk.
2584 * If 'replacing' is specified, the new disk will replace the old one.
2585 */
2586 int
2587 zpool_vdev_attach(zpool_handle_t *zhp,
2588 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2589 {
2590 zfs_cmd_t zc = {"\0"};
2591 char msg[1024];
2592 int ret;
2593 nvlist_t *tgt;
2594 boolean_t avail_spare, l2cache, islog;
2595 uint64_t val;
2596 char *newname;
2597 nvlist_t **child;
2598 uint_t children;
2599 nvlist_t *config_root;
2600 libzfs_handle_t *hdl = zhp->zpool_hdl;
2601 boolean_t rootpool = zpool_is_bootable(zhp);
2602
2603 if (replacing)
2604 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2605 "cannot replace %s with %s"), old_disk, new_disk);
2606 else
2607 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2608 "cannot attach %s to %s"), new_disk, old_disk);
2609
2610 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2611 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2612 &islog)) == NULL)
2613 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2614
2615 if (avail_spare)
2616 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2617
2618 if (l2cache)
2619 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2620
2621 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2622 zc.zc_cookie = replacing;
2623
2624 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2625 &child, &children) != 0 || children != 1) {
2626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2627 "new device must be a single disk"));
2628 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2629 }
2630
2631 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2632 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2633
2634 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2635 return (-1);
2636
2637 /*
2638 * If the target is a hot spare that has been swapped in, we can only
2639 * replace it with another hot spare.
2640 */
2641 if (replacing &&
2642 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2643 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2644 NULL) == NULL || !avail_spare) &&
2645 is_replacing_spare(config_root, tgt, 1)) {
2646 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2647 "can only be replaced by another hot spare"));
2648 free(newname);
2649 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2650 }
2651
2652 free(newname);
2653
2654 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2655 return (-1);
2656
2657 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2658
2659 zcmd_free_nvlists(&zc);
2660
2661 if (ret == 0) {
2662 if (rootpool) {
2663 /*
2664 * XXX need a better way to prevent user from
2665 * booting up a half-baked vdev.
2666 */
2667 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2668 "sure to wait until resilver is done "
2669 "before rebooting.\n"));
2670 }
2671 return (0);
2672 }
2673
2674 switch (errno) {
2675 case ENOTSUP:
2676 /*
2677 * Can't attach to or replace this type of vdev.
2678 */
2679 if (replacing) {
2680 uint64_t version = zpool_get_prop_int(zhp,
2681 ZPOOL_PROP_VERSION, NULL);
2682
2683 if (islog)
2684 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2685 "cannot replace a log with a spare"));
2686 else if (version >= SPA_VERSION_MULTI_REPLACE)
2687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2688 "already in replacing/spare config; wait "
2689 "for completion or use 'zpool detach'"));
2690 else
2691 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2692 "cannot replace a replacing device"));
2693 } else {
2694 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2695 "can only attach to mirrors and top-level "
2696 "disks"));
2697 }
2698 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2699 break;
2700
2701 case EINVAL:
2702 /*
2703 * The new device must be a single disk.
2704 */
2705 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2706 "new device must be a single disk"));
2707 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2708 break;
2709
2710 case EBUSY:
2711 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2712 new_disk);
2713 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2714 break;
2715
2716 case EOVERFLOW:
2717 /*
2718 * The new device is too small.
2719 */
2720 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2721 "device is too small"));
2722 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2723 break;
2724
2725 case EDOM:
2726 /*
2727 * The new device has a different optimal sector size.
2728 */
2729 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2730 "new device has a different optimal sector size; use the "
2731 "option '-o ashift=N' to override the optimal size"));
2732 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2733 break;
2734
2735 case ENAMETOOLONG:
2736 /*
2737 * The resulting top-level vdev spec won't fit in the label.
2738 */
2739 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2740 break;
2741
2742 default:
2743 (void) zpool_standard_error(hdl, errno, msg);
2744 }
2745
2746 return (-1);
2747 }
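
/*
 * Usage sketch. 'nvroot' must be a root vdev nvlist describing exactly one
 * new disk; the zpool(8) command builds it with make_root_vdev(), which
 * lives outside this library and is named here only for illustration.
 * With 'replacing' set the call behaves like "zpool replace", otherwise
 * like "zpool attach" (device names are hypothetical):
 *
 *	int ret = zpool_vdev_attach(zhp, "sda", "sdb", nvroot, B_TRUE);
 */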
2748
2749 /*
2750 * Detach the specified device.
2751 */
2752 int
2753 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2754 {
2755 zfs_cmd_t zc = {"\0"};
2756 char msg[1024];
2757 nvlist_t *tgt;
2758 boolean_t avail_spare, l2cache;
2759 libzfs_handle_t *hdl = zhp->zpool_hdl;
2760
2761 (void) snprintf(msg, sizeof (msg),
2762 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2763
2764 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2765 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2766 NULL)) == NULL)
2767 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2768
2769 if (avail_spare)
2770 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2771
2772 if (l2cache)
2773 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2774
2775 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2776
2777 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2778 return (0);
2779
2780 switch (errno) {
2781
2782 case ENOTSUP:
2783 /*
2784 * Can't detach from this type of vdev.
2785 */
2786 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2787 "applicable to mirror and replacing vdevs"));
2788 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2789 break;
2790
2791 case EBUSY:
2792 /*
2793 * There are no other replicas of this device.
2794 */
2795 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2796 break;
2797
2798 default:
2799 (void) zpool_standard_error(hdl, errno, msg);
2800 }
2801
2802 return (-1);
2803 }
2804
2805 /*
2806 * Find a mirror vdev in the source nvlist.
2807 *
2808 * The mchild array contains a list of disks in one of the top-level mirrors
2809 * of the source pool. The schild array contains a list of disks that the
2810 * user specified on the command line. We loop over the mchild array to
2811 * see if any entry in the schild array matches.
2812 *
2813 * If a disk in the mchild array is found in the schild array, we return
2814 * the index of that entry. Otherwise we return -1.
2815 */
2816 static int
2817 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2818 nvlist_t **schild, uint_t schildren)
2819 {
2820 uint_t mc;
2821
2822 for (mc = 0; mc < mchildren; mc++) {
2823 uint_t sc;
2824 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2825 mchild[mc], 0);
2826
2827 for (sc = 0; sc < schildren; sc++) {
2828 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2829 schild[sc], 0);
2830 boolean_t result = (strcmp(mpath, spath) == 0);
2831
2832 free(spath);
2833 if (result) {
2834 free(mpath);
2835 return (mc);
2836 }
2837 }
2838
2839 free(mpath);
2840 }
2841
2842 return (-1);
2843 }
2844
2845 /*
2846 * Split a mirror pool. If '*newroot' is NULL, a new nvlist is
2847 * generated and it is the responsibility of the caller to free it.
2848 */
2849 int
2850 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2851 nvlist_t *props, splitflags_t flags)
2852 {
2853 zfs_cmd_t zc = {"\0"};
2854 char msg[1024];
2855 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2856 nvlist_t **varray = NULL, *zc_props = NULL;
2857 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2858 libzfs_handle_t *hdl = zhp->zpool_hdl;
2859 uint64_t vers;
2860 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2861 int retval = 0;
2862
2863 (void) snprintf(msg, sizeof (msg),
2864 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2865
2866 if (!zpool_name_valid(hdl, B_FALSE, newname))
2867 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2868
2869 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2870 (void) fprintf(stderr, gettext("Internal error: unable to "
2871 "retrieve pool configuration\n"));
2872 return (-1);
2873 }
2874
2875 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2876 == 0);
2877 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2878
2879 if (props) {
2880 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2881 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2882 props, vers, flags, msg)) == NULL)
2883 return (-1);
2884 }
2885
2886 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2887 &children) != 0) {
2888 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2889 "Source pool is missing vdev tree"));
2890 nvlist_free(zc_props);
2891 return (-1);
2892 }
2893
2894 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2895 vcount = 0;
2896
2897 if (*newroot == NULL ||
2898 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2899 &newchild, &newchildren) != 0)
2900 newchildren = 0;
2901
2902 for (c = 0; c < children; c++) {
2903 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2904 char *type;
2905 nvlist_t **mchild, *vdev;
2906 uint_t mchildren;
2907 int entry;
2908
2909 /*
2910 * Unlike cache & spares, slogs are stored in the
2911 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2912 */
2913 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2914 &is_log);
2915 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2916 &is_hole);
2917 if (is_log || is_hole) {
2918 /*
2919 * Create a hole vdev and put it in the config.
2920 */
2921 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2922 goto out;
2923 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2924 VDEV_TYPE_HOLE) != 0)
2925 goto out;
2926 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2927 1) != 0)
2928 goto out;
2929 if (lastlog == 0)
2930 lastlog = vcount;
2931 varray[vcount++] = vdev;
2932 continue;
2933 }
2934 lastlog = 0;
2935 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2936 == 0);
2937 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2938 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2939 "Source pool must be composed only of mirrors\n"));
2940 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2941 goto out;
2942 }
2943
2944 verify(nvlist_lookup_nvlist_array(child[c],
2945 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2946
2947 /* find or add an entry for this top-level vdev */
2948 if (newchildren > 0 &&
2949 (entry = find_vdev_entry(zhp, mchild, mchildren,
2950 newchild, newchildren)) >= 0) {
2951 /* We found a disk that the user specified. */
2952 vdev = mchild[entry];
2953 ++found;
2954 } else {
2955 /* User didn't specify a disk for this vdev. */
2956 vdev = mchild[mchildren - 1];
2957 }
2958
2959 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2960 goto out;
2961 }
2962
2963 /* did we find every disk the user specified? */
2964 if (found != newchildren) {
2965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2966 "include at most one disk from each mirror"));
2967 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2968 goto out;
2969 }
2970
2971 /* Prepare the nvlist for populating. */
2972 if (*newroot == NULL) {
2973 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2974 goto out;
2975 freelist = B_TRUE;
2976 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2977 VDEV_TYPE_ROOT) != 0)
2978 goto out;
2979 } else {
2980 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2981 }
2982
2983 /* Add all the children we found */
2984 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2985 lastlog == 0 ? vcount : lastlog) != 0)
2986 goto out;
2987
2988 /*
2989 * If we're just doing a dry run, exit now with success.
2990 */
2991 if (flags.dryrun) {
2992 memory_err = B_FALSE;
2993 freelist = B_FALSE;
2994 goto out;
2995 }
2996
2997 /* now build up the config list & call the ioctl */
2998 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2999 goto out;
3000
3001 if (nvlist_add_nvlist(newconfig,
3002 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3003 nvlist_add_string(newconfig,
3004 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3005 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3006 goto out;
3007
3008 /*
3009 * The new pool is automatically part of the namespace unless we
3010 * explicitly export it.
3011 */
3012 if (!flags.import)
3013 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3014 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3015 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3016 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3017 goto out;
3018 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3019 goto out;
3020
3021 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3022 retval = zpool_standard_error(hdl, errno, msg);
3023 goto out;
3024 }
3025
3026 freelist = B_FALSE;
3027 memory_err = B_FALSE;
3028
3029 out:
3030 if (varray != NULL) {
3031 int v;
3032
3033 for (v = 0; v < vcount; v++)
3034 nvlist_free(varray[v]);
3035 free(varray);
3036 }
3037 zcmd_free_nvlists(&zc);
3038 nvlist_free(zc_props);
3039 nvlist_free(newconfig);
3040 if (freelist) {
3041 nvlist_free(*newroot);
3042 *newroot = NULL;
3043 }
3044
3045 if (retval != 0)
3046 return (retval);
3047
3048 if (memory_err)
3049 return (no_memory(hdl));
3050
3051 return (0);
3052 }
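
/*
 * Usage sketch: a dry-run split, as "zpool split -n" performs it (the
 * pool name is hypothetical; with dryrun set, '*newroot' survives the
 * call and the caller must free it):
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		... inspect the would-be config in 'newroot' ...
 *		nvlist_free(newroot);
 *	}
 */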
3053
3054 /*
3055 * Remove the given device. Currently, this is supported only for hot spares,
3056 * cache, and log devices.
3057 */
3058 int
3059 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3060 {
3061 zfs_cmd_t zc = {"\0"};
3062 char msg[1024];
3063 nvlist_t *tgt;
3064 boolean_t avail_spare, l2cache, islog;
3065 libzfs_handle_t *hdl = zhp->zpool_hdl;
3066 uint64_t version;
3067
3068 (void) snprintf(msg, sizeof (msg),
3069 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3070
3071 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3072 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3073 &islog)) == NULL)
3074 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3075 /*
3076 * XXX - this should just go away.
3077 */
3078 if (!avail_spare && !l2cache && !islog) {
3079 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3080 "only inactive hot spares, cache, "
3081 "or log devices can be removed"));
3082 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3083 }
3084
3085 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3086 if (islog && version < SPA_VERSION_HOLES) {
3087 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3088 "pool must be upgrade to support log removal"));
3089 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3090 }
3091
3092 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3093
3094 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3095 return (0);
3096
3097 return (zpool_standard_error(hdl, errno, msg));
3098 }
3099
3100 /*
3101 * Clear the errors for the pool, or the particular device if specified.
3102 */
3103 int
3104 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3105 {
3106 zfs_cmd_t zc = {"\0"};
3107 char msg[1024];
3108 nvlist_t *tgt;
3109 zpool_rewind_policy_t policy;
3110 boolean_t avail_spare, l2cache;
3111 libzfs_handle_t *hdl = zhp->zpool_hdl;
3112 nvlist_t *nvi = NULL;
3113 int error;
3114
3115 if (path)
3116 (void) snprintf(msg, sizeof (msg),
3117 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3118 path);
3119 else
3120 (void) snprintf(msg, sizeof (msg),
3121 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3122 zhp->zpool_name);
3123
3124 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3125 if (path) {
3126 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3127 &l2cache, NULL)) == NULL)
3128 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3129
3130 /*
3131 * Don't allow error clearing for hot spares. Do allow
3132 * error clearing for l2cache devices.
3133 */
3134 if (avail_spare)
3135 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3136
3137 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3138 &zc.zc_guid) == 0);
3139 }
3140
3141 zpool_get_rewind_policy(rewindnvl, &policy);
3142 zc.zc_cookie = policy.zrp_request;
3143
3144 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3145 return (-1);
3146
3147 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3148 return (-1);
3149
3150 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3151 errno == ENOMEM) {
3152 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3153 zcmd_free_nvlists(&zc);
3154 return (-1);
3155 }
3156 }
3157
3158 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3159 errno != EPERM && errno != EACCES)) {
3160 if (policy.zrp_request &
3161 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3162 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3163 zpool_rewind_exclaim(hdl, zc.zc_name,
3164 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3165 nvi);
3166 nvlist_free(nvi);
3167 }
3168 zcmd_free_nvlists(&zc);
3169 return (0);
3170 }
3171
3172 zcmd_free_nvlists(&zc);
3173 return (zpool_standard_error(hdl, errno, msg));
3174 }
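
/*
 * Usage sketch: clear error counts for the whole pool without attempting
 * a rewind, mirroring how the zpool command builds its policy nvlist
 * (assumes the ZPOOL_REWIND_REQUEST key read by zpool_get_rewind_policy()):
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	nvlist_free(policy);
 */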
3175
3176 /*
3177 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3178 */
3179 int
3180 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3181 {
3182 zfs_cmd_t zc = {"\0"};
3183 char msg[1024];
3184 libzfs_handle_t *hdl = zhp->zpool_hdl;
3185
3186 (void) snprintf(msg, sizeof (msg),
3187 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3188 (u_longlong_t)guid);
3189
3190 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3191 zc.zc_guid = guid;
3192 zc.zc_cookie = ZPOOL_NO_REWIND;
3193
3194 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3195 return (0);
3196
3197 return (zpool_standard_error(hdl, errno, msg));
3198 }
3199
3200 /*
3201 * Change the GUID for a pool.
3202 */
3203 int
3204 zpool_reguid(zpool_handle_t *zhp)
3205 {
3206 char msg[1024];
3207 libzfs_handle_t *hdl = zhp->zpool_hdl;
3208 zfs_cmd_t zc = {"\0"};
3209
3210 (void) snprintf(msg, sizeof (msg),
3211 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3212
3213 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3214 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3215 return (0);
3216
3217 return (zpool_standard_error(hdl, errno, msg));
3218 }
3219
3220 /*
3221 * Reopen the pool.
3222 */
3223 int
3224 zpool_reopen(zpool_handle_t *zhp)
3225 {
3226 zfs_cmd_t zc = {"\0"};
3227 char msg[1024];
3228 libzfs_handle_t *hdl = zhp->zpool_hdl;
3229
3230 (void) snprintf(msg, sizeof (msg),
3231 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3232 zhp->zpool_name);
3233
3234 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3235 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3236 return (0);
3237 return (zpool_standard_error(hdl, errno, msg));
3238 }
3239
3240 #if defined(__sun__) || defined(__sun)
3241 /*
3242 * Convert from a devid string to a path.
3243 */
3244 static char *
3245 devid_to_path(char *devid_str)
3246 {
3247 ddi_devid_t devid;
3248 char *minor;
3249 char *path;
3250 devid_nmlist_t *list = NULL;
3251 int ret;
3252
3253 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3254 return (NULL);
3255
3256 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3257
3258 devid_str_free(minor);
3259 devid_free(devid);
3260
3261 if (ret != 0)
3262 return (NULL);
3263
3264 /*
3265 * In case the strdup() fails, we will just return NULL below.
3266 */
3267 path = strdup(list[0].devname);
3268
3269 devid_free_nmlist(list);
3270
3271 return (path);
3272 }
3273
3274 /*
3275 * Convert from a path to a devid string.
3276 */
3277 static char *
3278 path_to_devid(const char *path)
3279 {
3280 int fd;
3281 ddi_devid_t devid;
3282 char *minor, *ret;
3283
3284 if ((fd = open(path, O_RDONLY)) < 0)
3285 return (NULL);
3286
3287 minor = NULL;
3288 ret = NULL;
3289 if (devid_get(fd, &devid) == 0) {
3290 if (devid_get_minor_name(fd, &minor) == 0)
3291 ret = devid_str_encode(devid, minor);
3292 if (minor != NULL)
3293 devid_str_free(minor);
3294 devid_free(devid);
3295 }
3296 (void) close(fd);
3297
3298 return (ret);
3299 }
3300
3301 /*
3302 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3303 * ignore any failure here, since a common case is for an unprivileged user to
3304 * type 'zpool status', and we'll display the correct information anyway.
3305 */
3306 static void
3307 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3308 {
3309 zfs_cmd_t zc = {"\0"};
3310
3311 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3312 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3313 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3314 &zc.zc_guid) == 0);
3315
3316 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3317 }
3318 #endif /* sun */
3319
3320 /*
3321 * Remove partition suffix from a vdev path. Partition suffixes may take three
3322 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3323 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3324 * third case only occurs when preceded by a string matching the regular
3325 * expression "^([hsv]|xv)d[a-z]+", i.e. a SCSI, IDE, virtio or Xen disk.
3326 *
3327 * The caller must free the returned string.
3328 */
3329 char *
3330 zfs_strip_partition(char *path)
3331 {
3332 char *tmp = strdup(path);
3333 char *part = NULL, *d = NULL;
3334 if (!tmp)
3335 return (NULL);
3336
3337 if ((part = strstr(tmp, "-part")) && part != tmp) {
3338 d = part + 5;
3339 } else if ((part = strrchr(tmp, 'p')) &&
3340 part > tmp + 1 && isdigit(*(part-1))) {
3341 d = part + 1;
3342 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3343 tmp[1] == 'd') {
3344 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3345 } else if (strncmp("xvd", tmp, 3) == 0) {
3346 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3347 }
3348 if (part && d && *d != '\0') {
3349 for (; isdigit(*d); d++) { }
3350 if (*d == '\0')
3351 *part = '\0';
3352 }
3353
3354 return (tmp);
3355 }
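
/*
 * Worked examples for the three suffix forms handled above (the device
 * names are hypothetical):
 *
 *	"sda1"      -> "sda"	(digit suffix on a scsi/ide-style name)
 *	"md126p2"   -> "md126"	("pX" suffix preceded by a digit)
 *	"vdb-part3" -> "vdb"	("-partX" suffix)
 */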
3356
3357 /*
3358 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3359 *
3360 * path: /dev/sda1
3361 * returns: /dev/sda
3362 *
3363 * Returned string must be freed.
3364 */
3365 char *
3366 zfs_strip_partition_path(char *path)
3367 {
3368 char *newpath = strdup(path);
3369 char *sd_offset;
3370 char *new_sd;
3371
3372 if (!newpath)
3373 return (NULL);
3374
3375 /* Point to "sda1" part of "/dev/sda1" */
3376 sd_offset = strrchr(newpath, '/') + 1;
3377
3378 /* Get our new name "sda" */
3379 new_sd = zfs_strip_partition(sd_offset);
3380 if (!new_sd) {
3381 free(newpath);
3382 return (NULL);
3383 }
3384
3385 /* Paste the "sda" where "sda1" was */
3386 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3387
3388 /* Free temporary "sda" */
3389 free(new_sd);
3390
3391 return (newpath);
3392 }
3393
3394 #define PATH_BUF_LEN 64
3395
3396 /*
3397 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3398 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3399 * We also check if this is a whole disk, in which case we strip off the
3400 * trailing 's0' slice name.
3401 *
3402 * This routine is also responsible for identifying when disks have been
3403 * reconfigured in a new location. The kernel will have opened the device by
3404 * devid, but the path will still refer to the old location. To catch this, we
3405 * first do a path -> devid translation (which is fast for the common case). If
3406 * the devid matches, we're done. If not, we do a reverse devid -> path
3407 * translation and issue the appropriate ioctl() to update the path of the vdev.
3408 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3409 * of these checks.
3410 */
3411 char *
3412 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3413 int name_flags)
3414 {
3415 char *path, *type, *env;
3416 uint64_t value;
3417 char buf[PATH_BUF_LEN];
3418 char tmpbuf[PATH_BUF_LEN];
3419
3420 env = getenv("ZPOOL_VDEV_NAME_PATH");
3421 if (env && (strtoul(env, NULL, 0) > 0 ||
3422 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3423 name_flags |= VDEV_NAME_PATH;
3424
3425 env = getenv("ZPOOL_VDEV_NAME_GUID");
3426 if (env && (strtoul(env, NULL, 0) > 0 ||
3427 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3428 name_flags |= VDEV_NAME_GUID;
3429
3430 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3431 if (env && (strtoul(env, NULL, 0) > 0 ||
3432 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3433 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3434
3435 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3436 name_flags & VDEV_NAME_GUID) {
3437 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3438 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3439 path = buf;
3440 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3441 #if defined(__sun__) || defined(__sun)
3442 /*
3443 * Live VDEV path updates to a kernel VDEV during a
3444 * zpool_vdev_name lookup are not supported on Linux.
3445 */
3446 char *devid;
3447 vdev_stat_t *vs;
3448 uint_t vsc;
3449
3450 /*
3451 * If the device is dead (faulted, offline, etc) then don't
3452 * bother opening it. Otherwise we may be forcing the user to
3453 * open a misbehaving device, which can have undesirable
3454 * effects.
3455 */
3456 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3457 (uint64_t **)&vs, &vsc) != 0 ||
3458 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3459 zhp != NULL &&
3460 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3461 /*
3462 * Determine if the current path is correct.
3463 */
3464 char *newdevid = path_to_devid(path);
3465
3466 if (newdevid == NULL ||
3467 strcmp(devid, newdevid) != 0) {
3468 char *newpath;
3469
3470 if ((newpath = devid_to_path(devid)) != NULL) {
3471 /*
3472 * Update the path appropriately.
3473 */
3474 set_path(zhp, nv, newpath);
3475 if (nvlist_add_string(nv,
3476 ZPOOL_CONFIG_PATH, newpath) == 0)
3477 verify(nvlist_lookup_string(nv,
3478 ZPOOL_CONFIG_PATH,
3479 &path) == 0);
3480 free(newpath);
3481 }
3482 }
3483
3484 if (newdevid)
3485 devid_str_free(newdevid);
3486 }
3487 #endif /* sun */
3488
3489 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3490 char *rp = realpath(path, NULL);
3491 if (rp) {
3492 strlcpy(buf, rp, sizeof (buf));
3493 path = buf;
3494 free(rp);
3495 }
3496 }
3497
3498 /*
3499 * For a block device only use the name.
3500 */
3501 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3502 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3503 !(name_flags & VDEV_NAME_PATH)) {
3504 if (strrchr(path, '/') != NULL)
3505 path = strrchr(path, '/') + 1;
3506 }
3507
3508 /*
3509 * Remove the partition from the path if this is a whole disk.
3510 */
3511 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3512 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3513 return (zfs_strip_partition(path));
3514 }
3515 } else {
3516 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3517
3518 /*
3519 * If it's a raidz device, we need to stick in the parity level.
3520 */
3521 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3522 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3523 &value) == 0);
3524 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3525 (u_longlong_t)value);
3526 path = buf;
3527 }
3528
3529 /*
3530 * We identify each top-level vdev by using a <type-id>
3531 * naming convention.
3532 */
3533 if (name_flags & VDEV_NAME_TYPE_ID) {
3534 uint64_t id;
3535 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3536 &id) == 0);
3537 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3538 path, (u_longlong_t)id);
3539 path = tmpbuf;
3540 }
3541 }
3542
3543 return (zfs_strdup(hdl, path));
3544 }
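
/*
 * Naming sketch (example values are hypothetical): a leaf disk vdev with
 * path "/dev/sda1" that is marked as a whole disk is reported as "sda";
 * with ZPOOL_VDEV_NAME_PATH=1 in the environment (or VDEV_NAME_PATH in
 * 'name_flags') the full "/dev/sda1" is returned instead.  Interior vdevs
 * come back as e.g. "raidz2", or "raidz2-0" with VDEV_NAME_TYPE_ID.
 */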
3545
3546 static int
3547 zbookmark_mem_compare(const void *a, const void *b)
3548 {
3549 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3550 }
3551
3552 /*
3553 * Retrieve the persistent error log, uniquify the members, and return to the
3554 * caller.
3555 */
3556 int
3557 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3558 {
3559 zfs_cmd_t zc = {"\0"};
3560 uint64_t count;
3561 zbookmark_phys_t *zb = NULL;
3562 int i;
3563
3564 /*
3565 * Retrieve the raw error list from the kernel. If the number of errors
3566 * has increased, allocate more space and continue until we get the
3567 * entire list.
3568 */
3569 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3570 &count) == 0);
3571 if (count == 0)
3572 return (0);
3573 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3574 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3575 return (-1);
3576 zc.zc_nvlist_dst_size = count;
3577 (void) strcpy(zc.zc_name, zhp->zpool_name);
3578 for (;;) {
3579 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3580 &zc) != 0) {
3581 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3582 if (errno == ENOMEM) {
3583 void *dst;
3584
3585 count = zc.zc_nvlist_dst_size;
3586 dst = zfs_alloc(zhp->zpool_hdl, count *
3587 sizeof (zbookmark_phys_t));
3588 if (dst == NULL)
3589 return (-1);
3590 zc.zc_nvlist_dst = (uintptr_t)dst;
3591 } else {
3592 return (-1);
3593 }
3594 } else {
3595 break;
3596 }
3597 }
3598
3599 /*
3600 * Sort the resulting bookmarks. This is a little confusing due to the
3601 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3602 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
3603 * _not_ copied as part of the process. So we point the start of our
3604 * array appropriate and decrement the total number of elements.
3605 */
3606 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3607 zc.zc_nvlist_dst_size;
3608 count -= zc.zc_nvlist_dst_size;
3609
3610 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3611
3612 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3613
3614 /*
3615 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3616 */
3617 for (i = 0; i < count; i++) {
3618 nvlist_t *nv;
3619
3620 /* ignoring zb_blkid and zb_level for now */
3621 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3622 zb[i-1].zb_object == zb[i].zb_object)
3623 continue;
3624
3625 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3626 goto nomem;
3627 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3628 zb[i].zb_objset) != 0) {
3629 nvlist_free(nv);
3630 goto nomem;
3631 }
3632 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3633 zb[i].zb_object) != 0) {
3634 nvlist_free(nv);
3635 goto nomem;
3636 }
3637 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3638 nvlist_free(nv);
3639 goto nomem;
3640 }
3641 nvlist_free(nv);
3642 }
3643
3644 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3645 return (0);
3646
3647 nomem:
3648 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3649 return (no_memory(zhp->zpool_hdl));
3650 }
3651
3652 /*
3653 * Upgrade a ZFS pool to the latest on-disk version.
3654 */
3655 int
3656 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3657 {
3658 zfs_cmd_t zc = {"\0"};
3659 libzfs_handle_t *hdl = zhp->zpool_hdl;
3660
3661 (void) strcpy(zc.zc_name, zhp->zpool_name);
3662 zc.zc_cookie = new_version;
3663
3664 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3665 return (zpool_standard_error_fmt(hdl, errno,
3666 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3667 zhp->zpool_name));
3668 return (0);
3669 }
3670
3671 void
3672 zfs_save_arguments(int argc, char **argv, char *string, int len)
3673 {
3674 int i;
3675
3676 (void) strlcpy(string, basename(argv[0]), len);
3677 for (i = 1; i < argc; i++) {
3678 (void) strlcat(string, " ", len);
3679 (void) strlcat(string, argv[i], len);
3680 }
3681 }
3682
3683 int
3684 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3685 {
3686 zfs_cmd_t zc = {"\0"};
3687 nvlist_t *args;
3688 int err;
3689
3690 args = fnvlist_alloc();
3691 fnvlist_add_string(args, "message", message);
3692 err = zcmd_write_src_nvlist(hdl, &zc, args);
3693 if (err == 0)
3694 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3695 nvlist_free(args);
3696 zcmd_free_nvlists(&zc);
3697 return (err);
3698 }
3699
3700 /*
3701 * Perform ioctl to get some command history of a pool.
3702 *
3703 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3704 * logical offset of the history buffer to start reading from.
3705 *
3706 * Upon return, 'off' is the next logical offset to read from and
3707 * 'len' is the actual number of bytes read into 'buf'.
3708 */
3709 static int
3710 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3711 {
3712 zfs_cmd_t zc = {"\0"};
3713 libzfs_handle_t *hdl = zhp->zpool_hdl;
3714
3715 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3716
3717 zc.zc_history = (uint64_t)(uintptr_t)buf;
3718 zc.zc_history_len = *len;
3719 zc.zc_history_offset = *off;
3720
3721 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3722 switch (errno) {
3723 case EPERM:
3724 return (zfs_error_fmt(hdl, EZFS_PERM,
3725 dgettext(TEXT_DOMAIN,
3726 "cannot show history for pool '%s'"),
3727 zhp->zpool_name));
3728 case ENOENT:
3729 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3730 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3731 "'%s'"), zhp->zpool_name));
3732 case ENOTSUP:
3733 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3734 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3735 "'%s', pool must be upgraded"), zhp->zpool_name));
3736 default:
3737 return (zpool_standard_error_fmt(hdl, errno,
3738 dgettext(TEXT_DOMAIN,
3739 "cannot get history for '%s'"), zhp->zpool_name));
3740 }
3741 }
3742
3743 *len = zc.zc_history_len;
3744 *off = zc.zc_history_offset;
3745
3746 return (0);
3747 }
3748
3749 /*
3750 * Process the buffer of nvlists, unpacking and storing each nvlist record
3751 * into 'records'. 'leftover' is set to the number of bytes that weren't
3752 * processed as there wasn't a complete record.
3753 */
3754 int
3755 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3756 nvlist_t ***records, uint_t *numrecords)
3757 {
3758 uint64_t reclen;
3759 nvlist_t *nv;
3760 int i;
3761 void *tmp;
3762
3763 while (bytes_read > sizeof (reclen)) {
3764
3765 /* get length of packed record (stored as little endian) */
3766 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3767 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3768
3769 if (bytes_read < sizeof (reclen) + reclen)
3770 break;
3771
3772 /* unpack record */
3773 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3774 return (ENOMEM);
3775 bytes_read -= sizeof (reclen) + reclen;
3776 buf += sizeof (reclen) + reclen;
3777
3778 /* add record to nvlist array */
3779 (*numrecords)++;
3780 if (ISP2(*numrecords + 1)) {
3781 tmp = realloc(*records,
3782 *numrecords * 2 * sizeof (nvlist_t *));
3783 if (tmp == NULL) {
3784 nvlist_free(nv);
3785 (*numrecords)--;
3786 return (ENOMEM);
3787 }
3788 *records = tmp;
3789 }
3790 (*records)[*numrecords - 1] = nv;
3791 }
3792
3793 *leftover = bytes_read;
3794 return (0);
3795 }
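
/*
 * Record framing sketch: each history record is a little-endian 64-bit
 * length followed by that many bytes of packed nvlist.  A 24-byte record
 * is laid out as (hypothetical bytes):
 *
 *	18 00 00 00 00 00 00 00 | <24 bytes of packed nvlist>
 *
 * so the decode loop above reconstructs reclen = 0x18 = 24 and unpacks
 * the payload that immediately follows the length word.
 */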
3796
3797 /*
3798 * Retrieve the command history of a pool.
3799 */
3800 int
3801 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3802 {
3803 char *buf;
3804 int buflen = 128 * 1024;
3805 uint64_t off = 0;
3806 nvlist_t **records = NULL;
3807 uint_t numrecords = 0;
3808 int err, i;
3809
3810 buf = malloc(buflen);
3811 if (buf == NULL)
3812 return (ENOMEM);
3813 do {
3814 uint64_t bytes_read = buflen;
3815 uint64_t leftover;
3816
3817 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3818 break;
3819
3820 /* if nothing else was read in, we're at EOF, just return */
3821 if (!bytes_read)
3822 break;
3823
3824 if ((err = zpool_history_unpack(buf, bytes_read,
3825 &leftover, &records, &numrecords)) != 0)
3826 break;
3827 off -= leftover;
3828 if (leftover == bytes_read) {
3829 /*
3830 * no progress made, because buffer is not big enough
3831 * to hold this record; resize and retry.
3832 */
3833 buflen *= 2;
3834 free(buf);
3835 buf = malloc(buflen);
3836 if (buf == NULL)
3837 return (ENOMEM);
3838 }
3839
3840 /* CONSTCOND */
3841 } while (1);
3842
3843 free(buf);
3844
3845 if (!err) {
3846 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3847 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3848 records, numrecords) == 0);
3849 }
3850 for (i = 0; i < numrecords; i++)
3851 nvlist_free(records[i]);
3852 free(records);
3853
3854 return (err);
3855 }
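
/*
 * Usage sketch (error handling elided; the caller owns the result):
 *
 *	nvlist_t *nvhis;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		... iterate the ZPOOL_HIST_RECORD nvlist array ...
 *		nvlist_free(nvhis);
 *	}
 */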
3856
3857 /*
3858 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3859 * If there is a new event available 'nvp' will contain a newly allocated
3860 * nvlist and 'dropped' will be set to the number of missed events since
3861 * the last call to this function. When 'nvp' is set to NULL it indicates
3862 * no new events are available. In either case the function returns 0 and
3863 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3864 * function will return a non-zero value. When the function is called in
3865 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3866 * it will not return until a new event is available.
3867 */
3868 int
3869 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3870 int *dropped, unsigned flags, int zevent_fd)
3871 {
3872 zfs_cmd_t zc = {"\0"};
3873 int error = 0;
3874
3875 *nvp = NULL;
3876 *dropped = 0;
3877 zc.zc_cleanup_fd = zevent_fd;
3878
3879 if (flags & ZEVENT_NONBLOCK)
3880 zc.zc_guid = ZEVENT_NONBLOCK;
3881
3882 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3883 return (-1);
3884
3885 retry:
3886 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3887 switch (errno) {
3888 case ESHUTDOWN:
3889 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3890 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3891 goto out;
3892 case ENOENT:
3893 /* Blocking error case should not occur */
3894 if (!(flags & ZEVENT_NONBLOCK))
3895 error = zpool_standard_error_fmt(hdl, errno,
3896 dgettext(TEXT_DOMAIN, "cannot get event"));
3897
3898 goto out;
3899 case ENOMEM:
3900 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3901 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3902 dgettext(TEXT_DOMAIN, "cannot get event"));
3903 goto out;
3904 } else {
3905 goto retry;
3906 }
3907 default:
3908 error = zpool_standard_error_fmt(hdl, errno,
3909 dgettext(TEXT_DOMAIN, "cannot get event"));
3910 goto out;
3911 }
3912 }
3913
3914 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3915 if (error != 0)
3916 goto out;
3917
3918 *dropped = (int)zc.zc_cookie;
3919 out:
3920 zcmd_free_nvlists(&zc);
3921
3922 return (error);
3923 }
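
/*
 * Consumer loop sketch, draining all pending events without blocking
 * (error handling elided; ZFS_DEV is the standard /dev/zfs node):
 *
 *	int zevent_fd = open(ZFS_DEV, O_RDWR);
 *	nvlist_t *nvl;
 *	int dropped;
 *
 *	while (zpool_events_next(hdl, &nvl, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && nvl != NULL) {
 *		... consume the event, noting 'dropped' missed events ...
 *		nvlist_free(nvl);
 *	}
 *	(void) close(zevent_fd);
 */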
3924
3925 /*
3926 * Clear all events.
3927 */
3928 int
3929 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3930 {
3931 zfs_cmd_t zc = {"\0"};
3932 char msg[1024];
3933
3934 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3935 "cannot clear events"));
3936
3937 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3938 return (zpool_standard_error_fmt(hdl, errno, msg));
3939
3940 if (count != NULL)
3941 *count = (int)zc.zc_cookie; /* # of events cleared */
3942
3943 return (0);
3944 }
3945
3946 /*
3947 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3948 * the passed zevent_fd file handle. On success zero is returned,
3949 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3950 */
3951 int
3952 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3953 {
3954 zfs_cmd_t zc = {"\0"};
3955 int error = 0;
3956
3957 zc.zc_guid = eid;
3958 zc.zc_cleanup_fd = zevent_fd;
3959
3960 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3961 switch (errno) {
3962 case ENOENT:
3963 error = zfs_error_fmt(hdl, EZFS_NOENT,
3964 dgettext(TEXT_DOMAIN, "cannot get event"));
3965 break;
3966
3967 case ENOMEM:
3968 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3969 dgettext(TEXT_DOMAIN, "cannot get event"));
3970 break;
3971
3972 default:
3973 error = zpool_standard_error_fmt(hdl, errno,
3974 dgettext(TEXT_DOMAIN, "cannot get event"));
3975 break;
3976 }
3977 }
3978
3979 return (error);
3980 }
3981
3982 void
3983 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3984 char *pathname, size_t len)
3985 {
3986 zfs_cmd_t zc = {"\0"};
3987 boolean_t mounted = B_FALSE;
3988 char *mntpnt = NULL;
3989 char dsname[ZFS_MAX_DATASET_NAME_LEN];
3990
3991 if (dsobj == 0) {
3992 /* special case for the MOS */
3993 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
3994 (longlong_t)obj);
3995 return;
3996 }
3997
3998 /* get the dataset's name */
3999 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4000 zc.zc_obj = dsobj;
4001 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4002 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4003 /* just write out a path of two object numbers */
4004 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4005 (longlong_t)dsobj, (longlong_t)obj);
4006 return;
4007 }
4008 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4009
4010 /* find out if the dataset is mounted */
4011 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4012
4013 /* get the corrupted object's path */
4014 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4015 zc.zc_obj = obj;
4016 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4017 &zc) == 0) {
4018 if (mounted) {
4019 (void) snprintf(pathname, len, "%s%s", mntpnt,
4020 zc.zc_value);
4021 } else {
4022 (void) snprintf(pathname, len, "%s:%s",
4023 dsname, zc.zc_value);
4024 }
4025 } else {
4026 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4027 (longlong_t)obj);
4028 }
4029 free(mntpnt);
4030 }
4031
4032 /*
4033 * Read the EFI label from the config. If a label does not exist,
4034 * pass back the error to the caller. If the caller has passed a non-NULL
4035 * diskaddr argument then we set it to the starting address of the EFI
4036 * partition.
4037 */
4038 static int
4039 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4040 {
4041 char *path;
4042 int fd;
4043 char diskname[MAXPATHLEN];
4044 int err = -1;
4045
4046 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4047 return (err);
4048
4049 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4050 strrchr(path, '/'));
4051 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
4052 struct dk_gpt *vtoc;
4053
4054 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4055 if (sb != NULL)
4056 *sb = vtoc->efi_parts[0].p_start;
4057 efi_free(vtoc);
4058 }
4059 (void) close(fd);
4060 }
4061 return (err);
4062 }
4063
4064 /*
4065 * Determine where a partition starts on a disk in the current
4066 * configuration.
4067 */
4068 static diskaddr_t
4069 find_start_block(nvlist_t *config)
4070 {
4071 nvlist_t **child;
4072 uint_t c, children;
4073 diskaddr_t sb = MAXOFFSET_T;
4074 uint64_t wholedisk;
4075
4076 if (nvlist_lookup_nvlist_array(config,
4077 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4078 if (nvlist_lookup_uint64(config,
4079 ZPOOL_CONFIG_WHOLE_DISK,
4080 &wholedisk) != 0 || !wholedisk) {
4081 return (MAXOFFSET_T);
4082 }
4083 if (read_efi_label(config, &sb) < 0)
4084 sb = MAXOFFSET_T;
4085 return (sb);
4086 }
4087
4088 for (c = 0; c < children; c++) {
4089 sb = find_start_block(child[c]);
4090 if (sb != MAXOFFSET_T) {
4091 return (sb);
4092 }
4093 }
4094 return (MAXOFFSET_T);
4095 }
4096
4097 static int
4098 zpool_label_disk_check(char *path)
4099 {
4100 struct dk_gpt *vtoc;
4101 int fd, err;
4102
4103 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4104 return (errno);
4105
4106 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4107 (void) close(fd);
4108 return (err);
4109 }
4110
4111 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4112 efi_free(vtoc);
4113 (void) close(fd);
4114 return (EIDRM);
4115 }
4116
4117 efi_free(vtoc);
4118 (void) close(fd);
4119 return (0);
4120 }
4121
4122 /*
4123 * Generate a unique partition name for the ZFS member. Partitions must
4124 * have unique names to ensure udev will be able to create symlinks under
4125 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4126 * of the form zfs-<unique-id>.
4127 */
4128 static void
4129 zpool_label_name(char *label_name, int label_size)
4130 {
4131 uint64_t id = 0;
4132 int fd;
4133
4134 fd = open("/dev/urandom", O_RDONLY);
4135 if (fd >= 0) {
4136 if (read(fd, &id, sizeof (id)) != sizeof (id))
4137 id = 0;
4138
4139 close(fd);
4140 }
4141
4142 if (id == 0)
4143 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4144
4145 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
4146 }
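
/*
 * Example (illustrative only; the hex digits are made up): the buffer
 * receives "zfs-" plus 16 hex digits, e.g. "zfs-1c9a8f03b2d47e65", which
 * udev then exposes as /dev/disk/by-partlabel/zfs-1c9a8f03b2d47e65.
 *
 *    char name[EFI_PART_NAME_LEN];
 *
 *    zpool_label_name(name, sizeof (name));
 */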
4147
4148 /*
4149 * Label an individual disk. The name provided is the short name,
4150 * stripped of any leading /dev path.
4151 */
4152 int
4153 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4154 {
4155 char path[MAXPATHLEN];
4156 struct dk_gpt *vtoc;
4157 int rval, fd;
4158 size_t resv = EFI_MIN_RESV_SIZE;
4159 uint64_t slice_size;
4160 diskaddr_t start_block;
4161 char errbuf[1024];
4162
4163 /* prepare an error message just in case */
4164 (void) snprintf(errbuf, sizeof (errbuf),
4165 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4166
4167 if (zhp) {
4168 nvlist_t *nvroot;
4169
4170 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4171 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4172
4173 if (zhp->zpool_start_block == 0)
4174 start_block = find_start_block(nvroot);
4175 else
4176 start_block = zhp->zpool_start_block;
4177 zhp->zpool_start_block = start_block;
4178 } else {
4179 /* new pool */
4180 start_block = NEW_START_BLOCK;
4181 }
4182
4183 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4184
4185 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4186 /*
4187 * This shouldn't happen. We've long since verified that this
4188 * is a valid device.
4189 */
4190 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4191 "label '%s': unable to open device: %d"), path, errno);
4192 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4193 }
4194
4195 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4196 /*
4197 * The only way this can fail is if we run out of memory, or we
4198 * were unable to read the disk's capacity.
4199 */
4200 if (errno == ENOMEM)
4201 (void) no_memory(hdl);
4202
4203 (void) close(fd);
4204 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4205 "label '%s': unable to read disk capacity"), path);
4206
4207 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4208 }
4209
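/*
 * Size the whole-disk partition: take the last usable LBA, hold back
 * EFI_MIN_RESV_SIZE sectors for the reserved area at the end of the disk,
 * subtract the starting offset, and round down to the partition alignment
 * boundary.
 */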
4210 slice_size = vtoc->efi_last_u_lba + 1;
4211 slice_size -= EFI_MIN_RESV_SIZE;
4212 if (start_block == MAXOFFSET_T)
4213 start_block = NEW_START_BLOCK;
4214 slice_size -= start_block;
4215 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4216
4217 vtoc->efi_parts[0].p_start = start_block;
4218 vtoc->efi_parts[0].p_size = slice_size;
4219
4220 /*
4221 * Why we use V_USR: V_BACKUP confuses users, and is considered
4222 * disposable by some EFI utilities (since EFI doesn't have a backup
4223 * slice). V_UNASSIGNED is supposed to be used only for zero size
4224 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4225 * etc. were all pretty specific. V_USR is as close to reality as we
4226 * can get, in the absence of V_OTHER.
4227 */
4228 vtoc->efi_parts[0].p_tag = V_USR;
4229 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4230
4231 vtoc->efi_parts[8].p_start = slice_size + start_block;
4232 vtoc->efi_parts[8].p_size = resv;
4233 vtoc->efi_parts[8].p_tag = V_RESERVED;
4234
4235 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4236 /*
4237 * Some block drivers (like pcata) may not support EFI
4238 * GPT labels. Print out a helpful error message directing
4239 * the user to manually label the disk and provide a
4240 * specific slice.
4241 */
4242 (void) close(fd);
4243 efi_free(vtoc);
4244
4245 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4246 "parted(8) and then provide a specific slice: %d"), rval);
4247 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4248 }
4249
4250 (void) close(fd);
4251 efi_free(vtoc);
4252
4253 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4254 (void) zfs_append_partition(path, MAXPATHLEN);
4255
4256 /* Wait for udev to signal that the device has settled. */
4257 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4258 if (rval) {
4259 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4260 "detect device partitions on '%s': %d"), path, rval);
4261 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4262 }
4263
4264 /* We can't be too paranoid. Read the label back and verify it. */
4265 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4266 rval = zpool_label_disk_check(path);
4267 if (rval) {
4268 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4269 "EFI label on '%s' is damaged. Ensure\nthis device "
4270 "is not in in use, and is functioning properly: %d"),
4271 path, rval);
4272 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4273 }
4274
4275 return (0);
4276 }
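
/*
 * Usage sketch (illustrative only; "sdb" is an assumed short device
 * name): label a whole disk before it is added to a pool, as
 * 'zpool create' and 'zpool add' do internally.
 *
 *    if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *            (void) fprintf(stderr, "%s\n",
 *                libzfs_error_description(hdl));
 */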
4277
4278 /*
4279 * Allocate and return the underlying device name for a device mapper device.
4280 * If a device mapper device maps to multiple devices, return the first device.
4281 *
4282 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4283 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
4284 *
4285 * Returns the device name, or NULL on error, on no match, or if dm_name
4286 * is not a DM device.
4287 *
4288 * NOTE: The returned name string must be *freed*.
4289 */
4290 char *
4291 dm_get_underlying_path(char *dm_name)
4292 {
4293 DIR *dp = NULL;
4294 struct dirent *ep;
4295 char *realp;
4296 char *tmp = NULL;
4297 char *path = NULL;
4298 char *dev_str;
4299 int size;
4300
4301 if (dm_name == NULL)
4302 return (NULL);
4303
4304 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4305 realp = realpath(dm_name, NULL);
4306 if (realp == NULL)
4307 return (NULL);
4308
4309 /*
4310 * If they preface 'dev' with a path (like "/dev") then strip it off.
4311 * We just want the 'dm-N' part.
4312 */
4313 tmp = strrchr(realp, '/');
4314 dev_str = (tmp != NULL) ? tmp + 1 : realp; /* just the 'dm-N' part */
4315
4316 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
4317 if (size == -1) {
4318 /* 'tmp' is undefined after a failed asprintf() */
4319 tmp = NULL;
4320 goto end;
4321 }
4322
4323 dp = opendir(tmp);
4324 if (dp == NULL)
4325 goto end;
4326
4327 /* Return the first slave entry (e.g. "sda") in /sys/block/dm-N/slaves/ */
4328 while ((ep = readdir(dp))) {
4329 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4330 size = asprintf(&path, "/dev/%s", ep->d_name);
4331 break;
4332 }
4333 }
4334
4335 end:
4336 if (dp != NULL)
4337 closedir(dp);
4338 free(tmp);
4339 free(realp);
4340 return (path);
4341 }
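
/*
 * Usage sketch (illustrative only; the device names are assumptions):
 *
 *    char *dev = dm_get_underlying_path("/dev/dm-0");
 *
 *    if (dev != NULL) {
 *            (void) printf("%s\n", dev);
 *            free(dev);
 *    }
 *
 * This would print e.g. "/dev/sda"; as noted above, the returned string
 * must be freed by the caller.
 */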
4342
4343 /*
4344 * Return 1 if device is a device mapper or multipath device.
4345 * Return 0 if not.
4346 */
4347 int
4348 zfs_dev_is_dm(char *dev_name)
4349 {
4350 char *tmp;
4352 tmp = dm_get_underlying_path(dev_name);
4353 if (tmp == NULL)
4354 return (0);
4355
4356 free(tmp);
4357 return (1);
4358 }
4359
4360 /*
4361 * Lookup the underlying device for a device name
4362 *
4363 * Often you'll have a symlink to a device, a partition device,
4364 * or a multipath device, and want to look up the underlying device.
4365 * This function returns the underlying device name. If the device
4366 * name is already the underlying device, then just return the same
4367 * name. If the device is a DM device with multiple underlying devices
4368 * then return the first one.
4369 *
4370 * For example:
4371 *
4372 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
4373 * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
4374 * returns: /dev/sda
4375 *
4376 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
4377 * dev_name: /dev/mapper/mpatha
4378 * returns: /dev/sda (first device)
4379 *
4380 * 3. /dev/sda (already the underlying device)
4381 * dev_name: /dev/sda
4382 * returns: /dev/sda
4383 *
4384 * 4. /dev/dm-3 (mapped to /dev/sda)
4385 * dev_name: /dev/dm-3
4386 * returns: /dev/sda
4387 *
4388 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
4389 * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
4390 * returns: /dev/sdb
4391 *
4392 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
4393 * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
4394 * returns: /dev/sda
4395 *
4396 * Returns underlying device name, or NULL on error or no match.
4397 *
4398 * NOTE: The returned name string must be *freed*.
4399 */
4400 char *
4401 zfs_get_underlying_path(char *dev_name)
4402 {
4403 char *name = NULL;
4404 char *tmp;
4405
4406 if (dev_name == NULL)
4407 return (NULL);
4408
4409 tmp = dm_get_underlying_path(dev_name);
4410
4411 /* dev_name is not a DM device, so just resolve any symlinks */
4412 if (tmp == NULL)
4413 tmp = realpath(dev_name, NULL);
4414
4415 if (tmp != NULL) {
4416 name = zfs_strip_partition_path(tmp);
4417 free(tmp);
4418 }
4419
4420 return (name);
4421 }
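
/*
 * Usage sketch (illustrative only; the by-id link mirrors example 1
 * above):
 *
 *    char *dev = zfs_get_underlying_path(
 *        "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001");
 *
 *    if (dev != NULL) {
 *            (void) printf("%s\n", dev);
 *            free(dev);
 *    }
 *
 * Per the example above this would print "/dev/sda"; the returned string
 * must be freed by the caller.
 */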
4422
4423 /*
4424 * Given a dev name like "sda", return the full enclosure sysfs path to
4425 * the disk. You can also pass in the name with "/dev" prepended
4426 * to it (like /dev/sda).
4427 *
4428 * For example, disk "sda" in enclosure slot 1:
4429 * dev: "sda"
4430 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4431 *
4432 * 'dev' must be a non-devicemapper device.
4433 *
4434 * Returned string must be freed.
4435 */
4436 char *
4437 zfs_get_enclosure_sysfs_path(char *dev_name)
4438 {
4439 DIR *dp = NULL;
4440 struct dirent *ep;
4441 char buf[MAXPATHLEN];
4442 char *tmp1 = NULL;
4443 char *tmp2 = NULL;
4444 char *tmp3 = NULL;
4445 char *path = NULL;
4446 ssize_t size; /* readlink(2) returns ssize_t */
4447 int tmpsize;
4448
4449 if (dev_name == NULL)
4450 return (NULL);
4451
4452 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4453 tmp1 = strrchr(dev_name, '/');
4454 if (tmp1 != NULL)
4455 dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
4456
4457 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4458 if (tmpsize == -1 || tmp1 == NULL) {
4459 tmp1 = NULL; /* 'tmp1' is undefined on failure */
4460 goto end;
4461 }
4462
4463 dp = opendir(tmp1);
4464 if (dp == NULL) {
4465 /* 'tmp1' is still valid; it is freed at 'end' below */
4466 goto end;
4467 }
4468
4469 /*
4470 * Look through all sysfs entries in /sys/block/<dev>/device for
4471 * the enclosure symlink.
4472 */
4473 while ((ep = readdir(dp))) {
4474 /* Ignore everything that's not our enclosure_device link */
4475 if (strstr(ep->d_name, "enclosure_device") == NULL)
4476 continue;
4477
4478 if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
4479 tmp2 == NULL)
4480 break;
4481
4482 size = readlink(tmp2, buf, sizeof (buf));
4483
4484 /* Did readlink fail or crop the link name? */
4485 if (size == -1 || size >= sizeof (buf)) {
4486 free(tmp2);
4487 tmp2 = NULL; /* To make free() at the end a NOP */
4488 break;
4489 }
4490
4491 /*
4492 * We got a valid link. readlink() doesn't terminate strings
4493 * so we have to do it.
4494 */
4495 buf[size] = '\0';
4496
4497 /*
4498 * Our link will look like:
4499 *
4500 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4501 *
4502 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4503 */
4504 tmp3 = strstr(buf, "enclosure");
4505 if (tmp3 == NULL)
4506 break;
4507
4508 if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
4509 /* If asprintf() fails, 'path' is undefined */
4510 path = NULL;
4511 break;
4512 }
4513
4514 if (path == NULL)
4515 break;
4516 }
4517
4518 end:
4519 free(tmp2);
4520 free(tmp1);
4521
4522 if (dp != NULL)
4523 closedir(dp);
4524
4525 return (path);
4526 }
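
/*
 * Usage sketch (illustrative only; "sda" and the slot path match the
 * example above):
 *
 *    char *slot = zfs_get_enclosure_sysfs_path("sda");
 *
 *    if (slot != NULL) {
 *            (void) printf("%s\n", slot);
 *            free(slot);
 *    }
 *
 * For a disk in enclosure slot 1 this would print something like
 * "/sys/class/enclosure/1:0:3:0/Slot 1".
 */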