git.proxmox.com Git - mirror_zfs.git/blob - lib/libzfs/libzfs_pool.c
OpenZFS 6931 - lib/libzfs: cleanup gcc warnings
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 */
28
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <fcntl.h>
33 #include <libintl.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <unistd.h>
38 #include <libgen.h>
39 #include <zone.h>
40 #include <sys/stat.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <dlfcn.h>
45
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
51
52 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
53
54 typedef struct prop_flags {
55 int create:1; /* Validate property on creation */
56 int import:1; /* Validate property on import */
57 } prop_flags_t;
58
59 /*
60 * ====================================================================
61 * zpool property functions
62 * ====================================================================
63 */
64
65 static int
66 zpool_get_all_props(zpool_handle_t *zhp)
67 {
68 zfs_cmd_t zc = {"\0"};
69 libzfs_handle_t *hdl = zhp->zpool_hdl;
70
71 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
72
73 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
74 return (-1);
75
76 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
77 if (errno == ENOMEM) {
78 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
79 zcmd_free_nvlists(&zc);
80 return (-1);
81 }
82 } else {
83 zcmd_free_nvlists(&zc);
84 return (-1);
85 }
86 }
87
88 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
89 zcmd_free_nvlists(&zc);
90 return (-1);
91 }
92
93 zcmd_free_nvlists(&zc);
94
95 return (0);
96 }
97
98 static int
99 zpool_props_refresh(zpool_handle_t *zhp)
100 {
101 nvlist_t *old_props;
102
103 old_props = zhp->zpool_props;
104
105 if (zpool_get_all_props(zhp) != 0)
106 return (-1);
107
108 nvlist_free(old_props);
109 return (0);
110 }
111
112 static char *
113 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
114 zprop_source_t *src)
115 {
116 nvlist_t *nv, *nvl;
117 uint64_t ival;
118 char *value;
119 zprop_source_t source;
120
121 nvl = zhp->zpool_props;
122 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
123 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 source = ival;
125 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 } else {
127 source = ZPROP_SRC_DEFAULT;
128 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
129 value = "-";
130 }
131
132 if (src)
133 *src = source;
134
135 return (value);
136 }
137
138 uint64_t
139 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
140 {
141 nvlist_t *nv, *nvl;
142 uint64_t value;
143 zprop_source_t source;
144
145 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
146 /*
147 * zpool_get_all_props() has most likely failed because
148 * the pool is faulted, but if all we need is the top level
149 * vdev's guid then get it from the zhp config nvlist.
150 */
151 if ((prop == ZPOOL_PROP_GUID) &&
152 (nvlist_lookup_nvlist(zhp->zpool_config,
153 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
154 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
155 == 0)) {
156 return (value);
157 }
158 return (zpool_prop_default_numeric(prop));
159 }
160
161 nvl = zhp->zpool_props;
162 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
163 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 source = value;
165 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 } else {
167 source = ZPROP_SRC_DEFAULT;
168 value = zpool_prop_default_numeric(prop);
169 }
170
171 if (src)
172 *src = source;
173
174 return (value);
175 }
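
/*
 * Illustrative usage sketch (not part of the original file): reading a
 * numeric pool property via zpool_get_prop_int() above. Assumes 'zhp'
 * came from zpool_open(); compiled out with #if 0.
 */
#if 0
static void
example_print_version(zpool_handle_t *zhp)
{
	zprop_source_t src;
	uint64_t version;

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
	(void) printf("version=%llu (%s)\n", (u_longlong_t)version,
	    src == ZPROP_SRC_DEFAULT ? "default" : "local");
}
#endif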
176
177 /*
178 * Map VDEV STATE to printed strings.
179 */
180 char *
181 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
182 {
183 switch (state) {
184 case VDEV_STATE_CLOSED:
185 case VDEV_STATE_OFFLINE:
186 return (gettext("OFFLINE"));
187 case VDEV_STATE_REMOVED:
188 return (gettext("REMOVED"));
189 case VDEV_STATE_CANT_OPEN:
190 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
191 return (gettext("FAULTED"));
192 else if (aux == VDEV_AUX_SPLIT_POOL)
193 return (gettext("SPLIT"));
194 else
195 return (gettext("UNAVAIL"));
196 case VDEV_STATE_FAULTED:
197 return (gettext("FAULTED"));
198 case VDEV_STATE_DEGRADED:
199 return (gettext("DEGRADED"));
200 case VDEV_STATE_HEALTHY:
201 return (gettext("ONLINE"));
202
203 default:
204 break;
205 }
206
207 return (gettext("UNKNOWN"));
208 }
209
210 /*
211 * Map POOL STATE to printed strings.
212 */
213 const char *
214 zpool_pool_state_to_name(pool_state_t state)
215 {
216 switch (state) {
217 default:
218 break;
219 case POOL_STATE_ACTIVE:
220 return (gettext("ACTIVE"));
221 case POOL_STATE_EXPORTED:
222 return (gettext("EXPORTED"));
223 case POOL_STATE_DESTROYED:
224 return (gettext("DESTROYED"));
225 case POOL_STATE_SPARE:
226 return (gettext("SPARE"));
227 case POOL_STATE_L2CACHE:
228 return (gettext("L2CACHE"));
229 case POOL_STATE_UNINITIALIZED:
230 return (gettext("UNINITIALIZED"));
231 case POOL_STATE_UNAVAIL:
232 return (gettext("UNAVAIL"));
233 case POOL_STATE_POTENTIALLY_ACTIVE:
234 return (gettext("POTENTIALLY_ACTIVE"));
235 }
236
237 return (gettext("UNKNOWN"));
238 }
239
240 /*
241 * Get a zpool property value for 'prop' and return the value in
242 * a pre-allocated buffer.
243 */
244 int
245 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
246 size_t len, zprop_source_t *srctype, boolean_t literal)
247 {
248 uint64_t intval;
249 const char *strval;
250 zprop_source_t src = ZPROP_SRC_NONE;
251 nvlist_t *nvroot;
252 vdev_stat_t *vs;
253 uint_t vsc;
254
255 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
256 switch (prop) {
257 case ZPOOL_PROP_NAME:
258 (void) strlcpy(buf, zpool_get_name(zhp), len);
259 break;
260
261 case ZPOOL_PROP_HEALTH:
262 (void) strlcpy(buf, "FAULTED", len);
263 break;
264
265 case ZPOOL_PROP_GUID:
266 intval = zpool_get_prop_int(zhp, prop, &src);
267 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
268 break;
269
270 case ZPOOL_PROP_ALTROOT:
271 case ZPOOL_PROP_CACHEFILE:
272 case ZPOOL_PROP_COMMENT:
273 if (zhp->zpool_props != NULL ||
274 zpool_get_all_props(zhp) == 0) {
275 (void) strlcpy(buf,
276 zpool_get_prop_string(zhp, prop, &src),
277 len);
278 break;
279 }
280 /* FALLTHROUGH */
281 default:
282 (void) strlcpy(buf, "-", len);
283 break;
284 }
285
286 if (srctype != NULL)
287 *srctype = src;
288 return (0);
289 }
290
291 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
292 prop != ZPOOL_PROP_NAME)
293 return (-1);
294
295 switch (zpool_prop_get_type(prop)) {
296 case PROP_TYPE_STRING:
297 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
298 len);
299 break;
300
301 case PROP_TYPE_NUMBER:
302 intval = zpool_get_prop_int(zhp, prop, &src);
303
304 switch (prop) {
305 case ZPOOL_PROP_SIZE:
306 case ZPOOL_PROP_ALLOCATED:
307 case ZPOOL_PROP_FREE:
308 case ZPOOL_PROP_FREEING:
309 case ZPOOL_PROP_LEAKED:
310 case ZPOOL_PROP_ASHIFT:
311 if (literal)
312 (void) snprintf(buf, len, "%llu",
313 (u_longlong_t)intval);
314 else
315 (void) zfs_nicenum(intval, buf, len);
316 break;
317
318 case ZPOOL_PROP_EXPANDSZ:
319 if (intval == 0) {
320 (void) strlcpy(buf, "-", len);
321 } else if (literal) {
322 (void) snprintf(buf, len, "%llu",
323 (u_longlong_t)intval);
324 } else {
325 (void) zfs_nicenum(intval, buf, len);
326 }
327 break;
328
329 case ZPOOL_PROP_CAPACITY:
330 if (literal) {
331 (void) snprintf(buf, len, "%llu",
332 (u_longlong_t)intval);
333 } else {
334 (void) snprintf(buf, len, "%llu%%",
335 (u_longlong_t)intval);
336 }
337 break;
338
339 case ZPOOL_PROP_FRAGMENTATION:
340 if (intval == UINT64_MAX) {
341 (void) strlcpy(buf, "-", len);
342 } else if (literal) {
343 (void) snprintf(buf, len, "%llu",
344 (u_longlong_t)intval);
345 } else {
346 (void) snprintf(buf, len, "%llu%%",
347 (u_longlong_t)intval);
348 }
349 break;
350
351 case ZPOOL_PROP_DEDUPRATIO:
352 if (literal)
353 (void) snprintf(buf, len, "%llu.%02llu",
354 (u_longlong_t)(intval / 100),
355 (u_longlong_t)(intval % 100));
356 else
357 (void) snprintf(buf, len, "%llu.%02llux",
358 (u_longlong_t)(intval / 100),
359 (u_longlong_t)(intval % 100));
360 break;
361
362 case ZPOOL_PROP_HEALTH:
363 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
364 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
365 verify(nvlist_lookup_uint64_array(nvroot,
366 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
367 == 0);
368
369 (void) strlcpy(buf, zpool_state_to_name(intval,
370 vs->vs_aux), len);
371 break;
372 case ZPOOL_PROP_VERSION:
373 if (intval >= SPA_VERSION_FEATURES) {
374 (void) snprintf(buf, len, "-");
375 break;
376 }
377 /* FALLTHROUGH */
378 default:
379 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
380 }
381 break;
382
383 case PROP_TYPE_INDEX:
384 intval = zpool_get_prop_int(zhp, prop, &src);
385 if (zpool_prop_index_to_string(prop, intval, &strval)
386 != 0)
387 return (-1);
388 (void) strlcpy(buf, strval, len);
389 break;
390
391 default:
392 abort();
393 }
394
395 if (srctype)
396 *srctype = src;
397
398 return (0);
399 }
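
/*
 * Illustrative usage sketch (not part of the original file): fetching a
 * formatted property value. ZFS_MAXPROPLEN-sized buffers are used for
 * the same purpose elsewhere in this file. Compiled out with #if 0.
 */
#if 0
static void
example_print_health(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
	    NULL, B_FALSE) == 0)
		(void) printf("%s: %s\n", zpool_get_name(zhp), buf);
}
#endif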
400
401 /*
402 * Check if the bootfs name has the same pool name as it is set to.
403 * Assuming bootfs is a valid dataset name.
404 */
405 static boolean_t
406 bootfs_name_valid(const char *pool, char *bootfs)
407 {
408 int len = strlen(pool);
409
410 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
411 return (B_FALSE);
412
413 if (strncmp(pool, bootfs, len) == 0 &&
414 (bootfs[len] == '/' || bootfs[len] == '\0'))
415 return (B_TRUE);
416
417 return (B_FALSE);
418 }
419
420 boolean_t
421 zpool_is_bootable(zpool_handle_t *zhp)
422 {
423 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
424
425 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
426 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
427 sizeof (bootfs)) != 0);
428 }
429
430
431 /*
432 * Given an nvlist of zpool properties to be set, validate that they are
433 * correct, and parse any numeric properties (index, boolean, etc) if they are
434 * specified as strings.
435 */
436 static nvlist_t *
437 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
438 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
439 {
440 nvpair_t *elem;
441 nvlist_t *retprops;
442 zpool_prop_t prop;
443 char *strval;
444 uint64_t intval;
445 char *slash, *check;
446 struct stat64 statbuf;
447 zpool_handle_t *zhp;
448
449 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
450 (void) no_memory(hdl);
451 return (NULL);
452 }
453
454 elem = NULL;
455 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
456 const char *propname = nvpair_name(elem);
457
458 prop = zpool_name_to_prop(propname);
459 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
460 int err;
461 char *fname = strchr(propname, '@') + 1;
462
463 err = zfeature_lookup_name(fname, NULL);
464 if (err != 0) {
465 ASSERT3U(err, ==, ENOENT);
466 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
467 "invalid feature '%s'"), fname);
468 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
469 goto error;
470 }
471
472 if (nvpair_type(elem) != DATA_TYPE_STRING) {
473 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
474 "'%s' must be a string"), propname);
475 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
476 goto error;
477 }
478
479 (void) nvpair_value_string(elem, &strval);
480 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
481 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
482 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483 "property '%s' can only be set to "
484 "'enabled' or 'disabled'"), propname);
485 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486 goto error;
487 }
488
489 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
490 (void) no_memory(hdl);
491 goto error;
492 }
493 continue;
494 }
495
496 /*
497 * Make sure this property is valid and applies to this type.
498 */
499 if (prop == ZPROP_INVAL) {
500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 "invalid property '%s'"), propname);
502 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
503 goto error;
504 }
505
506 if (zpool_prop_readonly(prop)) {
507 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
508 "is readonly"), propname);
509 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
510 goto error;
511 }
512
513 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
514 &strval, &intval, errbuf) != 0)
515 goto error;
516
517 /*
518 * Perform additional checking for specific properties.
519 */
520 switch (prop) {
521 case ZPOOL_PROP_VERSION:
522 if (intval < version ||
523 !SPA_VERSION_IS_SUPPORTED(intval)) {
524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 "property '%s' number %d is invalid."),
526 propname, intval);
527 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
528 goto error;
529 }
530 break;
531
532 case ZPOOL_PROP_ASHIFT:
533 if (!flags.create) {
534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 "property '%s' can only be set at "
536 "creation time"), propname);
537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
538 goto error;
539 }
540
541 if (intval != 0 && (intval < 9 || intval > 13)) {
542 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
543 "property '%s' number %d is invalid."),
544 propname, intval);
545 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
546 goto error;
547 }
548 break;
549
550 case ZPOOL_PROP_BOOTFS:
551 if (flags.create || flags.import) {
552 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
553 "property '%s' cannot be set at creation "
554 "or import time"), propname);
555 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
556 goto error;
557 }
558
559 if (version < SPA_VERSION_BOOTFS) {
560 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
561 "pool must be upgraded to support "
562 "'%s' property"), propname);
563 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
564 goto error;
565 }
566
567 /*
568 * The bootfs property value has to be a dataset name, and
569 * the dataset has to be in the same pool the property is set on.
570 */
571 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
572 strval)) {
573 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
574 "is an invalid name"), strval);
575 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
576 goto error;
577 }
578
579 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
581 "could not open pool '%s'"), poolname);
582 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
583 goto error;
584 }
585 zpool_close(zhp);
586 break;
587
588 case ZPOOL_PROP_ALTROOT:
589 if (!flags.create && !flags.import) {
590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
591 "property '%s' can only be set during pool "
592 "creation or import"), propname);
593 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
594 goto error;
595 }
596
597 if (strval[0] != '/') {
598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
599 "bad alternate root '%s'"), strval);
600 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
601 goto error;
602 }
603 break;
604
605 case ZPOOL_PROP_CACHEFILE:
606 if (strval[0] == '\0')
607 break;
608
609 if (strcmp(strval, "none") == 0)
610 break;
611
612 if (strval[0] != '/') {
613 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
614 "property '%s' must be empty, an "
615 "absolute path, or 'none'"), propname);
616 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
617 goto error;
618 }
619
620 slash = strrchr(strval, '/');
621
622 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
623 strcmp(slash, "/..") == 0) {
624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
625 "'%s' is not a valid file"), strval);
626 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
627 goto error;
628 }
629
630 *slash = '\0';
631
632 if (strval[0] != '\0' &&
633 (stat64(strval, &statbuf) != 0 ||
634 !S_ISDIR(statbuf.st_mode))) {
635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
636 "'%s' is not a valid directory"),
637 strval);
638 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
639 goto error;
640 }
641
642 *slash = '/';
643 break;
644
645 case ZPOOL_PROP_COMMENT:
646 for (check = strval; *check != '\0'; check++) {
647 if (!isprint(*check)) {
648 zfs_error_aux(hdl,
649 dgettext(TEXT_DOMAIN,
650 "comment may only have printable "
651 "characters"));
652 (void) zfs_error(hdl, EZFS_BADPROP,
653 errbuf);
654 goto error;
655 }
656 }
657 if (strlen(strval) > ZPROP_MAX_COMMENT) {
658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
659 "comment must not exceed %d characters"),
660 ZPROP_MAX_COMMENT);
661 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
662 goto error;
663 }
664 break;
665 case ZPOOL_PROP_READONLY:
666 if (!flags.import) {
667 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
668 "property '%s' can only be set at "
669 "import time"), propname);
670 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
671 goto error;
672 }
673 break;
674 case ZPOOL_PROP_TNAME:
675 if (!flags.create) {
676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
677 "property '%s' can only be set at "
678 "creation time"), propname);
679 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
680 goto error;
681 }
682 break;
683 default:
684 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
685 "property '%s'(%d) not defined"), propname, prop);
686 break;
687 }
688 }
689
690 return (retprops);
691 error:
692 nvlist_free(retprops);
693 return (NULL);
694 }
695
696 /*
697 * Set zpool property : propname=propval.
698 */
699 int
700 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
701 {
702 zfs_cmd_t zc = {"\0"};
703 int ret = -1;
704 char errbuf[1024];
705 nvlist_t *nvl = NULL;
706 nvlist_t *realprops;
707 uint64_t version;
708 prop_flags_t flags = { 0 };
709
710 (void) snprintf(errbuf, sizeof (errbuf),
711 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
712 zhp->zpool_name);
713
714 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
715 return (no_memory(zhp->zpool_hdl));
716
717 if (nvlist_add_string(nvl, propname, propval) != 0) {
718 nvlist_free(nvl);
719 return (no_memory(zhp->zpool_hdl));
720 }
721
722 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
723 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
724 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
725 nvlist_free(nvl);
726 return (-1);
727 }
728
729 nvlist_free(nvl);
730 nvl = realprops;
731
732 /*
733 * Execute the corresponding ioctl() to set this property.
734 */
735 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
736
737 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
738 nvlist_free(nvl);
739 return (-1);
740 }
741
742 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
743
744 zcmd_free_nvlists(&zc);
745 nvlist_free(nvl);
746
747 if (ret)
748 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
749 else
750 (void) zpool_props_refresh(zhp);
751
752 return (ret);
753 }
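
/*
 * Illustrative usage sketch (not part of the original file): setting a
 * property as name=value strings, exactly as 'zpool set' does. The
 * value is validated by zpool_valid_proplist() before the ioctl.
 */
#if 0
static int
example_set_comment(zpool_handle_t *zhp)
{
	return (zpool_set_prop(zhp, "comment", "scratch pool"));
}
#endif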
754
755 int
756 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
757 {
758 libzfs_handle_t *hdl = zhp->zpool_hdl;
759 zprop_list_t *entry;
760 char buf[ZFS_MAXPROPLEN];
761 nvlist_t *features = NULL;
762 nvpair_t *nvp;
763 zprop_list_t **last;
764 boolean_t firstexpand = (NULL == *plp);
765 int i;
766
767 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
768 return (-1);
769
770 last = plp;
771 while (*last != NULL)
772 last = &(*last)->pl_next;
773
774 if ((*plp)->pl_all)
775 features = zpool_get_features(zhp);
776
777 if ((*plp)->pl_all && firstexpand) {
778 for (i = 0; i < SPA_FEATURES; i++) {
779 zprop_list_t *entry = zfs_alloc(hdl,
780 sizeof (zprop_list_t));
781 entry->pl_prop = ZPROP_INVAL;
782 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
783 spa_feature_table[i].fi_uname);
784 entry->pl_width = strlen(entry->pl_user_prop);
785 entry->pl_all = B_TRUE;
786
787 *last = entry;
788 last = &entry->pl_next;
789 }
790 }
791
792 /* add any unsupported features */
793 for (nvp = nvlist_next_nvpair(features, NULL);
794 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
795 char *propname;
796 boolean_t found;
797 zprop_list_t *entry;
798
799 if (zfeature_is_supported(nvpair_name(nvp)))
800 continue;
801
802 propname = zfs_asprintf(hdl, "unsupported@%s",
803 nvpair_name(nvp));
804
805 /*
806 * Before adding the property to the list make sure that no
807 * other pool already added the same property.
808 */
809 found = B_FALSE;
810 entry = *plp;
811 while (entry != NULL) {
812 if (entry->pl_user_prop != NULL &&
813 strcmp(propname, entry->pl_user_prop) == 0) {
814 found = B_TRUE;
815 break;
816 }
817 entry = entry->pl_next;
818 }
819 if (found) {
820 free(propname);
821 continue;
822 }
823
824 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
825 entry->pl_prop = ZPROP_INVAL;
826 entry->pl_user_prop = propname;
827 entry->pl_width = strlen(entry->pl_user_prop);
828 entry->pl_all = B_TRUE;
829
830 *last = entry;
831 last = &entry->pl_next;
832 }
833
834 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
835
836 if (entry->pl_fixed)
837 continue;
838
839 if (entry->pl_prop != ZPROP_INVAL &&
840 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
841 NULL, B_FALSE) == 0) {
842 if (strlen(buf) > entry->pl_width)
843 entry->pl_width = strlen(buf);
844 }
845 }
846
847 return (0);
848 }
849
850 /*
851 * Get the state for the given feature on the given ZFS pool.
852 */
853 int
854 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
855 size_t len)
856 {
857 uint64_t refcount;
858 boolean_t found = B_FALSE;
859 nvlist_t *features = zpool_get_features(zhp);
860 boolean_t supported;
861 const char *feature = strchr(propname, '@') + 1;
862
863 supported = zpool_prop_feature(propname);
864 ASSERT(supported || zpool_prop_unsupported(propname));
865
866 /*
867 * Convert from feature name to feature guid. This conversion is
868 * unnecessary for unsupported@... properties because they already
869 * use guids.
870 */
871 if (supported) {
872 int ret;
873 spa_feature_t fid;
874
875 ret = zfeature_lookup_name(feature, &fid);
876 if (ret != 0) {
877 (void) strlcpy(buf, "-", len);
878 return (ENOTSUP);
879 }
880 feature = spa_feature_table[fid].fi_guid;
881 }
882
883 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
884 found = B_TRUE;
885
886 if (supported) {
887 if (!found) {
888 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
889 } else {
890 if (refcount == 0)
891 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
892 else
893 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
894 }
895 } else {
896 if (found) {
897 if (refcount == 0) {
898 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
899 } else {
900 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
901 }
902 } else {
903 (void) strlcpy(buf, "-", len);
904 return (ENOTSUP);
905 }
906 }
907
908 return (0);
909 }
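
/*
 * Illustrative usage sketch (not part of the original file):
 * "feature@async_destroy" is just an example property name here, and
 * is assumed to be present in spa_feature_table.
 */
#if 0
static void
example_feature_state(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
	    sizeof (buf)) == 0)
		(void) printf("async_destroy: %s\n", buf);
}
#endif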
910
911 /*
912 * Don't start the slice at the default block of 34; many storage
913 * devices will use a stripe width of 128k, while other vendors prefer a 1m
914 * alignment. It is best to play it safe and ensure a 1m alignment
915 * given 512B blocks. When the block size is larger by a power of 2
916 * we will still be 1m aligned. Some devices are sensitive to the
917 * partition ending alignment as well.
918 */
919 #define NEW_START_BLOCK 2048
920 #define PARTITION_END_ALIGNMENT 2048
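
/*
 * Worked example: with 512B sectors, 2048 blocks * 512B = 1MiB, so both
 * the start block and the end alignment above fall on 1m boundaries.
 */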
921
922 /*
923 * Validate the given pool name, optionally reporting an extended error
924 * message via 'hdl' when the name is invalid.
925 */
926 boolean_t
927 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
928 {
929 namecheck_err_t why;
930 char what;
931 int ret;
932
933 ret = pool_namecheck(pool, &why, &what);
934
935 /*
936 * The rules for reserved pool names were extended at a later point.
937 * But we need to support users with existing pools that may now be
938 * invalid. So we only check for this expanded set of names during a
939 * create (or import), and only in userland.
940 */
941 if (ret == 0 && !isopen &&
942 (strncmp(pool, "mirror", 6) == 0 ||
943 strncmp(pool, "raidz", 5) == 0 ||
944 strncmp(pool, "spare", 5) == 0 ||
945 strcmp(pool, "log") == 0)) {
946 if (hdl != NULL)
947 zfs_error_aux(hdl,
948 dgettext(TEXT_DOMAIN, "name is reserved"));
949 return (B_FALSE);
950 }
951
952
953 if (ret != 0) {
954 if (hdl != NULL) {
955 switch (why) {
956 case NAME_ERR_TOOLONG:
957 zfs_error_aux(hdl,
958 dgettext(TEXT_DOMAIN, "name is too long"));
959 break;
960
961 case NAME_ERR_INVALCHAR:
962 zfs_error_aux(hdl,
963 dgettext(TEXT_DOMAIN, "invalid character "
964 "'%c' in pool name"), what);
965 break;
966
967 case NAME_ERR_NOLETTER:
968 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
969 "name must begin with a letter"));
970 break;
971
972 case NAME_ERR_RESERVED:
973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
974 "name is reserved"));
975 break;
976
977 case NAME_ERR_DISKLIKE:
978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
979 "pool name is reserved"));
980 break;
981
982 case NAME_ERR_LEADING_SLASH:
983 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
984 "leading slash in name"));
985 break;
986
987 case NAME_ERR_EMPTY_COMPONENT:
988 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
989 "empty component in name"));
990 break;
991
992 case NAME_ERR_TRAILING_SLASH:
993 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
994 "trailing slash in name"));
995 break;
996
997 case NAME_ERR_MULTIPLE_DELIMITERS:
998 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
999 "multiple '@' and/or '#' delimiters in "
1000 "name"));
1001 break;
1002 case NAME_ERR_NO_AT:
1003 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1004 "permission set is missing '@'"));
1005 break;
1006 default:
1007 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1008 "(%d) not defined"), why);
1009 break;
1010 }
1011 }
1012 return (B_FALSE);
1013 }
1014
1015 return (B_TRUE);
1016 }
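
/*
 * Illustrative usage sketch (not part of the original file): "tank"
 * passes the check, while "mirror" hits the reserved-name test above
 * when isopen is B_FALSE.
 */
#if 0
static void
example_name_checks(libzfs_handle_t *hdl)
{
	assert(zpool_name_valid(hdl, B_FALSE, "tank") == B_TRUE);
	assert(zpool_name_valid(hdl, B_FALSE, "mirror") == B_FALSE);
}
#endif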
1017
1018 /*
1019 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1020 * state.
1021 */
1022 zpool_handle_t *
1023 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1024 {
1025 zpool_handle_t *zhp;
1026 boolean_t missing;
1027
1028 /*
1029 * Make sure the pool name is valid.
1030 */
1031 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1032 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1033 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1034 pool);
1035 return (NULL);
1036 }
1037
1038 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1039 return (NULL);
1040
1041 zhp->zpool_hdl = hdl;
1042 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1043
1044 if (zpool_refresh_stats(zhp, &missing) != 0) {
1045 zpool_close(zhp);
1046 return (NULL);
1047 }
1048
1049 if (missing) {
1050 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1051 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1052 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1053 zpool_close(zhp);
1054 return (NULL);
1055 }
1056
1057 return (zhp);
1058 }
1059
1060 /*
1061 * Like the above, but silent on error. Used when iterating over pools (because
1062 * the configuration cache may be out of date).
1063 */
1064 int
1065 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1066 {
1067 zpool_handle_t *zhp;
1068 boolean_t missing;
1069
1070 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1071 return (-1);
1072
1073 zhp->zpool_hdl = hdl;
1074 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1075
1076 if (zpool_refresh_stats(zhp, &missing) != 0) {
1077 zpool_close(zhp);
1078 return (-1);
1079 }
1080
1081 if (missing) {
1082 zpool_close(zhp);
1083 *ret = NULL;
1084 return (0);
1085 }
1086
1087 *ret = zhp;
1088 return (0);
1089 }
1090
1091 /*
1092 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1093 * state.
1094 */
1095 zpool_handle_t *
1096 zpool_open(libzfs_handle_t *hdl, const char *pool)
1097 {
1098 zpool_handle_t *zhp;
1099
1100 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1101 return (NULL);
1102
1103 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1104 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1105 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1106 zpool_close(zhp);
1107 return (NULL);
1108 }
1109
1110 return (zhp);
1111 }
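
/*
 * Illustrative usage sketch (not part of the original file): the usual
 * open/use/close pattern. 'hdl' is assumed to come from libzfs_init().
 */
#if 0
static void
example_open_close(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;	/* error already reported through hdl */

	(void) printf("state=%d\n", zpool_get_state(zhp));
	zpool_close(zhp);
}
#endif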
1112
1113 /*
1114 * Close the handle. Simply frees the memory associated with the handle.
1115 */
1116 void
1117 zpool_close(zpool_handle_t *zhp)
1118 {
1119 nvlist_free(zhp->zpool_config);
1120 nvlist_free(zhp->zpool_old_config);
1121 nvlist_free(zhp->zpool_props);
1122 free(zhp);
1123 }
1124
1125 /*
1126 * Return the name of the pool.
1127 */
1128 const char *
1129 zpool_get_name(zpool_handle_t *zhp)
1130 {
1131 return (zhp->zpool_name);
1132 }
1133
1134
1135 /*
1136 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1137 */
1138 int
1139 zpool_get_state(zpool_handle_t *zhp)
1140 {
1141 return (zhp->zpool_state);
1142 }
1143
1144 /*
1145 * Create the named pool, using the provided vdev list. It is assumed
1146 * that the consumer has already validated the contents of the nvlist, so we
1147 * don't have to worry about error semantics.
1148 */
1149 int
1150 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1151 nvlist_t *props, nvlist_t *fsprops)
1152 {
1153 zfs_cmd_t zc = {"\0"};
1154 nvlist_t *zc_fsprops = NULL;
1155 nvlist_t *zc_props = NULL;
1156 char msg[1024];
1157 int ret = -1;
1158
1159 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1160 "cannot create '%s'"), pool);
1161
1162 if (!zpool_name_valid(hdl, B_FALSE, pool))
1163 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1164
1165 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1166 return (-1);
1167
1168 if (props) {
1169 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1170
1171 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1172 SPA_VERSION_1, flags, msg)) == NULL) {
1173 goto create_failed;
1174 }
1175 }
1176
1177 if (fsprops) {
1178 uint64_t zoned;
1179 char *zonestr;
1180
1181 zoned = ((nvlist_lookup_string(fsprops,
1182 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1183 strcmp(zonestr, "on") == 0);
1184
1185 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1186 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1187 goto create_failed;
1188 }
1189 if (!zc_props &&
1190 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1191 goto create_failed;
1192 }
1193 if (nvlist_add_nvlist(zc_props,
1194 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1195 goto create_failed;
1196 }
1197 }
1198
1199 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1200 goto create_failed;
1201
1202 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1203
1204 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1205
1206 zcmd_free_nvlists(&zc);
1207 nvlist_free(zc_props);
1208 nvlist_free(zc_fsprops);
1209
1210 switch (errno) {
1211 case EBUSY:
1212 /*
1213 * This can happen if the user has specified the same
1214 * device multiple times. We can't reliably detect this
1215 * until we try to add it and see we already have a
1216 * label. This can also happen if the device is
1217 * part of an active md or lvm device.
1218 */
1219 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1220 "one or more vdevs refer to the same device, or "
1221 "one of\nthe devices is part of an active md or "
1222 "lvm device"));
1223 return (zfs_error(hdl, EZFS_BADDEV, msg));
1224
1225 case ERANGE:
1226 /*
1227 * This happens if the record size is smaller or larger
1228 * than the allowed size range, or not a power of 2.
1229 *
1230 * NOTE: although zfs_valid_proplist is called earlier,
1231 * this case may have slipped through since the
1232 * pool does not exist yet and it is therefore
1233 * impossible to read properties e.g. max blocksize
1234 * from the pool.
1235 */
1236 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1237 "record size invalid"));
1238 return (zfs_error(hdl, EZFS_BADPROP, msg));
1239
1240 case EOVERFLOW:
1241 /*
1242 * This occurs when one of the devices is below
1243 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1244 * device was the problem device since there's no
1245 * reliable way to determine device size from userland.
1246 */
1247 {
1248 char buf[64];
1249
1250 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1251
1252 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1253 "one or more devices is less than the "
1254 "minimum size (%s)"), buf);
1255 }
1256 return (zfs_error(hdl, EZFS_BADDEV, msg));
1257
1258 case ENOSPC:
1259 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 "one or more devices is out of space"));
1261 return (zfs_error(hdl, EZFS_BADDEV, msg));
1262
1263 case ENOTBLK:
1264 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1265 "cache device must be a disk or disk slice"));
1266 return (zfs_error(hdl, EZFS_BADDEV, msg));
1267
1268 default:
1269 return (zpool_standard_error(hdl, errno, msg));
1270 }
1271 }
1272
1273 create_failed:
1274 zcmd_free_nvlists(&zc);
1275 nvlist_free(zc_props);
1276 nvlist_free(zc_fsprops);
1277 return (ret);
1278 }
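
/*
 * Illustrative sketch (not part of the original file): callers normally
 * build 'nvroot' with the vdev helpers in zpool_vdev.c; the hand-rolled
 * nvlist below only hints at the expected shape (a root vdev with a
 * ZPOOL_CONFIG_CHILDREN array of leaves) and elides error handling.
 */
#if 0
static int
example_create(libzfs_handle_t *hdl, const char *dev)
{
	nvlist_t *nvroot, *child;
	int ret;

	verify(nvlist_alloc(&child, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(child, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(child, ZPOOL_CONFIG_PATH, dev) == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, 1) == 0);

	ret = zpool_create(hdl, "tank", nvroot, NULL, NULL);
	nvlist_free(child);
	nvlist_free(nvroot);
	return (ret);
}
#endif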
1279
1280 /*
1281 * Destroy the given pool. It is up to the caller to ensure that there are no
1282 * datasets left in the pool.
1283 */
1284 int
1285 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1286 {
1287 zfs_cmd_t zc = {"\0"};
1288 zfs_handle_t *zfp = NULL;
1289 libzfs_handle_t *hdl = zhp->zpool_hdl;
1290 char msg[1024];
1291
1292 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1293 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1294 return (-1);
1295
1296 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1297 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1298
1299 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1300 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1301 "cannot destroy '%s'"), zhp->zpool_name);
1302
1303 if (errno == EROFS) {
1304 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1305 "one or more devices is read only"));
1306 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1307 } else {
1308 (void) zpool_standard_error(hdl, errno, msg);
1309 }
1310
1311 if (zfp)
1312 zfs_close(zfp);
1313 return (-1);
1314 }
1315
1316 if (zfp) {
1317 remove_mountpoint(zfp);
1318 zfs_close(zfp);
1319 }
1320
1321 return (0);
1322 }
1323
1324 /*
1325 * Add the given vdevs to the pool. The caller must have already performed the
1326 * necessary verification to ensure that the vdev specification is well-formed.
1327 */
1328 int
1329 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1330 {
1331 zfs_cmd_t zc = {"\0"};
1332 int ret;
1333 libzfs_handle_t *hdl = zhp->zpool_hdl;
1334 char msg[1024];
1335 nvlist_t **spares, **l2cache;
1336 uint_t nspares, nl2cache;
1337
1338 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1339 "cannot add to '%s'"), zhp->zpool_name);
1340
1341 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1342 SPA_VERSION_SPARES &&
1343 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1344 &spares, &nspares) == 0) {
1345 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1346 "upgraded to add hot spares"));
1347 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1348 }
1349
1350 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1351 SPA_VERSION_L2CACHE &&
1352 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1353 &l2cache, &nl2cache) == 0) {
1354 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1355 "upgraded to add cache devices"));
1356 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1357 }
1358
1359 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1360 return (-1);
1361 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1362
1363 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1364 switch (errno) {
1365 case EBUSY:
1366 /*
1367 * This can happen if the user has specified the same
1368 * device multiple times. We can't reliably detect this
1369 * until we try to add it and see we already have a
1370 * label.
1371 */
1372 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1373 "one or more vdevs refer to the same device"));
1374 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1375 break;
1376
1377 case EOVERFLOW:
1378 /*
1379 * This occurs when one of the devices is below
1380 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1381 * device was the problem device since there's no
1382 * reliable way to determine device size from userland.
1383 */
1384 {
1385 char buf[64];
1386
1387 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1388
1389 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1390 "device is less than the minimum "
1391 "size (%s)"), buf);
1392 }
1393 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1394 break;
1395
1396 case ENOTSUP:
1397 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1398 "pool must be upgraded to add these vdevs"));
1399 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1400 break;
1401
1402 case ENOTBLK:
1403 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1404 "cache device must be a disk or disk slice"));
1405 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1406 break;
1407
1408 default:
1409 (void) zpool_standard_error(hdl, errno, msg);
1410 }
1411
1412 ret = -1;
1413 } else {
1414 ret = 0;
1415 }
1416
1417 zcmd_free_nvlists(&zc);
1418
1419 return (ret);
1420 }
1421
1422 /*
1423 * Exports the pool from the system. The caller must ensure that there are no
1424 * mounted datasets in the pool.
1425 */
1426 static int
1427 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1428 const char *log_str)
1429 {
1430 zfs_cmd_t zc = {"\0"};
1431 char msg[1024];
1432
1433 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1434 "cannot export '%s'"), zhp->zpool_name);
1435
1436 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1437 zc.zc_cookie = force;
1438 zc.zc_guid = hardforce;
1439 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1440
1441 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1442 switch (errno) {
1443 case EXDEV:
1444 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1445 "use '-f' to override the following errors:\n"
1446 "'%s' has an active shared spare which could be"
1447 " used by other pools once '%s' is exported."),
1448 zhp->zpool_name, zhp->zpool_name);
1449 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1450 msg));
1451 default:
1452 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1453 msg));
1454 }
1455 }
1456
1457 return (0);
1458 }
1459
1460 int
1461 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1462 {
1463 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1464 }
1465
1466 int
1467 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1468 {
1469 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1470 }
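
/*
 * Illustrative usage sketch (not part of the original file): 'log_str'
 * is recorded in the pool history, typically the command line that
 * requested the export.
 */
#if 0
static int
example_export(zpool_handle_t *zhp)
{
	return (zpool_export(zhp, B_FALSE, "zpool export tank"));
}
#endif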
1471
1472 static void
1473 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1474 nvlist_t *config)
1475 {
1476 nvlist_t *nv = NULL;
1477 uint64_t rewindto;
1478 int64_t loss = -1;
1479 struct tm t;
1480 char timestr[128];
1481
1482 if (!hdl->libzfs_printerr || config == NULL)
1483 return;
1484
1485 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1486 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1487 return;
1488 }
1489
1490 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1491 return;
1492 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1493
1494 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1495 strftime(timestr, 128, "%c", &t) != 0) {
1496 if (dryrun) {
1497 (void) printf(dgettext(TEXT_DOMAIN,
1498 "Would be able to return %s "
1499 "to its state as of %s.\n"),
1500 name, timestr);
1501 } else {
1502 (void) printf(dgettext(TEXT_DOMAIN,
1503 "Pool %s returned to its state as of %s.\n"),
1504 name, timestr);
1505 }
1506 if (loss > 120) {
1507 (void) printf(dgettext(TEXT_DOMAIN,
1508 "%s approximately %lld "),
1509 dryrun ? "Would discard" : "Discarded",
1510 ((longlong_t)loss + 30) / 60);
1511 (void) printf(dgettext(TEXT_DOMAIN,
1512 "minutes of transactions.\n"));
1513 } else if (loss > 0) {
1514 (void) printf(dgettext(TEXT_DOMAIN,
1515 "%s approximately %lld "),
1516 dryrun ? "Would discard" : "Discarded",
1517 (longlong_t)loss);
1518 (void) printf(dgettext(TEXT_DOMAIN,
1519 "seconds of transactions.\n"));
1520 }
1521 }
1522 }
1523
1524 void
1525 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1526 nvlist_t *config)
1527 {
1528 nvlist_t *nv = NULL;
1529 int64_t loss = -1;
1530 uint64_t edata = UINT64_MAX;
1531 uint64_t rewindto;
1532 struct tm t;
1533 char timestr[128];
1534
1535 if (!hdl->libzfs_printerr)
1536 return;
1537
1538 if (reason >= 0)
1539 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1540 else
1541 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1542
1543 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME is missing */
1544 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1545 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1546 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1547 goto no_info;
1548
1549 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1550 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1551 &edata);
1552
1553 (void) printf(dgettext(TEXT_DOMAIN,
1554 "Recovery is possible, but will result in some data loss.\n"));
1555
1556 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1557 strftime(timestr, 128, "%c", &t) != 0) {
1558 (void) printf(dgettext(TEXT_DOMAIN,
1559 "\tReturning the pool to its state as of %s\n"
1560 "\tshould correct the problem. "),
1561 timestr);
1562 } else {
1563 (void) printf(dgettext(TEXT_DOMAIN,
1564 "\tReverting the pool to an earlier state "
1565 "should correct the problem.\n\t"));
1566 }
1567
1568 if (loss > 120) {
1569 (void) printf(dgettext(TEXT_DOMAIN,
1570 "Approximately %lld minutes of data\n"
1571 "\tmust be discarded, irreversibly. "),
1572 ((longlong_t)loss + 30) / 60);
1573 } else if (loss > 0) {
1574 (void) printf(dgettext(TEXT_DOMAIN,
1575 "Approximately %lld seconds of data\n"
1576 "\tmust be discarded, irreversibly. "),
1577 (longlong_t)loss);
1578 }
1579 if (edata != 0 && edata != UINT64_MAX) {
1580 if (edata == 1) {
1581 (void) printf(dgettext(TEXT_DOMAIN,
1582 "After rewind, at least\n"
1583 "\tone persistent user-data error will remain. "));
1584 } else {
1585 (void) printf(dgettext(TEXT_DOMAIN,
1586 "After rewind, several\n"
1587 "\tpersistent user-data errors will remain. "));
1588 }
1589 }
1590 (void) printf(dgettext(TEXT_DOMAIN,
1591 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1592 reason >= 0 ? "clear" : "import", name);
1593
1594 (void) printf(dgettext(TEXT_DOMAIN,
1595 "A scrub of the pool\n"
1596 "\tis strongly recommended after recovery.\n"));
1597 return;
1598
1599 no_info:
1600 (void) printf(dgettext(TEXT_DOMAIN,
1601 "Destroy and re-create the pool from\n\ta backup source.\n"));
1602 }
1603
1604 /*
1605 * zpool_import() is a contracted interface. Should be kept the same
1606 * if possible.
1607 *
1608 * Applications should use zpool_import_props() to import a pool with
1609 * new property values to be set.
1610 */
1611 int
1612 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1613 char *altroot)
1614 {
1615 nvlist_t *props = NULL;
1616 int ret;
1617
1618 if (altroot != NULL) {
1619 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1620 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1621 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1622 newname));
1623 }
1624
1625 if (nvlist_add_string(props,
1626 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1627 nvlist_add_string(props,
1628 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1629 nvlist_free(props);
1630 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1631 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1632 newname));
1633 }
1634 }
1635
1636 ret = zpool_import_props(hdl, config, newname, props,
1637 ZFS_IMPORT_NORMAL);
1638 nvlist_free(props);
1639 return (ret);
1640 }
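
/*
 * Illustrative usage sketch (not part of the original file): importing
 * a discovered config under an altroot. 'config' is assumed to come
 * from the zpool_find_import() family.
 */
#if 0
static int
example_import(libzfs_handle_t *hdl, nvlist_t *config)
{
	char altroot[] = "/mnt/recovery";

	return (zpool_import(hdl, config, NULL, altroot));
}
#endif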
1641
1642 static void
1643 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1644 int indent)
1645 {
1646 nvlist_t **child;
1647 uint_t c, children;
1648 char *vname;
1649 uint64_t is_log = 0;
1650
1651 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1652 &is_log);
1653
1654 if (name != NULL)
1655 (void) printf("\t%*s%s%s\n", indent, "", name,
1656 is_log ? " [log]" : "");
1657
1658 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1659 &child, &children) != 0)
1660 return;
1661
1662 for (c = 0; c < children; c++) {
1663 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1664 print_vdev_tree(hdl, vname, child[c], indent + 2);
1665 free(vname);
1666 }
1667 }
1668
1669 void
1670 zpool_print_unsup_feat(nvlist_t *config)
1671 {
1672 nvlist_t *nvinfo, *unsup_feat;
1673 nvpair_t *nvp;
1674
1675 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1676 0);
1677 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1678 &unsup_feat) == 0);
1679
1680 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1681 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1682 char *desc;
1683
1684 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1685 verify(nvpair_value_string(nvp, &desc) == 0);
1686
1687 if (strlen(desc) > 0)
1688 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1689 else
1690 (void) printf("\t%s\n", nvpair_name(nvp));
1691 }
1692 }
1693
1694 /*
1695 * Import the given pool using the known configuration and a list of
1696 * properties to be set. The configuration should have come from
1697 * zpool_find_import(). The 'newname' parameter controls whether the pool
1698 * is imported with a different name.
1699 */
1700 int
1701 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1702 nvlist_t *props, int flags)
1703 {
1704 zfs_cmd_t zc = {"\0"};
1705 zpool_rewind_policy_t policy;
1706 nvlist_t *nv = NULL;
1707 nvlist_t *nvinfo = NULL;
1708 nvlist_t *missing = NULL;
1709 char *thename;
1710 char *origname;
1711 int ret;
1712 int error = 0;
1713 char errbuf[1024];
1714
1715 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1716 &origname) == 0);
1717
1718 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1719 "cannot import pool '%s'"), origname);
1720
1721 if (newname != NULL) {
1722 if (!zpool_name_valid(hdl, B_FALSE, newname))
1723 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1724 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1725 newname));
1726 thename = (char *)newname;
1727 } else {
1728 thename = origname;
1729 }
1730
1731 if (props != NULL) {
1732 uint64_t version;
1733 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1734
1735 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1736 &version) == 0);
1737
1738 if ((props = zpool_valid_proplist(hdl, origname,
1739 props, version, flags, errbuf)) == NULL)
1740 return (-1);
1741 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1742 nvlist_free(props);
1743 return (-1);
1744 }
1745 nvlist_free(props);
1746 }
1747
1748 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1749
1750 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1751 &zc.zc_guid) == 0);
1752
1753 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1754 zcmd_free_nvlists(&zc);
1755 return (-1);
1756 }
1757 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1758 zcmd_free_nvlists(&zc);
1759 return (-1);
1760 }
1761
1762 zc.zc_cookie = flags;
1763 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1764 errno == ENOMEM) {
1765 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1766 zcmd_free_nvlists(&zc);
1767 return (-1);
1768 }
1769 }
1770 if (ret != 0)
1771 error = errno;
1772
1773 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1774
1775 zcmd_free_nvlists(&zc);
1776
1777 zpool_get_rewind_policy(config, &policy);
1778
1779 if (error) {
1780 char desc[1024];
1781
1782 /*
1783 * Dry-run failed, but we print out what success
1784 * looks like if we found a best txg
1785 */
1786 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1787 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1788 B_TRUE, nv);
1789 nvlist_free(nv);
1790 return (-1);
1791 }
1792
1793 if (newname == NULL)
1794 (void) snprintf(desc, sizeof (desc),
1795 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1796 thename);
1797 else
1798 (void) snprintf(desc, sizeof (desc),
1799 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1800 origname, thename);
1801
1802 switch (error) {
1803 case ENOTSUP:
1804 if (nv != NULL && nvlist_lookup_nvlist(nv,
1805 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1806 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1807 (void) printf(dgettext(TEXT_DOMAIN, "This "
1808 "pool uses the following feature(s) not "
1809 "supported by this system:\n"));
1810 zpool_print_unsup_feat(nv);
1811 if (nvlist_exists(nvinfo,
1812 ZPOOL_CONFIG_CAN_RDONLY)) {
1813 (void) printf(dgettext(TEXT_DOMAIN,
1814 "All unsupported features are only "
1815 "required for writing to the pool."
1816 "\nThe pool can be imported using "
1817 "'-o readonly=on'.\n"));
1818 }
1819 }
1820 /*
1821 * Unsupported version.
1822 */
1823 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1824 break;
1825
1826 case EINVAL:
1827 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1828 break;
1829
1830 case EROFS:
1831 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1832 "one or more devices is read only"));
1833 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1834 break;
1835
1836 case ENXIO:
1837 if (nv && nvlist_lookup_nvlist(nv,
1838 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1839 nvlist_lookup_nvlist(nvinfo,
1840 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1841 (void) printf(dgettext(TEXT_DOMAIN,
1842 "The devices below are missing, use "
1843 "'-m' to import the pool anyway:\n"));
1844 print_vdev_tree(hdl, NULL, missing, 2);
1845 (void) printf("\n");
1846 }
1847 (void) zpool_standard_error(hdl, error, desc);
1848 break;
1849
1850 case EEXIST:
1851 (void) zpool_standard_error(hdl, error, desc);
1852 break;
1853
1854 case EBUSY:
1855 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1856 "one or more devices are already in use\n"));
1857 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1858 break;
1859 case ENAMETOOLONG:
1860 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1861 "new name of at least one dataset is longer than "
1862 "the maximum allowable length"));
1863 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1864 break;
1865 default:
1866 (void) zpool_standard_error(hdl, error, desc);
1867 zpool_explain_recover(hdl,
1868 newname ? origname : thename, -error, nv);
1869 break;
1870 }
1871
1872 nvlist_free(nv);
1873 ret = -1;
1874 } else {
1875 zpool_handle_t *zhp;
1876
1877 /*
1878 * This should never fail, but play it safe anyway.
1879 */
1880 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1881 ret = -1;
1882 else if (zhp != NULL)
1883 zpool_close(zhp);
1884 if (policy.zrp_request &
1885 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1886 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1887 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1888 }
1889 nvlist_free(nv);
1890 return (0);
1891 }
1892
1893 return (ret);
1894 }
1895
1896 /*
1897 * Scan the pool.
1898 */
1899 int
1900 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1901 {
1902 zfs_cmd_t zc = {"\0"};
1903 char msg[1024];
1904 libzfs_handle_t *hdl = zhp->zpool_hdl;
1905
1906 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1907 zc.zc_cookie = func;
1908
1909 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1910 (errno == ENOENT && func != POOL_SCAN_NONE))
1911 return (0);
1912
1913 if (func == POOL_SCAN_SCRUB) {
1914 (void) snprintf(msg, sizeof (msg),
1915 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1916 } else if (func == POOL_SCAN_NONE) {
1917 (void) snprintf(msg, sizeof (msg),
1918 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1919 zc.zc_name);
1920 } else {
1921 assert(!"unexpected result");
1922 }
1923
1924 if (errno == EBUSY) {
1925 nvlist_t *nvroot;
1926 pool_scan_stat_t *ps = NULL;
1927 uint_t psc;
1928
1929 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1930 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1931 (void) nvlist_lookup_uint64_array(nvroot,
1932 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1933 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1934 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1935 else
1936 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1937 } else if (errno == ENOENT) {
1938 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1939 } else {
1940 return (zpool_standard_error(hdl, errno, msg));
1941 }
1942 }
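
/*
 * Illustrative usage sketch (not part of the original file): starting
 * and cancelling a scrub map onto the two pool_scan_func_t values
 * handled above.
 */
#if 0
static void
example_scrub(zpool_handle_t *zhp)
{
	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);	/* start */
	(void) zpool_scan(zhp, POOL_SCAN_NONE);		/* cancel */
}
#endif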
1943
1944 /*
1945 * Find a vdev that matches the search criteria specified. We use the
1946 * nvpair name to determine how we should look for the device.
1947 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1948 * spare; but FALSE if it's an INUSE spare.
1949 */
1950 static nvlist_t *
1951 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1952 boolean_t *l2cache, boolean_t *log)
1953 {
1954 uint_t c, children;
1955 nvlist_t **child;
1956 nvlist_t *ret;
1957 uint64_t is_log;
1958 char *srchkey;
1959 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1960
1961 /* Nothing to look for */
1962 if (search == NULL || pair == NULL)
1963 return (NULL);
1964
1965 /* Obtain the key we will use to search */
1966 srchkey = nvpair_name(pair);
1967
1968 switch (nvpair_type(pair)) {
1969 case DATA_TYPE_UINT64:
1970 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1971 uint64_t srchval, theguid;
1972
1973 verify(nvpair_value_uint64(pair, &srchval) == 0);
1974 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1975 &theguid) == 0);
1976 if (theguid == srchval)
1977 return (nv);
1978 }
1979 break;
1980
1981 case DATA_TYPE_STRING: {
1982 char *srchval, *val;
1983
1984 verify(nvpair_value_string(pair, &srchval) == 0);
1985 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1986 break;
1987
1988 /*
1989 * Search for the requested value. Special cases:
1990 *
1991 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1992 * "-part1", or "p1". The suffix is hidden from the user,
1993 * but included in the string, so this matches around it.
1994 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
1995 * is used to check all possible expanded paths.
1996 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1997 *
1998 * Otherwise, all other searches are simple string compares.
1999 */
2000 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2001 uint64_t wholedisk = 0;
2002
2003 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2004 &wholedisk);
2005 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2006 return (nv);
2007
2008 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2009 char *type, *idx, *end, *p;
2010 uint64_t id, vdev_id;
2011
2012 /*
2013 * Determine our vdev type, keeping in mind
2014 * that the srchval is composed of a type and
2015 * vdev id pair (e.g. mirror-4).
2016 */
2017 if ((type = strdup(srchval)) == NULL)
2018 return (NULL);
2019
2020 if ((p = strrchr(type, '-')) == NULL) {
2021 free(type);
2022 break;
2023 }
2024 idx = p + 1;
2025 *p = '\0';
2026
2027 /*
2028 * If the types don't match then keep looking.
2029 */
2030 if (strncmp(val, type, strlen(val)) != 0) {
2031 free(type);
2032 break;
2033 }
2034
2035 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2036 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2037 strncmp(type, VDEV_TYPE_MIRROR,
2038 strlen(VDEV_TYPE_MIRROR)) == 0);
2039 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2040 &id) == 0);
2041
2042 errno = 0;
2043 vdev_id = strtoull(idx, &end, 10);
2044
2045 free(type);
2046 if (errno != 0)
2047 return (NULL);
2048
2049 /*
2050 * Now verify that we have the correct vdev id.
2051 */
2052 if (vdev_id == id)
2053 return (nv);
2054 }
2055
2056 /*
2057 * Common case
2058 */
2059 if (strcmp(srchval, val) == 0)
2060 return (nv);
2061 break;
2062 }
2063
2064 default:
2065 break;
2066 }
2067
2068 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2069 &child, &children) != 0)
2070 return (NULL);
2071
2072 for (c = 0; c < children; c++) {
2073 if ((ret = vdev_to_nvlist_iter(child[c], search,
2074 avail_spare, l2cache, NULL)) != NULL) {
2075 /*
2076 * The 'is_log' value is only set for the toplevel
2077 * vdev, not the leaf vdevs. So we always lookup the
2078 * log device from the root of the vdev tree (where
2079 * 'log' is non-NULL).
2080 */
2081 if (log != NULL &&
2082 nvlist_lookup_uint64(child[c],
2083 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2084 is_log) {
2085 *log = B_TRUE;
2086 }
2087 return (ret);
2088 }
2089 }
2090
2091 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2092 &child, &children) == 0) {
2093 for (c = 0; c < children; c++) {
2094 if ((ret = vdev_to_nvlist_iter(child[c], search,
2095 avail_spare, l2cache, NULL)) != NULL) {
2096 *avail_spare = B_TRUE;
2097 return (ret);
2098 }
2099 }
2100 }
2101
2102 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2103 &child, &children) == 0) {
2104 for (c = 0; c < children; c++) {
2105 if ((ret = vdev_to_nvlist_iter(child[c], search,
2106 avail_spare, l2cache, NULL)) != NULL) {
2107 *l2cache = B_TRUE;
2108 return (ret);
2109 }
2110 }
2111 }
2112
2113 return (NULL);
2114 }
2115
2116 /*
2117 * Given a physical path (minus the "/devices" prefix), find the
2118 * associated vdev.
2119 */
2120 nvlist_t *
2121 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2122 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2123 {
2124 nvlist_t *search, *nvroot, *ret;
2125
2126 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2127 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2128
2129 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2130 &nvroot) == 0);
2131
2132 *avail_spare = B_FALSE;
2133 *l2cache = B_FALSE;
2134 if (log != NULL)
2135 *log = B_FALSE;
2136 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2137 nvlist_free(search);
2138
2139 return (ret);
2140 }
2141
2142 /*
2143 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2144 */
2145 boolean_t
2146 zpool_vdev_is_interior(const char *name)
2147 {
2148 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2149 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2150 return (B_TRUE);
2151 return (B_FALSE);
2152 }
2153
2154 nvlist_t *
2155 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2156 boolean_t *l2cache, boolean_t *log)
2157 {
2158 char *end;
2159 nvlist_t *nvroot, *search, *ret;
2160 uint64_t guid;
2161
2162 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2163
2164 guid = strtoull(path, &end, 0);
2165 if (guid != 0 && *end == '\0') {
2166 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2167 } else if (zpool_vdev_is_interior(path)) {
2168 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2169 } else {
2170 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2171 }
2172
2173 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2174 &nvroot) == 0);
2175
2176 *avail_spare = B_FALSE;
2177 *l2cache = B_FALSE;
2178 if (log != NULL)
2179 *log = B_FALSE;
2180 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2181 nvlist_free(search);
2182
2183 return (ret);
2184 }
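/*
 * Illustrative use of zpool_find_vdev() (hypothetical caller, not part
 * of this library). The 'path' argument may be a vdev guid, an interior
 * vdev name such as "mirror-0", or a device path:
 *
 *     boolean_t spare, l2cache, log;
 *     nvlist_t *tgt;
 *
 *     tgt = zpool_find_vdev(zhp, "mirror-0", &spare, &l2cache, &log);
 *     if (tgt == NULL)
 *         ... no matching vdev in this pool ...
 */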
2185
2186 static int
2187 vdev_online(nvlist_t *nv)
2188 {
2189 uint64_t ival;
2190
2191 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2192 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2193 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2194 return (0);
2195
2196 return (1);
2197 }
2198
2199 /*
2200 * Helper function for vdev_get_physpaths().
2201 */
2202 static int
2203 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2204 size_t *bytes_written)
2205 {
2206 size_t bytes_left, pos, rsz;
2207 char *tmppath;
2208 const char *format;
2209
2210 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2211 &tmppath) != 0)
2212 return (EZFS_NODEVICE);
2213
2214 pos = *bytes_written;
2215 bytes_left = physpath_size - pos;
2216 format = (pos == 0) ? "%s" : " %s";
2217
2218 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2219 *bytes_written += rsz;
2220
2221 if (rsz >= bytes_left) {
2222 /* if physpath was not copied properly, clear it */
2223 if (bytes_left != 0) {
2224 physpath[pos] = 0;
2225 }
2226 return (EZFS_NOSPC);
2227 }
2228 return (0);
2229 }
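/*
 * Illustrative result (the device paths below are made up): successive
 * calls append space-separated physical paths into one buffer, so after
 * two devices 'physpath' might hold:
 *
 *     "/pci@0/scsi@1/disk@0,0:a /pci@0/scsi@1/disk@1,0:a"
 *
 * Only the " %s" joining behavior comes from the code above.
 */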
2230
2231 static int
2232 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2233 size_t *rsz, boolean_t is_spare)
2234 {
2235 char *type;
2236 int ret;
2237
2238 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2239 return (EZFS_INVALCONFIG);
2240
2241 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2242 /*
2243 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2244 * For a spare vdev, we only want to boot from the active
2245 * spare device.
2246 */
2247 if (is_spare) {
2248 uint64_t spare = 0;
2249 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2250 &spare);
2251 if (!spare)
2252 return (EZFS_INVALCONFIG);
2253 }
2254
2255 if (vdev_online(nv)) {
2256 if ((ret = vdev_get_one_physpath(nv, physpath,
2257 phypath_size, rsz)) != 0)
2258 return (ret);
2259 }
2260 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2261 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2262 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2263 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2264 nvlist_t **child;
2265 uint_t count;
2266 int i, ret;
2267
2268 if (nvlist_lookup_nvlist_array(nv,
2269 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2270 return (EZFS_INVALCONFIG);
2271
2272 for (i = 0; i < count; i++) {
2273 ret = vdev_get_physpaths(child[i], physpath,
2274 phypath_size, rsz, is_spare);
2275 if (ret == EZFS_NOSPC)
2276 return (ret);
2277 }
2278 }
2279
2280 return (EZFS_POOL_INVALARG);
2281 }
2282
2283 /*
2284 * Get phys_path for a root pool config.
2285 * Return 0 on success; non-zero on failure.
2286 */
2287 static int
2288 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2289 {
2290 size_t rsz;
2291 nvlist_t *vdev_root;
2292 nvlist_t **child;
2293 uint_t count;
2294 char *type;
2295
2296 rsz = 0;
2297
2298 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2299 &vdev_root) != 0)
2300 return (EZFS_INVALCONFIG);
2301
2302 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2303 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2304 &child, &count) != 0)
2305 return (EZFS_INVALCONFIG);
2306
2307 /*
2308 * A root pool can only have a single top-level vdev.
2309 */
2310 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2311 return (EZFS_POOL_INVALARG);
2312
2313 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2314 B_FALSE);
2315
2316 /* No online devices */
2317 if (rsz == 0)
2318 return (EZFS_NODEVICE);
2319
2320 return (0);
2321 }
2322
2323 /*
2324 * Get phys_path for a root pool
2325 * Return 0 on success; non-zero on failure.
2326 */
2327 int
2328 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2329 {
2330 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2331 phypath_size));
2332 }
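/*
 * Sketch of a hypothetical caller (not part of this library):
 *
 *     char physpath[MAXPATHLEN];
 *
 *     if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *         ... physpath now holds the boot device path(s) ...
 */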
2333
2334 /*
2335 * If the device has been dynamically expanded then we need to relabel
2336 * the disk to use the new unallocated space.
2337 */
2338 static int
2339 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2340 {
2341 int fd, error;
2342
2343 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2344 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2345 "relabel '%s': unable to open device: %d"), path, errno);
2346 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2347 }
2348
2349 /*
2350 * It's possible that we might encounter an error if the device
2351 * does not have any unallocated space left. If so, we simply
2352 * ignore that error and continue on.
2353 *
2354 * Also, we don't call efi_rescan() - that would just return EBUSY.
2355 * The module will do it for us in vdev_disk_open().
2356 */
2357 error = efi_use_whole_disk(fd);
2358 (void) close(fd);
2359 if (error && error != VT_ENOSPC) {
2360 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2361 "relabel '%s': unable to read disk capacity"), path);
2362 return (zfs_error(hdl, EZFS_NOCAP, msg));
2363 }
2364 return (0);
2365 }
2366
2367 /*
2368 * Bring the specified vdev online. The 'flags' parameter is a set of the
2369 * ZFS_ONLINE_* flags.
2370 */
2371 int
2372 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2373 vdev_state_t *newstate)
2374 {
2375 zfs_cmd_t zc = {"\0"};
2376 char msg[1024];
2377 nvlist_t *tgt;
2378 boolean_t avail_spare, l2cache, islog;
2379 libzfs_handle_t *hdl = zhp->zpool_hdl;
2380 int error;
2381
2382 if (flags & ZFS_ONLINE_EXPAND) {
2383 (void) snprintf(msg, sizeof (msg),
2384 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2385 } else {
2386 (void) snprintf(msg, sizeof (msg),
2387 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2388 }
2389
2390 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2391 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2392 &islog)) == NULL)
2393 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2394
2395 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2396
2397 if (avail_spare)
2398 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2399
2400 if (flags & ZFS_ONLINE_EXPAND ||
2401 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2402 uint64_t wholedisk = 0;
2403
2404 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2405 &wholedisk);
2406
2407 /*
2408 * XXX - L2ARC 1.0 devices can't support expansion.
2409 */
2410 if (l2cache) {
2411 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2412 "cannot expand cache devices"));
2413 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2414 }
2415
2416 if (wholedisk) {
2417 const char *fullpath = path;
2418 char buf[MAXPATHLEN];
2419
2420 if (path[0] != '/') {
2421 error = zfs_resolve_shortname(path, buf,
2422 sizeof (buf));
2423 if (error != 0)
2424 return (zfs_error(hdl, EZFS_NODEVICE,
2425 msg));
2426
2427 fullpath = buf;
2428 }
2429
2430 error = zpool_relabel_disk(hdl, fullpath, msg);
2431 if (error != 0)
2432 return (error);
2433 }
2434 }
2435
2436 zc.zc_cookie = VDEV_STATE_ONLINE;
2437 zc.zc_obj = flags;
2438
2439 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2440 if (errno == EINVAL) {
2441 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2442 "from this pool into a new one. Use '%s' "
2443 "instead"), "zpool detach");
2444 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2445 }
2446 return (zpool_standard_error(hdl, errno, msg));
2447 }
2448
2449 *newstate = zc.zc_cookie;
2450 return (0);
2451 }
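/*
 * Sketch of a hypothetical caller expanding a grown device; the device
 * name and the VDEV_STATE_HEALTHY check are illustrative:
 *
 *     vdev_state_t newstate;
 *
 *     if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *         &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *         ... online/expand request succeeded ...
 */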
2452
2453 /*
2454 * Take the specified vdev offline
2455 */
2456 int
2457 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2458 {
2459 zfs_cmd_t zc = {"\0"};
2460 char msg[1024];
2461 nvlist_t *tgt;
2462 boolean_t avail_spare, l2cache;
2463 libzfs_handle_t *hdl = zhp->zpool_hdl;
2464
2465 (void) snprintf(msg, sizeof (msg),
2466 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2467
2468 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2469 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2470 NULL)) == NULL)
2471 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2472
2473 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2474
2475 if (avail_spare)
2476 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2477
2478 zc.zc_cookie = VDEV_STATE_OFFLINE;
2479 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2480
2481 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2482 return (0);
2483
2484 switch (errno) {
2485 case EBUSY:
2486
2487 /*
2488 * There are no other replicas of this device.
2489 */
2490 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2491
2492 case EEXIST:
2493 /*
2494 * The log device has unplayed logs
2495 */
2496 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2497
2498 default:
2499 return (zpool_standard_error(hdl, errno, msg));
2500 }
2501 }
2502
2503 /*
2504 * Mark the given vdev faulted.
2505 */
2506 int
2507 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2508 {
2509 zfs_cmd_t zc = {"\0"};
2510 char msg[1024];
2511 libzfs_handle_t *hdl = zhp->zpool_hdl;
2512
2513 (void) snprintf(msg, sizeof (msg),
2514 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2515
2516 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2517 zc.zc_guid = guid;
2518 zc.zc_cookie = VDEV_STATE_FAULTED;
2519 zc.zc_obj = aux;
2520
2521 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2522 return (0);
2523
2524 switch (errno) {
2525 case EBUSY:
2526
2527 /*
2528 * There are no other replicas of this device.
2529 */
2530 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2531
2532 default:
2533 return (zpool_standard_error(hdl, errno, msg));
2534 }
2535
2536 }
2537
2538 /*
2539 * Mark the given vdev degraded.
2540 */
2541 int
2542 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2543 {
2544 zfs_cmd_t zc = {"\0"};
2545 char msg[1024];
2546 libzfs_handle_t *hdl = zhp->zpool_hdl;
2547
2548 (void) snprintf(msg, sizeof (msg),
2549 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2550
2551 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2552 zc.zc_guid = guid;
2553 zc.zc_cookie = VDEV_STATE_DEGRADED;
2554 zc.zc_obj = aux;
2555
2556 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2557 return (0);
2558
2559 return (zpool_standard_error(hdl, errno, msg));
2560 }
2561
2562 /*
2563 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2564 * a hot spare.
2565 */
2566 static boolean_t
2567 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2568 {
2569 nvlist_t **child;
2570 uint_t c, children;
2571 char *type;
2572
2573 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2574 &children) == 0) {
2575 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2576 &type) == 0);
2577
2578 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2579 children == 2 && child[which] == tgt)
2580 return (B_TRUE);
2581
2582 for (c = 0; c < children; c++)
2583 if (is_replacing_spare(child[c], tgt, which))
2584 return (B_TRUE);
2585 }
2586
2587 return (B_FALSE);
2588 }
2589
2590 /*
2591 * Attach new_disk (fully described by nvroot) to old_disk.
2592 * If 'replacing' is specified, the new disk will replace the old one.
2593 */
2594 int
2595 zpool_vdev_attach(zpool_handle_t *zhp,
2596 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2597 {
2598 zfs_cmd_t zc = {"\0"};
2599 char msg[1024];
2600 int ret;
2601 nvlist_t *tgt;
2602 boolean_t avail_spare, l2cache, islog;
2603 uint64_t val;
2604 char *newname;
2605 nvlist_t **child;
2606 uint_t children;
2607 nvlist_t *config_root;
2608 libzfs_handle_t *hdl = zhp->zpool_hdl;
2609 boolean_t rootpool = zpool_is_bootable(zhp);
2610
2611 if (replacing)
2612 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2613 "cannot replace %s with %s"), old_disk, new_disk);
2614 else
2615 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2616 "cannot attach %s to %s"), new_disk, old_disk);
2617
2618 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2619 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2620 &islog)) == 0)
2621 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2622
2623 if (avail_spare)
2624 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2625
2626 if (l2cache)
2627 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2628
2629 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2630 zc.zc_cookie = replacing;
2631
2632 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2633 &child, &children) != 0 || children != 1) {
2634 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2635 "new device must be a single disk"));
2636 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2637 }
2638
2639 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2640 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2641
2642 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2643 return (-1);
2644
2645 /*
2646 * If the target is a hot spare that has been swapped in, we can only
2647 * replace it with another hot spare.
2648 */
2649 if (replacing &&
2650 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2651 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2652 NULL) == NULL || !avail_spare) &&
2653 is_replacing_spare(config_root, tgt, 1)) {
2654 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2655 "can only be replaced by another hot spare"));
2656 free(newname);
2657 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2658 }
2659
2660 free(newname);
2661
2662 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2663 return (-1);
2664
2665 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2666
2667 zcmd_free_nvlists(&zc);
2668
2669 if (ret == 0) {
2670 if (rootpool) {
2671 /*
2672 * XXX need a better way to prevent user from
2673 * booting up a half-baked vdev.
2674 */
2675 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2676 "sure to wait until resilver is done "
2677 "before rebooting.\n"));
2678 }
2679 return (0);
2680 }
2681
2682 switch (errno) {
2683 case ENOTSUP:
2684 /*
2685 * Can't attach to or replace this type of vdev.
2686 */
2687 if (replacing) {
2688 uint64_t version = zpool_get_prop_int(zhp,
2689 ZPOOL_PROP_VERSION, NULL);
2690
2691 if (islog)
2692 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2693 "cannot replace a log with a spare"));
2694 else if (version >= SPA_VERSION_MULTI_REPLACE)
2695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2696 "already in replacing/spare config; wait "
2697 "for completion or use 'zpool detach'"));
2698 else
2699 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2700 "cannot replace a replacing device"));
2701 } else {
2702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2703 "can only attach to mirrors and top-level "
2704 "disks"));
2705 }
2706 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2707 break;
2708
2709 case EINVAL:
2710 /*
2711 * The new device must be a single disk.
2712 */
2713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2714 "new device must be a single disk"));
2715 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2716 break;
2717
2718 case EBUSY:
2719 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2720 new_disk);
2721 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2722 break;
2723
2724 case EOVERFLOW:
2725 /*
2726 * The new device is too small.
2727 */
2728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2729 "device is too small"));
2730 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2731 break;
2732
2733 case EDOM:
2734 /*
2735 * The new device has a different optimal sector size.
2736 */
2737 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2738 "new device has a different optimal sector size; use the "
2739 "option '-o ashift=N' to override the optimal size"));
2740 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2741 break;
2742
2743 case ENAMETOOLONG:
2744 /*
2745 * The resulting top-level vdev spec won't fit in the label.
2746 */
2747 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2748 break;
2749
2750 default:
2751 (void) zpool_standard_error(hdl, errno, msg);
2752 }
2753
2754 return (-1);
2755 }
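/*
 * Sketch of a replace via zpool_vdev_attach() (hypothetical caller;
 * construction of 'nvroot', which must describe exactly one new disk,
 * is omitted):
 *
 *     int err = zpool_vdev_attach(zhp, "sdb", "sdc", nvroot, 1);
 *
 * With 'replacing' set to 1 the new disk replaces the old one; with 0
 * it is attached alongside it, forming (or extending) a mirror.
 */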
2756
2757 /*
2758 * Detach the specified device.
2759 */
2760 int
2761 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2762 {
2763 zfs_cmd_t zc = {"\0"};
2764 char msg[1024];
2765 nvlist_t *tgt;
2766 boolean_t avail_spare, l2cache;
2767 libzfs_handle_t *hdl = zhp->zpool_hdl;
2768
2769 (void) snprintf(msg, sizeof (msg),
2770 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2771
2772 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2773 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2774 NULL)) == 0)
2775 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2776
2777 if (avail_spare)
2778 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2779
2780 if (l2cache)
2781 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2782
2783 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2784
2785 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2786 return (0);
2787
2788 switch (errno) {
2789
2790 case ENOTSUP:
2791 /*
2792 * Can't detach from this type of vdev.
2793 */
2794 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2795 "applicable to mirror and replacing vdevs"));
2796 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2797 break;
2798
2799 case EBUSY:
2800 /*
2801 * There are no other replicas of this device.
2802 */
2803 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2804 break;
2805
2806 default:
2807 (void) zpool_standard_error(hdl, errno, msg);
2808 }
2809
2810 return (-1);
2811 }
2812
2813 /*
2814 * Find a mirror vdev in the source nvlist.
2815 *
2816 * The mchild array contains a list of disks in one of the top-level mirrors
2817 * of the source pool. The schild array contains a list of disks that the
2818 * user specified on the command line. We loop over the mchild array to
2819 * see if any entry in the schild array matches.
2820 *
2821 * If a disk in the mchild array is found in the schild array, we return
2822 * the index of that entry. Otherwise we return -1.
2823 */
2824 static int
2825 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2826 nvlist_t **schild, uint_t schildren)
2827 {
2828 uint_t mc;
2829
2830 for (mc = 0; mc < mchildren; mc++) {
2831 uint_t sc;
2832 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2833 mchild[mc], 0);
2834
2835 for (sc = 0; sc < schildren; sc++) {
2836 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2837 schild[sc], 0);
2838 boolean_t result = (strcmp(mpath, spath) == 0);
2839
2840 free(spath);
2841 if (result) {
2842 free(mpath);
2843 return (mc);
2844 }
2845 }
2846
2847 free(mpath);
2848 }
2849
2850 return (-1);
2851 }
2852
2853 /*
2854 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2855 * is generated and it is the caller's responsibility to free it.
2856 */
2857 int
2858 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2859 nvlist_t *props, splitflags_t flags)
2860 {
2861 zfs_cmd_t zc = {"\0"};
2862 char msg[1024];
2863 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2864 nvlist_t **varray = NULL, *zc_props = NULL;
2865 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2866 libzfs_handle_t *hdl = zhp->zpool_hdl;
2867 uint64_t vers;
2868 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2869 int retval = 0;
2870
2871 (void) snprintf(msg, sizeof (msg),
2872 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2873
2874 if (!zpool_name_valid(hdl, B_FALSE, newname))
2875 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2876
2877 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2878 (void) fprintf(stderr, gettext("Internal error: unable to "
2879 "retrieve pool configuration\n"));
2880 return (-1);
2881 }
2882
2883 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2884 == 0);
2885 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2886
2887 if (props) {
2888 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2889 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2890 props, vers, flags, msg)) == NULL)
2891 return (-1);
2892 }
2893
2894 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2895 &children) != 0) {
2896 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2897 "Source pool is missing vdev tree"));
2898 nvlist_free(zc_props);
2899 return (-1);
2900 }
2901
2902 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2903 vcount = 0;
2904
2905 if (*newroot == NULL ||
2906 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2907 &newchild, &newchildren) != 0)
2908 newchildren = 0;
2909
2910 for (c = 0; c < children; c++) {
2911 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2912 char *type;
2913 nvlist_t **mchild, *vdev;
2914 uint_t mchildren;
2915 int entry;
2916
2917 /*
2918 * Unlike cache & spares, slogs are stored in the
2919 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2920 */
2921 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2922 &is_log);
2923 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2924 &is_hole);
2925 if (is_log || is_hole) {
2926 /*
2927 * Create a hole vdev and put it in the config.
2928 */
2929 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2930 goto out;
2931 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2932 VDEV_TYPE_HOLE) != 0)
2933 goto out;
2934 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2935 1) != 0)
2936 goto out;
2937 if (lastlog == 0)
2938 lastlog = vcount;
2939 varray[vcount++] = vdev;
2940 continue;
2941 }
2942 lastlog = 0;
2943 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2944 == 0);
2945 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2946 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2947 "Source pool must be composed only of mirrors\n"));
2948 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2949 goto out;
2950 }
2951
2952 verify(nvlist_lookup_nvlist_array(child[c],
2953 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2954
2955 /* find or add an entry for this top-level vdev */
2956 if (newchildren > 0 &&
2957 (entry = find_vdev_entry(zhp, mchild, mchildren,
2958 newchild, newchildren)) >= 0) {
2959 /* We found a disk that the user specified. */
2960 vdev = mchild[entry];
2961 ++found;
2962 } else {
2963 /* User didn't specify a disk for this vdev. */
2964 vdev = mchild[mchildren - 1];
2965 }
2966
2967 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2968 goto out;
2969 }
2970
2971 /* did we find every disk the user specified? */
2972 if (found != newchildren) {
2973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2974 "include at most one disk from each mirror"));
2975 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2976 goto out;
2977 }
2978
2979 /* Prepare the nvlist for populating. */
2980 if (*newroot == NULL) {
2981 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2982 goto out;
2983 freelist = B_TRUE;
2984 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2985 VDEV_TYPE_ROOT) != 0)
2986 goto out;
2987 } else {
2988 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2989 }
2990
2991 /* Add all the children we found */
2992 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2993 lastlog == 0 ? vcount : lastlog) != 0)
2994 goto out;
2995
2996 /*
2997 * If we're just doing a dry run, exit now with success.
2998 */
2999 if (flags.dryrun) {
3000 memory_err = B_FALSE;
3001 freelist = B_FALSE;
3002 goto out;
3003 }
3004
3005 /* now build up the config list & call the ioctl */
3006 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3007 goto out;
3008
3009 if (nvlist_add_nvlist(newconfig,
3010 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3011 nvlist_add_string(newconfig,
3012 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3013 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3014 goto out;
3015
3016 /*
3017 * The new pool is automatically part of the namespace unless we
3018 * explicitly export it.
3019 */
3020 if (!flags.import)
3021 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3022 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3023 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3024 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3025 goto out;
3026 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3027 goto out;
3028
3029 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3030 retval = zpool_standard_error(hdl, errno, msg);
3031 goto out;
3032 }
3033
3034 freelist = B_FALSE;
3035 memory_err = B_FALSE;
3036
3037 out:
3038 if (varray != NULL) {
3039 int v;
3040
3041 for (v = 0; v < vcount; v++)
3042 nvlist_free(varray[v]);
3043 free(varray);
3044 }
3045 zcmd_free_nvlists(&zc);
3046 nvlist_free(zc_props);
3047 nvlist_free(newconfig);
3048 if (freelist) {
3049 nvlist_free(*newroot);
3050 *newroot = NULL;
3051 }
3052
3053 if (retval != 0)
3054 return (retval);
3055
3056 if (memory_err)
3057 return (no_memory(hdl));
3058
3059 return (0);
3060 }
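/*
 * Hypothetical dry-run use (not part of this library): with
 * flags.dryrun set, '*newroot' is populated but no ioctl is issued,
 * so a caller can preview the split and must free the result:
 *
 *     nvlist_t *newroot = NULL;
 *     splitflags_t flags = { 0 };
 *
 *     flags.dryrun = 1;
 *     if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *         nvlist_free(newroot);
 */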
3061
3062 /*
3063 * Remove the given device. Currently, this is supported only for hot spares,
3064 * cache, and log devices.
3065 */
3066 int
3067 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3068 {
3069 zfs_cmd_t zc = {"\0"};
3070 char msg[1024];
3071 nvlist_t *tgt;
3072 boolean_t avail_spare, l2cache, islog;
3073 libzfs_handle_t *hdl = zhp->zpool_hdl;
3074 uint64_t version;
3075
3076 (void) snprintf(msg, sizeof (msg),
3077 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3078
3079 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3080 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3081 &islog)) == 0)
3082 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3083 /*
3084 * XXX - this should just go away.
3085 */
3086 if (!avail_spare && !l2cache && !islog) {
3087 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3088 "only inactive hot spares, cache, "
3089 "or log devices can be removed"));
3090 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3091 }
3092
3093 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3094 if (islog && version < SPA_VERSION_HOLES) {
3095 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3096 "pool must be upgraded to support log removal"));
3097 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3098 }
3099
3100 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3101
3102 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3103 return (0);
3104
3105 return (zpool_standard_error(hdl, errno, msg));
3106 }
3107
3108 /*
3109 * Clear the errors for the pool, or the particular device if specified.
3110 */
3111 int
3112 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3113 {
3114 zfs_cmd_t zc = {"\0"};
3115 char msg[1024];
3116 nvlist_t *tgt;
3117 zpool_rewind_policy_t policy;
3118 boolean_t avail_spare, l2cache;
3119 libzfs_handle_t *hdl = zhp->zpool_hdl;
3120 nvlist_t *nvi = NULL;
3121 int error;
3122
3123 if (path)
3124 (void) snprintf(msg, sizeof (msg),
3125 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3126 path);
3127 else
3128 (void) snprintf(msg, sizeof (msg),
3129 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3130 zhp->zpool_name);
3131
3132 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3133 if (path) {
3134 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3135 &l2cache, NULL)) == 0)
3136 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3137
3138 /*
3139 * Don't allow error clearing for hot spares. Do allow
3140 * error clearing for l2cache devices.
3141 */
3142 if (avail_spare)
3143 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3144
3145 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3146 &zc.zc_guid) == 0);
3147 }
3148
3149 zpool_get_rewind_policy(rewindnvl, &policy);
3150 zc.zc_cookie = policy.zrp_request;
3151
3152 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3153 return (-1);
3154
3155 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3156 return (-1);
3157
3158 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3159 errno == ENOMEM) {
3160 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3161 zcmd_free_nvlists(&zc);
3162 return (-1);
3163 }
3164 }
3165
3166 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3167 errno != EPERM && errno != EACCES)) {
3168 if (policy.zrp_request &
3169 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3170 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3171 zpool_rewind_exclaim(hdl, zc.zc_name,
3172 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3173 nvi);
3174 nvlist_free(nvi);
3175 }
3176 zcmd_free_nvlists(&zc);
3177 return (0);
3178 }
3179
3180 zcmd_free_nvlists(&zc);
3181 return (zpool_standard_error(hdl, errno, msg));
3182 }
3183
3184 /*
3185 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3186 */
3187 int
3188 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3189 {
3190 zfs_cmd_t zc = {"\0"};
3191 char msg[1024];
3192 libzfs_handle_t *hdl = zhp->zpool_hdl;
3193
3194 (void) snprintf(msg, sizeof (msg),
3195 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3196 (u_longlong_t)guid);
3197
3198 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3199 zc.zc_guid = guid;
3200 zc.zc_cookie = ZPOOL_NO_REWIND;
3201
3202 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3203 return (0);
3204
3205 return (zpool_standard_error(hdl, errno, msg));
3206 }
3207
3208 /*
3209 * Change the GUID for a pool.
3210 */
3211 int
3212 zpool_reguid(zpool_handle_t *zhp)
3213 {
3214 char msg[1024];
3215 libzfs_handle_t *hdl = zhp->zpool_hdl;
3216 zfs_cmd_t zc = {"\0"};
3217
3218 (void) snprintf(msg, sizeof (msg),
3219 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3220
3221 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3222 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3223 return (0);
3224
3225 return (zpool_standard_error(hdl, errno, msg));
3226 }
3227
3228 /*
3229 * Reopen the pool.
3230 */
3231 int
3232 zpool_reopen(zpool_handle_t *zhp)
3233 {
3234 zfs_cmd_t zc = {"\0"};
3235 char msg[1024];
3236 libzfs_handle_t *hdl = zhp->zpool_hdl;
3237
3238 (void) snprintf(msg, sizeof (msg),
3239 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3240 zhp->zpool_name);
3241
3242 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3243 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3244 return (0);
3245 return (zpool_standard_error(hdl, errno, msg));
3246 }
3247
3248 #if defined(__sun__) || defined(__sun)
3249 /*
3250 * Convert from a devid string to a path.
3251 */
3252 static char *
3253 devid_to_path(char *devid_str)
3254 {
3255 ddi_devid_t devid;
3256 char *minor;
3257 char *path;
3258 devid_nmlist_t *list = NULL;
3259 int ret;
3260
3261 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3262 return (NULL);
3263
3264 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3265
3266 devid_str_free(minor);
3267 devid_free(devid);
3268
3269 if (ret != 0)
3270 return (NULL);
3271
3272 /*
3273 * In case the strdup() fails, we will just return NULL below.
3274 */
3275 path = strdup(list[0].devname);
3276
3277 devid_free_nmlist(list);
3278
3279 return (path);
3280 }
3281
3282 /*
3283 * Convert from a path to a devid string.
3284 */
3285 static char *
3286 path_to_devid(const char *path)
3287 {
3288 int fd;
3289 ddi_devid_t devid;
3290 char *minor, *ret;
3291
3292 if ((fd = open(path, O_RDONLY)) < 0)
3293 return (NULL);
3294
3295 minor = NULL;
3296 ret = NULL;
3297 if (devid_get(fd, &devid) == 0) {
3298 if (devid_get_minor_name(fd, &minor) == 0)
3299 ret = devid_str_encode(devid, minor);
3300 if (minor != NULL)
3301 devid_str_free(minor);
3302 devid_free(devid);
3303 }
3304 (void) close(fd);
3305
3306 return (ret);
3307 }
3308
3309 /*
3310 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3311 * ignore any failure here, since a common case is for an unprivileged user to
3312 * type 'zpool status', and we'll display the correct information anyway.
3313 */
3314 static void
3315 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3316 {
3317 zfs_cmd_t zc = {"\0"};
3318
3319 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3320 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3321 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3322 &zc.zc_guid) == 0);
3323
3324 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3325 }
3326 #endif /* sun */
3327
3328 /*
3329 * Remove partition suffix from a vdev path. Partition suffixes may take three
3330 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3331 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3332 * third case only occurs when preceded by a string matching the regular
3333 * expression "^([hsv]|xv)d[a-z]+", i.e. a SCSI, IDE, virtio or Xen disk.
3334 *
3335 * The caller must free the returned string.
3336 */
3337 char *
3338 zfs_strip_partition(char *path)
3339 {
3340 char *tmp = strdup(path);
3341 char *part = NULL, *d = NULL;
3342 if (!tmp)
3343 return (NULL);
3344
3345 if ((part = strstr(tmp, "-part")) && part != tmp) {
3346 d = part + 5;
3347 } else if ((part = strrchr(tmp, 'p')) &&
3348 part > tmp + 1 && isdigit(*(part-1))) {
3349 d = part + 1;
3350 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3351 tmp[1] == 'd') {
3352 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3353 } else if (strncmp("xvd", tmp, 3) == 0) {
3354 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3355 }
3356 if (part && d && *d != '\0') {
3357 for (; isdigit(*d); d++) { }
3358 if (*d == '\0')
3359 *part = '\0';
3360 }
3361
3362 return (tmp);
3363 }
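/*
 * Examples of the three suffix forms handled above (device names are
 * illustrative; inputs on the left, returned strings on the right):
 *
 *     "/dev/disk/by-id/ata-FOO-part1"  ->  "/dev/disk/by-id/ata-FOO"
 *     "md0p1"                          ->  "md0"
 *     "sda1"                           ->  "sda"
 */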
3364
3365 /*
3366 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3367 *
3368 * path: /dev/sda1
3369 * returns: /dev/sda
3370 *
3371 * Returned string must be freed.
3372 */
3373 char *
3374 zfs_strip_partition_path(char *path)
3375 {
3376 char *newpath = strdup(path);
3377 char *sd_offset;
3378 char *new_sd;
3379
3380 if (!newpath)
3381 return (NULL);
3382
3383 /* Point to "sda1" part of "/dev/sda1" */
3384 sd_offset = strrchr(newpath, '/') + 1;
3385
3386 /* Get our new name "sda" */
3387 new_sd = zfs_strip_partition(sd_offset);
3388 if (!new_sd) {
3389 free(newpath);
3390 return (NULL);
3391 }
3392
3393 /* Paste the "sda" where "sda1" was */
3394 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3395
3396 /* Free temporary "sda" */
3397 free(new_sd);
3398
3399 return (newpath);
3400 }
3401
3402 #define PATH_BUF_LEN 64
3403
3404 /*
3405 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3406 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3407 * We also check if this is a whole disk, in which case we strip off the
3408 * trailing 's0' slice name.
3409 *
3410 * This routine is also responsible for identifying when disks have been
3411 * reconfigured in a new location. The kernel will have opened the device by
3412 * devid, but the path will still refer to the old location. To catch this, we
3413 * first do a path -> devid translation (which is fast for the common case). If
3414 * the devid matches, we're done. If not, we do a reverse devid -> path
3415 * translation and issue the appropriate ioctl() to update the path of the vdev.
3416 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3417 * of these checks.
3418 */
3419 char *
3420 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3421 int name_flags)
3422 {
3423 char *path, *type, *env;
3424 uint64_t value;
3425 char buf[PATH_BUF_LEN];
3426 char tmpbuf[PATH_BUF_LEN];
3427
3428 env = getenv("ZPOOL_VDEV_NAME_PATH");
3429 if (env && (strtoul(env, NULL, 0) > 0 ||
3430 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3431 name_flags |= VDEV_NAME_PATH;
3432
3433 env = getenv("ZPOOL_VDEV_NAME_GUID");
3434 if (env && (strtoul(env, NULL, 0) > 0 ||
3435 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3436 name_flags |= VDEV_NAME_GUID;
3437
3438 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3439 if (env && (strtoul(env, NULL, 0) > 0 ||
3440 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3441 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3442
3443 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3444 name_flags & VDEV_NAME_GUID) {
3445 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3446 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3447 path = buf;
3448 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3449 #if defined(__sun__) || defined(__sun)
3450 /*
3451 * Live VDEV path updates to a kernel VDEV during a
3452 * zpool_vdev_name lookup are not supported on Linux.
3453 */
3454 char *devid;
3455 vdev_stat_t *vs;
3456 uint_t vsc;
3457
3458 /*
3459 * If the device is dead (faulted, offline, etc) then don't
3460 * bother opening it. Otherwise we may be forcing the user to
3461 * open a misbehaving device, which can have undesirable
3462 * effects.
3463 */
3464 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3465 (uint64_t **)&vs, &vsc) != 0 ||
3466 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3467 zhp != NULL &&
3468 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3469 /*
3470 * Determine if the current path is correct.
3471 */
3472 char *newdevid = path_to_devid(path);
3473
3474 if (newdevid == NULL ||
3475 strcmp(devid, newdevid) != 0) {
3476 char *newpath;
3477
3478 if ((newpath = devid_to_path(devid)) != NULL) {
3479 /*
3480 * Update the path appropriately.
3481 */
3482 set_path(zhp, nv, newpath);
3483 if (nvlist_add_string(nv,
3484 ZPOOL_CONFIG_PATH, newpath) == 0)
3485 verify(nvlist_lookup_string(nv,
3486 ZPOOL_CONFIG_PATH,
3487 &path) == 0);
3488 free(newpath);
3489 }
3490 }
3491
3492 if (newdevid)
3493 devid_str_free(newdevid);
3494 }
3495 #endif /* sun */
3496
3497 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3498 char *rp = realpath(path, NULL);
3499 if (rp) {
3500 strlcpy(buf, rp, sizeof (buf));
3501 path = buf;
3502 free(rp);
3503 }
3504 }
3505
3506 /*
3507 * For a block device only use the name.
3508 */
3509 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3510 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3511 !(name_flags & VDEV_NAME_PATH)) {
3512 path = strrchr(path, '/');
3513 path++;
3514 }
3515
3516 /*
3517 * Remove the partition from the path if this is a whole disk.
3518 */
3519 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3520 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3521 return (zfs_strip_partition(path));
3522 }
3523 } else {
3524 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3525
3526 /*
3527 * If it's a raidz device, we need to stick in the parity level.
3528 */
3529 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3530 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3531 &value) == 0);
3532 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3533 (u_longlong_t)value);
3534 path = buf;
3535 }
3536
3537 /*
3538 * We identify each top-level vdev by using a <type-id>
3539 * naming convention.
3540 */
3541 if (name_flags & VDEV_NAME_TYPE_ID) {
3542 uint64_t id;
3543 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3544 &id) == 0);
3545 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3546 path, (u_longlong_t)id);
3547 path = tmpbuf;
3548 }
3549 }
3550
3551 return (zfs_strdup(hdl, path));
3552 }
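/*
 * Behavior sketch (the device name is hypothetical): with no flags, a
 * whole-disk vdev whose path is "/dev/sda1" is reported as "sda";
 * VDEV_NAME_PATH (or ZPOOL_VDEV_NAME_PATH=1 in the environment)
 * preserves "/dev/sda1", and VDEV_NAME_GUID reports the numeric guid
 * instead.
 */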
3553
3554 static int
3555 zbookmark_mem_compare(const void *a, const void *b)
3556 {
3557 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3558 }
3559
3560 /*
3561 * Retrieve the persistent error log, uniquify the members, and return to the
3562 * caller.
3563 */
3564 int
3565 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3566 {
3567 zfs_cmd_t zc = {"\0"};
3568 uint64_t count;
3569 zbookmark_phys_t *zb = NULL;
3570 int i;
3571
3572 /*
3573 * Retrieve the raw error list from the kernel. If the number of errors
3574 * has increased, allocate more space and continue until we get the
3575 * entire list.
3576 */
3577 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3578 &count) == 0);
3579 if (count == 0)
3580 return (0);
3581 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3582 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3583 return (-1);
3584 zc.zc_nvlist_dst_size = count;
3585 (void) strcpy(zc.zc_name, zhp->zpool_name);
3586 for (;;) {
3587 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3588 &zc) != 0) {
3589 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3590 if (errno == ENOMEM) {
3591 void *dst;
3592
3593 count = zc.zc_nvlist_dst_size;
3594 dst = zfs_alloc(zhp->zpool_hdl, count *
3595 sizeof (zbookmark_phys_t));
3596 if (dst == NULL)
3597 return (-1);
3598 zc.zc_nvlist_dst = (uintptr_t)dst;
3599 } else {
3600 return (-1);
3601 }
3602 } else {
3603 break;
3604 }
3605 }
3606
3607 /*
3608 * Sort the resulting bookmarks. This is a little confusing due to the
3609 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3610 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3611 * _not_ copied as part of the process. So we point the start of our
3612 * array appropriately and decrement the total number of elements.
3613 */
3614 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3615 zc.zc_nvlist_dst_size;
3616 count -= zc.zc_nvlist_dst_size;
3617
3618 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3619
3620 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3621
3622 /*
3623 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3624 */
3625 for (i = 0; i < count; i++) {
3626 nvlist_t *nv;
3627
3628 /* ignoring zb_blkid and zb_level for now */
3629 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3630 zb[i-1].zb_object == zb[i].zb_object)
3631 continue;
3632
3633 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3634 goto nomem;
3635 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3636 zb[i].zb_objset) != 0) {
3637 nvlist_free(nv);
3638 goto nomem;
3639 }
3640 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3641 zb[i].zb_object) != 0) {
3642 nvlist_free(nv);
3643 goto nomem;
3644 }
3645 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3646 nvlist_free(nv);
3647 goto nomem;
3648 }
3649 nvlist_free(nv);
3650 }
3651
3652 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3653 return (0);
3654
3655 nomem:
3656 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3657 return (no_memory(zhp->zpool_hdl));
3658 }
3659
3660 /*
3661 * Upgrade a ZFS pool to the latest on-disk version.
3662 */
3663 int
3664 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3665 {
3666 zfs_cmd_t zc = {"\0"};
3667 libzfs_handle_t *hdl = zhp->zpool_hdl;
3668
3669 (void) strcpy(zc.zc_name, zhp->zpool_name);
3670 zc.zc_cookie = new_version;
3671
3672 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3673 return (zpool_standard_error_fmt(hdl, errno,
3674 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3675 zhp->zpool_name));
3676 return (0);
3677 }
3678
3679 void
3680 zfs_save_arguments(int argc, char **argv, char *string, int len)
3681 {
3682 int i;
3683
3684 (void) strlcpy(string, basename(argv[0]), len);
3685 for (i = 1; i < argc; i++) {
3686 (void) strlcat(string, " ", len);
3687 (void) strlcat(string, argv[i], len);
3688 }
3689 }
3690
3691 int
3692 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3693 {
3694 zfs_cmd_t zc = {"\0"};
3695 nvlist_t *args;
3696 int err;
3697
3698 args = fnvlist_alloc();
3699 fnvlist_add_string(args, "message", message);
3700 err = zcmd_write_src_nvlist(hdl, &zc, args);
3701 if (err == 0)
3702 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3703 nvlist_free(args);
3704 zcmd_free_nvlists(&zc);
3705 return (err);
3706 }
3707
3708 /*
3709 * Perform ioctl to get some command history of a pool.
3710 *
3711 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3712 * logical offset of the history buffer to start reading from.
3713 *
3714 * Upon return, 'off' is the next logical offset to read from and
3715 * 'len' is the actual amount of bytes read into 'buf'.
3716 */
3717 static int
3718 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3719 {
3720 zfs_cmd_t zc = {"\0"};
3721 libzfs_handle_t *hdl = zhp->zpool_hdl;
3722
3723 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3724
3725 zc.zc_history = (uint64_t)(uintptr_t)buf;
3726 zc.zc_history_len = *len;
3727 zc.zc_history_offset = *off;
3728
3729 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3730 switch (errno) {
3731 case EPERM:
3732 return (zfs_error_fmt(hdl, EZFS_PERM,
3733 dgettext(TEXT_DOMAIN,
3734 "cannot show history for pool '%s'"),
3735 zhp->zpool_name));
3736 case ENOENT:
3737 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3738 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3739 "'%s'"), zhp->zpool_name));
3740 case ENOTSUP:
3741 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3742 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3743 "'%s', pool must be upgraded"), zhp->zpool_name));
3744 default:
3745 return (zpool_standard_error_fmt(hdl, errno,
3746 dgettext(TEXT_DOMAIN,
3747 "cannot get history for '%s'"), zhp->zpool_name));
3748 }
3749 }
3750
3751 *len = zc.zc_history_len;
3752 *off = zc.zc_history_offset;
3753
3754 return (0);
3755 }
3756
3757 /*
3758 * Process the buffer of nvlists, unpacking and storing each nvlist record
3759 * into 'records'. 'leftover' is set to the number of bytes that weren't
3760 * processed as there wasn't a complete record.
3761 */
3762 int
3763 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3764 nvlist_t ***records, uint_t *numrecords)
3765 {
3766 uint64_t reclen;
3767 nvlist_t *nv;
3768 int i;
3769 void *tmp;
3770
3771 while (bytes_read > sizeof (reclen)) {
3772
3773 /* get length of packed record (stored as little endian) */
3774 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3775 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3776
3777 if (bytes_read < sizeof (reclen) + reclen)
3778 break;
3779
3780 /* unpack record */
3781 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3782 return (ENOMEM);
3783 bytes_read -= sizeof (reclen) + reclen;
3784 buf += sizeof (reclen) + reclen;
3785
3786 /* add record to nvlist array */
3787 (*numrecords)++;
3788 if (ISP2(*numrecords + 1)) {
3789 tmp = realloc(*records,
3790 *numrecords * 2 * sizeof (nvlist_t *));
3791 if (tmp == NULL) {
3792 nvlist_free(nv);
3793 (*numrecords)--;
3794 return (ENOMEM);
3795 }
3796 *records = tmp;
3797 }
3798 (*records)[*numrecords - 1] = nv;
3799 }
3800
3801 *leftover = bytes_read;
3802 return (0);
3803 }
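/*
 * Buffer layout consumed by the loop above: each record is a 64-bit
 * little-endian length followed by that many bytes of packed nvlist,
 * back to back:
 *
 *     +----------------+---------------+----------------+-----------
 *     | reclen 0 (8 B, | packed nvlist | reclen 1 (8 B, | packed ...
 *     | little endian) | (reclen 0 B)  | little endian) |
 *     +----------------+---------------+----------------+-----------
 *
 * A trailing partial record is left unconsumed and reported via
 * 'leftover' so the caller can retry it with a larger buffer.
 */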
3804
3805 /*
3806 * Retrieve the command history of a pool.
3807 */
3808 int
3809 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3810 {
3811 char *buf;
3812 int buflen = 128 * 1024;
3813 uint64_t off = 0;
3814 nvlist_t **records = NULL;
3815 uint_t numrecords = 0;
3816 int err, i;
3817
3818 buf = malloc(buflen);
3819 if (buf == NULL)
3820 return (ENOMEM);
3821 do {
3822 uint64_t bytes_read = buflen;
3823 uint64_t leftover;
3824
3825 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3826 break;
3827
3828 /* if nothing else was read in, we're at EOF, just return */
3829 if (!bytes_read)
3830 break;
3831
3832 if ((err = zpool_history_unpack(buf, bytes_read,
3833 &leftover, &records, &numrecords)) != 0)
3834 break;
3835 off -= leftover;
3836 if (leftover == bytes_read) {
3837 /*
3838 * no progress made, because buffer is not big enough
3839 * to hold this record; resize and retry.
3840 */
3841 buflen *= 2;
3842 free(buf);
3843 buf = malloc(buflen);
3844 if (buf == NULL)
3845 return (ENOMEM);
3846 }
3847
3848 /* CONSTCOND */
3849 } while (1);
3850
3851 free(buf);
3852
3853 if (!err) {
3854 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3855 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3856 records, numrecords) == 0);
3857 }
3858 for (i = 0; i < numrecords; i++)
3859 nvlist_free(records[i]);
3860 free(records);
3861
3862 return (err);
3863 }
3864
3865 /*
3866 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3867 * If there is a new event available 'nvp' will contain a newly allocated
3868 * nvlist and 'dropped' will be set to the number of missed events since
3869 * the last call to this function. When 'nvp' is set to NULL it indicates
3870 * no new events are available. In either case the function returns 0 and
3871 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3872 * function will return a non-zero value. When the function is called in
3873 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3874 * it will not return until a new event is available.
3875 */
3876 int
3877 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3878 int *dropped, unsigned flags, int zevent_fd)
3879 {
3880 zfs_cmd_t zc = {"\0"};
3881 int error = 0;
3882
3883 *nvp = NULL;
3884 *dropped = 0;
3885 zc.zc_cleanup_fd = zevent_fd;
3886
3887 if (flags & ZEVENT_NONBLOCK)
3888 zc.zc_guid = ZEVENT_NONBLOCK;
3889
3890 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3891 return (-1);
3892
3893 retry:
3894 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3895 switch (errno) {
3896 case ESHUTDOWN:
3897 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3898 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3899 goto out;
3900 case ENOENT:
3901 /* Blocking error case should not occur */
3902 if (!(flags & ZEVENT_NONBLOCK))
3903 error = zpool_standard_error_fmt(hdl, errno,
3904 dgettext(TEXT_DOMAIN, "cannot get event"));
3905
3906 goto out;
3907 case ENOMEM:
3908 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3909 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3910 dgettext(TEXT_DOMAIN, "cannot get event"));
3911 goto out;
3912 } else {
3913 goto retry;
3914 }
3915 default:
3916 error = zpool_standard_error_fmt(hdl, errno,
3917 dgettext(TEXT_DOMAIN, "cannot get event"));
3918 goto out;
3919 }
3920 }
3921
3922 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3923 if (error != 0)
3924 goto out;
3925
3926 *dropped = (int)zc.zc_cookie;
3927 out:
3928 zcmd_free_nvlists(&zc);
3929
3930 return (error);
3931 }
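/*
 * Minimal polling loop (hypothetical consumer): drain all pending
 * events without blocking, freeing each nvlist as documented above:
 *
 *     nvlist_t *nvl;
 *     int dropped;
 *
 *     while (zpool_events_next(hdl, &nvl, &dropped,
 *         ZEVENT_NONBLOCK, zevent_fd) == 0 && nvl != NULL) {
 *         ... process event; 'dropped' counts missed events ...
 *         nvlist_free(nvl);
 *     }
 */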
3932
3933 /*
3934 * Clear all events.
3935 */
3936 int
3937 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3938 {
3939 zfs_cmd_t zc = {"\0"};
3940 char msg[1024];
3941
3942 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3943 "cannot clear events"));
3944
3945 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3946 return (zpool_standard_error_fmt(hdl, errno, msg));
3947
3948 if (count != NULL)
3949 *count = (int)zc.zc_cookie; /* # of events cleared */
3950
3951 return (0);
3952 }
3953
3954 /*
3955 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3956 * the passed zevent_fd file handle. On success zero is returned,
3957 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3958 */
3959 int
3960 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3961 {
3962 zfs_cmd_t zc = {"\0"};
3963 int error = 0;
3964
3965 zc.zc_guid = eid;
3966 zc.zc_cleanup_fd = zevent_fd;
3967
3968 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3969 switch (errno) {
3970 case ENOENT:
3971 error = zfs_error_fmt(hdl, EZFS_NOENT,
3972 dgettext(TEXT_DOMAIN, "cannot get event"));
3973 break;
3974
3975 case ENOMEM:
3976 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3977 dgettext(TEXT_DOMAIN, "cannot get event"));
3978 break;
3979
3980 default:
3981 error = zpool_standard_error_fmt(hdl, errno,
3982 dgettext(TEXT_DOMAIN, "cannot get event"));
3983 break;
3984 }
3985 }
3986
3987 return (error);
3988 }
3989
3990 void
3991 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3992 char *pathname, size_t len)
3993 {
3994 zfs_cmd_t zc = {"\0"};
3995 boolean_t mounted = B_FALSE;
3996 char *mntpnt = NULL;
3997 char dsname[ZFS_MAX_DATASET_NAME_LEN];
3998
3999 if (dsobj == 0) {
4000 /* special case for the MOS */
4001 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4002 (longlong_t)obj);
4003 return;
4004 }
4005
4006 /* get the dataset's name */
4007 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4008 zc.zc_obj = dsobj;
4009 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4010 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4011 /* just write out a path of two object numbers */
4012 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4013 (longlong_t)dsobj, (longlong_t)obj);
4014 return;
4015 }
4016 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4017
4018 /* find out if the dataset is mounted */
4019 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4020
4021 /* get the corrupted object's path */
4022 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4023 zc.zc_obj = obj;
4024 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4025 &zc) == 0) {
4026 if (mounted) {
4027 (void) snprintf(pathname, len, "%s%s", mntpnt,
4028 zc.zc_value);
4029 } else {
4030 (void) snprintf(pathname, len, "%s:%s",
4031 dsname, zc.zc_value);
4032 }
4033 } else {
4034 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4035 (longlong_t)obj);
4036 }
4037 free(mntpnt);
4038 }
4039
4040 /*
4041 * Read the EFI label from the config; if a label does not exist then
4042 * pass back the error to the caller. If the caller has passed a non-NULL
4043 * diskaddr argument then we set it to the starting address of the EFI
4044 * partition.
4045 */
4046 static int
4047 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4048 {
4049 char *path;
4050 int fd;
4051 char diskname[MAXPATHLEN];
4052 int err = -1;
4053
4054 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4055 return (err);
4056
4057 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4058 strrchr(path, '/'));
4059 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
4060 struct dk_gpt *vtoc;
4061
4062 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4063 if (sb != NULL)
4064 *sb = vtoc->efi_parts[0].p_start;
4065 efi_free(vtoc);
4066 }
4067 (void) close(fd);
4068 }
4069 return (err);
4070 }
4071
4072 /*
4073 * Determine where a partition starts on a disk in the current
4074 * configuration.
4075 */
4076 static diskaddr_t
4077 find_start_block(nvlist_t *config)
4078 {
4079 nvlist_t **child;
4080 uint_t c, children;
4081 diskaddr_t sb = MAXOFFSET_T;
4082 uint64_t wholedisk;
4083
4084 if (nvlist_lookup_nvlist_array(config,
4085 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4086 if (nvlist_lookup_uint64(config,
4087 ZPOOL_CONFIG_WHOLE_DISK,
4088 &wholedisk) != 0 || !wholedisk) {
4089 return (MAXOFFSET_T);
4090 }
4091 if (read_efi_label(config, &sb) < 0)
4092 sb = MAXOFFSET_T;
4093 return (sb);
4094 }
4095
4096 for (c = 0; c < children; c++) {
4097 sb = find_start_block(child[c]);
4098 if (sb != MAXOFFSET_T) {
4099 return (sb);
4100 }
4101 }
4102 return (MAXOFFSET_T);
4103 }
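
/*
 * Sketch of how the sentinel above is consumed (see zpool_label_disk()
 * below): MAXOFFSET_T means "no usable start block was found", so the
 * caller falls back to the default:
 *
 *	diskaddr_t start = find_start_block(nvroot);
 *
 *	if (start == MAXOFFSET_T)
 *		start = NEW_START_BLOCK;
 */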
4104
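/*
 * Verify that a just-written EFI label is readable and intact.
 * Returns 0 if the label is healthy, the errno from open(2) if the
 * device cannot be opened, the efi_alloc_and_read() error if the
 * label cannot be read, and EIDRM if the primary GPT is corrupt.
 */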
4105 static int
4106 zpool_label_disk_check(char *path)
4107 {
4108 struct dk_gpt *vtoc;
4109 int fd, err;
4110
4111 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4112 return (errno);
4113
4114 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4115 (void) close(fd);
4116 return (err);
4117 }
4118
4119 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4120 efi_free(vtoc);
4121 (void) close(fd);
4122 return (EIDRM);
4123 }
4124
4125 efi_free(vtoc);
4126 (void) close(fd);
4127 return (0);
4128 }
4129
4130 /*
4131 * Generate a unique partition name for the ZFS member. Partitions must
4132 * have unique names to ensure udev will be able to create symlinks under
4133 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4134 * of the form zfs-<unique-id>.
4135 */
4136 static void
4137 zpool_label_name(char *label_name, int label_size)
4138 {
4139 uint64_t id = 0;
4140 int fd;
4141
4142 fd = open("/dev/urandom", O_RDONLY);
4143 if (fd >= 0) {
4144 if (read(fd, &id, sizeof (id)) != sizeof (id))
4145 id = 0;
4146
4147 close(fd);
4148 }
4149
4150 if (id == 0)
4151 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4152
4153 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
4154 }
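
/*
 * Example (illustrative): the generated name is always "zfs-" plus
 * sixteen hex digits, so a buffer of EFI_PART_NAME_LEN bytes is ample:
 *
 *	char name[EFI_PART_NAME_LEN];
 *
 *	zpool_label_name(name, sizeof (name));
 *
 * after which "name" holds something like "zfs-1a2b3c4d5e6f7081".
 */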
4155
4156 /*
4157 * Label an individual disk. The name provided is the short name,
4158 * stripped of any leading /dev path.
4159 */
4160 int
4161 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4162 {
4163 char path[MAXPATHLEN];
4164 struct dk_gpt *vtoc;
4165 int rval, fd;
4166 size_t resv = EFI_MIN_RESV_SIZE;
4167 uint64_t slice_size;
4168 diskaddr_t start_block;
4169 char errbuf[1024];
4170
4171 /* prepare an error message just in case */
4172 (void) snprintf(errbuf, sizeof (errbuf),
4173 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4174
4175 if (zhp) {
4176 nvlist_t *nvroot;
4177
4178 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4179 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4180
4181 if (zhp->zpool_start_block == 0)
4182 start_block = find_start_block(nvroot);
4183 else
4184 start_block = zhp->zpool_start_block;
4185 zhp->zpool_start_block = start_block;
4186 } else {
4187 /* new pool */
4188 start_block = NEW_START_BLOCK;
4189 }
4190
4191 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4192
4193 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4194 /*
4195 * This shouldn't happen. We've long since verified that this
4196 * is a valid device.
4197 */
4198 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4199 "label '%s': unable to open device: %d"), path, errno);
4200 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4201 }
4202
4203 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4204 /*
4205 * The only way this can fail is if we run out of memory, or we
4206 * were unable to read the disk's capacity.
4207 */
4208 if (errno == ENOMEM)
4209 (void) no_memory(hdl);
4210
4211 (void) close(fd);
4212 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4213 "label '%s': unable to read disk capacity"), path);
4214
4215 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4216 }
4217
4218 slice_size = vtoc->efi_last_u_lba + 1;
4219 slice_size -= EFI_MIN_RESV_SIZE;
4220 if (start_block == MAXOFFSET_T)
4221 start_block = NEW_START_BLOCK;
4222 slice_size -= start_block;
4223 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4224
4225 vtoc->efi_parts[0].p_start = start_block;
4226 vtoc->efi_parts[0].p_size = slice_size;
4227
4228 /*
4229 * Why we use V_USR: V_BACKUP confuses users, and is considered
4230 * disposable by some EFI utilities (since EFI doesn't have a backup
4231 * slice). V_UNASSIGNED is supposed to be used only for zero size
4232 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4233 * etc. were all pretty specific. V_USR is as close to reality as we
4234 * can get, in the absence of V_OTHER.
4235 */
4236 vtoc->efi_parts[0].p_tag = V_USR;
4237 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4238
4239 vtoc->efi_parts[8].p_start = slice_size + start_block;
4240 vtoc->efi_parts[8].p_size = resv;
4241 vtoc->efi_parts[8].p_tag = V_RESERVED;
4242
4243 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4244 /*
4245 * Some block drivers (like pcata) may not support EFI
4246 * GPT labels. Print out a helpful error message
4247 * directing the user to manually label the disk and
4248 * provide a specific slice.
4249 */
4250 (void) close(fd);
4251 efi_free(vtoc);
4252
4253 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4254 "parted(8) and then provide a specific slice: %d"), rval);
4255 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4256 }
4257
4258 (void) close(fd);
4259 efi_free(vtoc);
4260
4261 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4262 (void) zfs_append_partition(path, MAXPATHLEN);
4263
4264 /* Wait for udev to signal the device has settled. */
4265 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4266 if (rval) {
4267 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4268 "detect device partitions on '%s': %d"), path, rval);
4269 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4270 }
4271
4272 /* We can't be too paranoid. Read the label back and verify it. */
4273 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4274 rval = zpool_label_disk_check(path);
4275 if (rval) {
4276 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4277 "EFI label on '%s' is damaged. Ensure\nthis device "
4278 "is not in in use, and is functioning properly: %d"),
4279 path, rval);
4280 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4281 }
4282
4283 return (0);
4284 }
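
/*
 * Illustrative usage sketch (assumes "sdb" has already been validated
 * as a whole disk and "g_zfs" is a libzfs handle):
 *
 *	if (zpool_label_disk(g_zfs, zhp, "sdb") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 *
 * Passing zhp == NULL is the "new pool" case, in which the data
 * partition starts at NEW_START_BLOCK.
 */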
4285
4286 /*
4287 * Allocate and return the underlying device name for a device mapper device.
4288 * If a device mapper device maps to multiple devices, return the first device.
4289 *
4290 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4291 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
4292 *
4293 * Returns the underlying device name, or NULL on error, on no match,
4294 * or if dm_name is not a DM device.
4295 *
4296 * NOTE: The returned name string must be *freed*.
4297 */
4298 char *
4299 dm_get_underlying_path(char *dm_name)
4300 {
4301 DIR *dp = NULL;
4302 struct dirent *ep;
4303 char *realp;
4304 char *tmp = NULL;
4305 char *path = NULL;
4306 char *dev_str;
4307 int size;
4308
4309 if (dm_name == NULL)
4310 return (NULL);
4311
4312 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4313 realp = realpath(dm_name, NULL);
4314 if (realp == NULL)
4315 return (NULL);
4316
4317 /*
4318 * If they preface 'dev' with a path (like "/dev") then strip it off.
4319 * We just want the 'dm-N' part.
4320 */
4321 tmp = strrchr(realp, '/');
4322 if (tmp != NULL)
4323 dev_str = tmp + 1; /* +1 since we want the char after '/' */
4324 else
4325 dev_str = realp; /* no '/' in the name; use it whole */
4326
4327 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
4328 if (size == -1) {
4329 tmp = NULL; /* contents are undefined when asprintf() fails */
4330 goto end;
4331 }
4330
4331 dp = opendir(tmp);
4332 if (dp == NULL)
4333 goto end;
4334
4335 /* Return first sd* entry in /sys/block/dm-N/slaves/ */
4336 while ((ep = readdir(dp))) {
4337 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4338 size = asprintf(&path, "/dev/%s", ep->d_name);
4339 if (size == -1)
4340 path = NULL; /* contents undefined on failure */
4341 break;
4340 }
4341 }
4342
4343 end:
4344 if (dp != NULL)
4345 closedir(dp);
4346 free(tmp);
4347 free(realp);
4348 return (path);
4349 }
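
/*
 * Illustrative usage sketch; the caller owns (and must free) the
 * returned string:
 *
 *	char *dev = dm_get_underlying_path("/dev/dm-0");
 *
 *	if (dev != NULL) {
 *		(void) printf("underlying device: %s\n", dev);
 *		free(dev);
 *	}
 */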
4350
4351 /*
4352 * Return 1 if device is a device mapper or multipath device.
4353 * Return 0 if not.
4354 */
4355 int
4356 zfs_dev_is_dm(char *dev_name)
4357 {
4358 char *tmp;
4359
4360 tmp = dm_get_underlying_path(dev_name);
4361 if (tmp == NULL)
4362 return (0);
4363
4364 free(tmp);
4365 return (1);
4366 }
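
/*
 * Illustrative usage sketch:
 *
 *	if (zfs_dev_is_dm("/dev/mapper/mpatha"))
 *		(void) printf("device is DM/multipath\n");
 */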
4367
4368 /*
4369 * Lookup the underlying device for a device name
4370 *
4371 * Often you'll have a symlink to a device, a partition device,
4372 * or a multipath device, and want to look up the underlying device.
4373 * This function returns the underlying device name. If the device
4374 * name is already the underlying device, then just return the same
4375 * name. If the device is a DM device with multiple underlying devices
4376 * then return the first one.
4377 *
4378 * For example:
4379 *
4380 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
4381 * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
4382 * returns: /dev/sda
4383 *
4384 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
4385 * dev_name: /dev/mapper/mpatha
4386 * returns: /dev/sda (first device)
4387 *
4388 * 3. /dev/sda (already the underlying device)
4389 * dev_name: /dev/sda
4390 * returns: /dev/sda
4391 *
4392 * 4. /dev/dm-3 (mapped to /dev/sda)
4393 * dev_name: /dev/dm-3
4394 * returns: /dev/sda
4395 *
4396 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
4397 * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
4398 * returns: /dev/sdb
4399 *
4400 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
4401 * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
4402 * returns: /dev/sda
4403 *
4404 * Returns underlying device name, or NULL on error or no match.
4405 *
4406 * NOTE: The returned name string must be *freed*.
4407 */
4408 char *
4409 zfs_get_underlying_path(char *dev_name)
4410 {
4411 char *name = NULL;
4412 char *tmp;
4413
4414 if (dev_name == NULL)
4415 return (NULL);
4416
4417 tmp = dm_get_underlying_path(dev_name);
4418
4419 /* dev_name not a DM device, so just un-symlinkize it */
4420 if (tmp == NULL)
4421 tmp = realpath(dev_name, NULL);
4422
4423 if (tmp != NULL) {
4424 name = zfs_strip_partition_path(tmp);
4425 free(tmp);
4426 }
4427
4428 return (name);
4429 }
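
/*
 * Illustrative usage sketch, corresponding to example 1 above:
 *
 *	char *dev = zfs_get_underlying_path(
 *	    "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001");
 *
 *	if (dev != NULL) {
 *		(void) printf("%s\n", dev);
 *		free(dev);
 *	}
 *
 * which would print "/dev/sda" for the symlink shown in example 1.
 */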
4430
4431 /*
4432 * Given a dev name like "sda", return the full enclosure sysfs path to
4433 * the disk. You can also pass in the name with "/dev" prepended
4434 * to it (like /dev/sda).
4435 *
4436 * For example, disk "sda" in enclosure slot 1:
4437 * dev: "sda"
4438 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4439 *
4440 * 'dev' must be a non-devicemapper device.
4441 *
4442 * Returned string must be freed.
4443 */
4444 char *
4445 zfs_get_enclosure_sysfs_path(char *dev_name)
4446 {
4447 DIR *dp = NULL;
4448 struct dirent *ep;
4449 char buf[MAXPATHLEN];
4450 char *tmp1 = NULL;
4451 char *tmp2 = NULL;
4452 char *tmp3 = NULL;
4453 char *path = NULL;
4454 ssize_t size;
4455 int tmpsize;
4456
4457 if (dev_name == NULL)
4458 return (NULL);
4459
4460 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4461 tmp1 = strrchr(dev_name, '/');
4462 if (tmp1 != NULL)
4463 dev_name = tmp1 + 1; /* +1 since we want the char after '/' */
4464
4465 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4466 if (tmpsize == -1) {
4467 tmp1 = NULL; /* contents undefined on asprintf() failure */
4468 goto end;
4469 }
4470
4471 dp = opendir(tmp1);
4472 if (dp == NULL)
4473 goto end; /* tmp1 is freed at the end label */
4476
4477 /*
4478 * Look through all sysfs entries in /sys/block/<dev>/device for
4479 * the enclosure symlink.
4480 */
4481 while ((ep = readdir(dp))) {
4482 /* Ignore everything that's not our enclosure_device link */
4483 if (strstr(ep->d_name, "enclosure_device") == NULL)
4484 continue;
4485
4486 if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1) {
4487 tmp2 = NULL; /* contents undefined on failure */
4488 break;
4489 }
4489
4490 size = readlink(tmp2, buf, sizeof (buf));
4491
4492 /* Did readlink fail or crop the link name? */
4493 if (size == -1 || (size_t)size >= sizeof (buf)) {
4494 free(tmp2);
4495 tmp2 = NULL; /* To make free() at the end a NOP */
4496 break;
4497 }
4498
4499 /*
4500 * We got a valid link. readlink() does not NUL-terminate
4501 * strings, so we must do it ourselves.
4502 */
4503 buf[size] = '\0';
4504
4505 /*
4506 * Our link will look like:
4507 *
4508 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4509 *
4510 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4511 */
4512 tmp3 = strstr(buf, "enclosure");
4513 if (tmp3 == NULL)
4514 break;
4515
4516 if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
4517 /* If asprintf() fails, 'path' is undefined */
4518 path = NULL;
4519 }
4520
4521 /* Stop at the first enclosure link we resolve. */
4522 break;
4524 }
4525
4526 end:
4527 free(tmp2);
4528 free(tmp1);
4529
4530 if (dp != NULL)
4531 closedir(dp);
4532
4533 return (path);
4534 }
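
/*
 * Illustrative usage sketch:
 *
 *	char *sysfs = zfs_get_enclosure_sysfs_path("sda");
 *
 *	if (sysfs != NULL) {
 *		(void) printf("%s\n", sysfs);
 *		free(sysfs);
 *	}
 */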