/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

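/*
 * Illustrative usage sketch (not part of this library); the pool name
 * "tank" is hypothetical:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *	if (zhp != NULL && zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *	    sizeof (buf), NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 *	if (zhp != NULL)
 *		zpool_close(zhp);
 */
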
/*
 * Check that the bootfs name is within the pool it is set on.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

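/*
 * For example (hypothetical names): with pool "tank", the values "tank"
 * and "tank/ROOT/fs" are accepted, while "tankX" and "rpool/ROOT" are
 * rejected, since the leading component must be the pool name itself.
 * An empty value is accepted because it clears the property.
 */
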
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid '%s=%llu' property: only values "
				    "between %" PRId32 " and %" PRId32 " "
				    "are allowed.\n"),
				    propname, (u_longlong_t)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool it is set
			 * on.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

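/*
 * Illustrative usage sketch (not part of this library); the handle and
 * comment text are hypothetical:
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		return (-1);
 */
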
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

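/*
 * A summary of the state strings produced above, by feature kind and
 * refcount:
 *
 *	feature@<name>:      not in nvlist -> "disabled",
 *	                     refcount == 0 -> "enabled",
 *	                     refcount > 0  -> "active"
 *	unsupported@<guid>:  refcount == 0 -> "inactive",
 *	                     refcount > 0  -> "readonly"
 */
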
/*
 * Don't start the slice at the default block of 34; many storage
 * devices use a stripe width of 128k, while other vendors prefer a 1m
 * alignment. It is best to play it safe and ensure a 1m alignment
 * given 512B blocks. When the block size is larger by a power of 2
 * we will still be 1m aligned. Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048

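/*
 * The arithmetic behind the chosen constant: 2048 blocks * 512 B/block
 * = 1,048,576 B = 1 MiB. With a larger power-of-two sector size the
 * start offset only grows (e.g. 2048 * 4096 B = 8 MiB), so the
 * partition start remains 1m-aligned.
 */
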
/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

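/*
 * For example (hypothetical names): at create/import time (isopen is
 * B_FALSE) "mirrorpool", "raidz2tank", "spare1" and "log" are all
 * rejected as reserved, because each begins with (or equals) a vdev
 * type keyword, while names like "tank" or "backup" are accepted.
 */
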
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

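/*
 * The difference between the two wrappers above: zpool_export() forwards
 * the caller's 'force' flag and never hard-forces, while
 * zpool_export_force() sets both zc_cookie (force) and zc_guid
 * (hardforce). Illustrative call (the handle and log string are
 * hypothetical):
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		return (-1);
 */
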
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

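/*
 * In effect, passing a non-NULL altroot here behaves like
 * 'zpool import -R <altroot>': the pool is imported with the altroot
 * property set and with cachefile=none, so the temporary import is not
 * recorded in the cache file.
 */
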
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;
	zc.zc_flags = cmd;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
		return (0);

	err = errno;

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
		}
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
			else
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		} else {
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, err, msg));
	}
}

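/*
 * Illustrative usage sketch (not part of this library): starting,
 * pausing, and cancelling a scrub on an open pool handle:
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 *
 * Note that a POOL_SCAN_SCRUB/POOL_SCRUB_NORMAL request against a
 * paused scrub resumes it; the kernel reports ECANCELED, which is
 * treated as success above.
 */
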
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, and FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

2089 /*
2090 * Search for the requested value. Special cases:
2091 *
2092 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2093 * "-part1" or "p1". The suffix is hidden from the user but
2094 * included in the string, so this matches around it.
2095 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2096 * is used to check all possible expanded paths.
2097 * - looking up a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2098 *
2099 * Otherwise, all other searches are simple string compares.
2100 */
2101 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2102 uint64_t wholedisk = 0;
2103
2104 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2105 &wholedisk);
2106 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2107 return (nv);
2108
2109 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2110 char *type, *idx, *end, *p;
2111 uint64_t id, vdev_id;
2112
2113 /*
2114 * Determine our vdev type, keeping in mind
2115 * that the srchval is composed of a type and
2116 * vdev id pair (e.g. mirror-4).
2117 */
2118 if ((type = strdup(srchval)) == NULL)
2119 return (NULL);
2120
2121 if ((p = strrchr(type, '-')) == NULL) {
2122 free(type);
2123 break;
2124 }
2125 idx = p + 1;
2126 *p = '\0';
2127
2128 /*
2129 * If the types don't match then keep looking.
2130 */
2131 if (strncmp(val, type, strlen(val)) != 0) {
2132 free(type);
2133 break;
2134 }
2135
2136 verify(zpool_vdev_is_interior(type));
2137 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2138 &id) == 0);
2139
2140 errno = 0;
2141 vdev_id = strtoull(idx, &end, 10);
2142
2143 free(type);
2144 if (errno != 0)
2145 return (NULL);
2146
2147 /*
2148 * Now verify that we have the correct vdev id.
2149 */
2150 if (vdev_id == id)
2151 return (nv);
2152 }
2153
2154 /*
2155 * Common case
2156 */
2157 if (strcmp(srchval, val) == 0)
2158 return (nv);
2159 break;
2160 }
2161
2162 default:
2163 break;
2164 }
2165
2166 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2167 &child, &children) != 0)
2168 return (NULL);
2169
2170 for (c = 0; c < children; c++) {
2171 if ((ret = vdev_to_nvlist_iter(child[c], search,
2172 avail_spare, l2cache, NULL)) != NULL) {
2173 /*
2174 * The 'is_log' value is only set for the top-level
2175 * vdev, not the leaf vdevs. So we always look up the
2176 * log device from the root of the vdev tree (where
2177 * 'log' is non-NULL).
2178 */
2179 if (log != NULL &&
2180 nvlist_lookup_uint64(child[c],
2181 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2182 is_log) {
2183 *log = B_TRUE;
2184 }
2185 return (ret);
2186 }
2187 }
2188
2189 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2190 &child, &children) == 0) {
2191 for (c = 0; c < children; c++) {
2192 if ((ret = vdev_to_nvlist_iter(child[c], search,
2193 avail_spare, l2cache, NULL)) != NULL) {
2194 *avail_spare = B_TRUE;
2195 return (ret);
2196 }
2197 }
2198 }
2199
2200 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2201 &child, &children) == 0) {
2202 for (c = 0; c < children; c++) {
2203 if ((ret = vdev_to_nvlist_iter(child[c], search,
2204 avail_spare, l2cache, NULL)) != NULL) {
2205 *l2cache = B_TRUE;
2206 return (ret);
2207 }
2208 }
2209 }
2210
2211 return (NULL);
2212 }
2213
2214 /*
2215 * Given a physical path (minus the "/devices" prefix), find the
2216 * associated vdev.
2217 */
2218 nvlist_t *
2219 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2220 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2221 {
2222 nvlist_t *search, *nvroot, *ret;
2223
2224 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2225 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2226
2227 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2228 &nvroot) == 0);
2229
2230 *avail_spare = B_FALSE;
2231 *l2cache = B_FALSE;
2232 if (log != NULL)
2233 *log = B_FALSE;
2234 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2235 nvlist_free(search);
2236
2237 return (ret);
2238 }
2239
2240 /*
2241 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2242 */
2243 static boolean_t
2244 zpool_vdev_is_interior(const char *name)
2245 {
2246 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2247 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2248 strncmp(name,
2249 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2250 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2251 return (B_TRUE);
2252 return (B_FALSE);
2253 }
2254
2255 nvlist_t *
2256 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2257 boolean_t *l2cache, boolean_t *log)
2258 {
2259 char *end;
2260 nvlist_t *nvroot, *search, *ret;
2261 uint64_t guid;
2262
2263 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2264
2265 guid = strtoull(path, &end, 0);
2266 if (guid != 0 && *end == '\0') {
2267 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2268 } else if (zpool_vdev_is_interior(path)) {
2269 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2270 } else {
2271 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2272 }
2273
2274 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2275 &nvroot) == 0);
2276
2277 *avail_spare = B_FALSE;
2278 *l2cache = B_FALSE;
2279 if (log != NULL)
2280 *log = B_FALSE;
2281 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2282 nvlist_free(search);
2283
2284 return (ret);
2285 }
2286
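/*
 * Illustrative sketch (not part of the library): locating a device and
 * reading its GUID, mirroring what the callers below do.  Assumes an
 * open zpool_handle_t *zhp and a device name such as "sda" or
 * "mirror-0".
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *	uint64_t guid;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "sda", &spare, &l2cache,
 *	    &log)) == NULL)
 *		return (-1);	// no such vdev
 *	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
 */
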
2287 static int
2288 vdev_is_online(nvlist_t *nv)
2289 {
2290 uint64_t ival;
2291
2292 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2293 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2294 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2295 return (0);
2296
2297 return (1);
2298 }
2299
2300 /*
2301 * Helper function for zpool_get_physpath().
2302 */
2303 static int
2304 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2305 size_t *bytes_written)
2306 {
2307 size_t bytes_left, pos, rsz;
2308 char *tmppath;
2309 const char *format;
2310
2311 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2312 &tmppath) != 0)
2313 return (EZFS_NODEVICE);
2314
2315 pos = *bytes_written;
2316 bytes_left = physpath_size - pos;
2317 format = (pos == 0) ? "%s" : " %s";
2318
2319 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2320 *bytes_written += rsz;
2321
2322 if (rsz >= bytes_left) {
2323 /* if physpath was not copied properly, clear it */
2324 if (bytes_left != 0) {
2325 physpath[pos] = 0;
2326 }
2327 return (EZFS_NOSPC);
2328 }
2329 return (0);
2330 }
2331
2332 static int
2333 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2334 size_t *rsz, boolean_t is_spare)
2335 {
2336 char *type;
2337 int ret;
2338
2339 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2340 return (EZFS_INVALCONFIG);
2341
2342 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2343 /*
2344 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2345 * For a spare vdev, we only want to boot from the active
2346 * spare device.
2347 */
2348 if (is_spare) {
2349 uint64_t spare = 0;
2350 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2351 &spare);
2352 if (!spare)
2353 return (EZFS_INVALCONFIG);
2354 }
2355
2356 if (vdev_is_online(nv)) {
2357 if ((ret = vdev_get_one_physpath(nv, physpath,
2358 phypath_size, rsz)) != 0)
2359 return (ret);
2360 }
2361 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2362 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2363 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2364 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2365 nvlist_t **child;
2366 uint_t count;
2367 int i, ret;
2368
2369 if (nvlist_lookup_nvlist_array(nv,
2370 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2371 return (EZFS_INVALCONFIG);
2372
2373 for (i = 0; i < count; i++) {
2374 ret = vdev_get_physpaths(child[i], physpath,
2375 phypath_size, rsz, is_spare);
2376 if (ret == EZFS_NOSPC)
2377 return (ret);
2378 }
2379 }
2380
2381 return (EZFS_POOL_INVALARG);
2382 }
2383
2384 /*
2385 * Get phys_path for a root pool config.
2386 * Return 0 on success; non-zero on failure.
2387 */
2388 static int
2389 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2390 {
2391 size_t rsz;
2392 nvlist_t *vdev_root;
2393 nvlist_t **child;
2394 uint_t count;
2395 char *type;
2396
2397 rsz = 0;
2398
2399 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2400 &vdev_root) != 0)
2401 return (EZFS_INVALCONFIG);
2402
2403 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2404 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2405 &child, &count) != 0)
2406 return (EZFS_INVALCONFIG);
2407
2408 /*
2409 * A root pool can only have a single top-level vdev.
2410 */
2411 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2412 return (EZFS_POOL_INVALARG);
2413
2414 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2415 B_FALSE);
2416
2417 /* No online devices */
2418 if (rsz == 0)
2419 return (EZFS_NODEVICE);
2420
2421 return (0);
2422 }
2423
2424 /*
2425 * Get phys_path for a root pool.
2426 * Return 0 on success; non-zero on failure.
2427 */
2428 int
2429 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2430 {
2431 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2432 phypath_size));
2433 }
2434
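/*
 * Illustrative sketch (not part of the library): fetching the physical
 * paths of a bootable pool.  On success 'physpath' holds the phys_path
 * strings of all online leaf devices, space-separated as produced by
 * vdev_get_one_physpath() above.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) != 0)
 *		return (-1);	// not a usable root pool config
 */
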
2435 /*
2436 * If the device has been dynamically expanded then we need to relabel
2437 * the disk to use the new unallocated space.
2438 */
2439 static int
2440 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2441 {
2442 int fd, error;
2443
2444 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2445 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2446 "relabel '%s': unable to open device: %d"), path, errno);
2447 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2448 }
2449
2450 /*
2451 * It's possible that we might encounter an error if the device
2452 * does not have any unallocated space left. If so, we simply
2453 * ignore that error and continue on.
2454 *
2455 * Also, we don't call efi_rescan() - that would just return EBUSY.
2456 * The module will do it for us in vdev_disk_open().
2457 */
2458 error = efi_use_whole_disk(fd);
2459
2460 /* Flush the buffers to disk and invalidate the page cache. */
2461 (void) fsync(fd);
2462 (void) ioctl(fd, BLKFLSBUF);
2463
2464 (void) close(fd);
2465 if (error && error != VT_ENOSPC) {
2466 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2467 "relabel '%s': unable to read disk capacity"), path);
2468 return (zfs_error(hdl, EZFS_NOCAP, msg));
2469 }
2470
2471 return (0);
2472 }
2473
2474 /*
2475 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2476 *
2477 * If is_spare, is_l2cache, or is_log is non-NULL, then store in it
2478 * whether the vdev is a spare, l2cache, or log device. If they're
2479 * NULL, they are ignored.
2480 */
2481 static uint64_t
2482 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2483 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2484 {
2485 uint64_t guid;
2486 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2487 nvlist_t *tgt;
2488
2489 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2490 &log)) == NULL)
2491 return (0);
2492
2493 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2494 if (is_spare != NULL)
2495 *is_spare = spare;
2496 if (is_l2cache != NULL)
2497 *is_l2cache = l2cache;
2498 if (is_log != NULL)
2499 *is_log = log;
2500
2501 return (guid);
2502 }
2503
2504 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2505 uint64_t
2506 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2507 {
2508 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2509 }
2510
2511 /*
2512 * Bring the specified vdev online. The 'flags' parameter is a set of the
2513 * ZFS_ONLINE_* flags.
2514 */
2515 int
2516 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2517 vdev_state_t *newstate)
2518 {
2519 zfs_cmd_t zc = {"\0"};
2520 char msg[1024];
2521 char *pathname;
2522 nvlist_t *tgt;
2523 boolean_t avail_spare, l2cache, islog;
2524 libzfs_handle_t *hdl = zhp->zpool_hdl;
2525 int error;
2526
2527 if (flags & ZFS_ONLINE_EXPAND) {
2528 (void) snprintf(msg, sizeof (msg),
2529 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2530 } else {
2531 (void) snprintf(msg, sizeof (msg),
2532 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2533 }
2534
2535 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2536 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2537 &islog)) == NULL)
2538 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2539
2540 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2541
2542 if (avail_spare)
2543 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2544
2545 if ((flags & ZFS_ONLINE_EXPAND ||
2546 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2547 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2548 uint64_t wholedisk = 0;
2549
2550 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2551 &wholedisk);
2552
2553 /*
2554 * XXX - L2ARC 1.0 devices can't support expansion.
2555 */
2556 if (l2cache) {
2557 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2558 "cannot expand cache devices"));
2559 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2560 }
2561
2562 if (wholedisk) {
2563 const char *fullpath = path;
2564 char buf[MAXPATHLEN];
2565
2566 if (path[0] != '/') {
2567 error = zfs_resolve_shortname(path, buf,
2568 sizeof (buf));
2569 if (error != 0)
2570 return (zfs_error(hdl, EZFS_NODEVICE,
2571 msg));
2572
2573 fullpath = buf;
2574 }
2575
2576 error = zpool_relabel_disk(hdl, fullpath, msg);
2577 if (error != 0)
2578 return (error);
2579 }
2580 }
2581
2582 zc.zc_cookie = VDEV_STATE_ONLINE;
2583 zc.zc_obj = flags;
2584
2585 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2586 if (errno == EINVAL) {
2587 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2588 "from this pool into a new one. Use '%s' "
2589 "instead"), "zpool detach");
2590 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2591 }
2592 return (zpool_standard_error(hdl, errno, msg));
2593 }
2594
2595 *newstate = zc.zc_cookie;
2596 return (0);
2597 }
2598
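/*
 * Illustrative sketch (not part of the library): onlining a device and
 * expanding it to use any grown LUN capacity.  ZFS_ONLINE_EXPAND takes
 * the relabel path above when the vdev is a whole disk.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) != 0)
 *		return (-1);
 *	if (newstate != VDEV_STATE_HEALTHY)
 *		(void) fprintf(stderr, "device online but not healthy\n");
 */
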
2599 /*
2600 * Take the specified vdev offline
2601 */
2602 int
2603 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2604 {
2605 zfs_cmd_t zc = {"\0"};
2606 char msg[1024];
2607 nvlist_t *tgt;
2608 boolean_t avail_spare, l2cache;
2609 libzfs_handle_t *hdl = zhp->zpool_hdl;
2610
2611 (void) snprintf(msg, sizeof (msg),
2612 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2613
2614 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2615 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2616 NULL)) == NULL)
2617 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2618
2619 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2620
2621 if (avail_spare)
2622 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2623
2624 zc.zc_cookie = VDEV_STATE_OFFLINE;
2625 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2626
2627 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2628 return (0);
2629
2630 switch (errno) {
2631 case EBUSY:
2632
2633 /*
2634 * There are no other replicas of this device.
2635 */
2636 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2637
2638 case EEXIST:
2639 /*
2640 * The log device has unplayed logs.
2641 */
2642 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2643
2644 default:
2645 return (zpool_standard_error(hdl, errno, msg));
2646 }
2647 }
2648
2649 /*
2650 * Mark the given vdev faulted.
2651 */
2652 int
2653 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2654 {
2655 zfs_cmd_t zc = {"\0"};
2656 char msg[1024];
2657 libzfs_handle_t *hdl = zhp->zpool_hdl;
2658
2659 (void) snprintf(msg, sizeof (msg),
2660 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2661
2662 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2663 zc.zc_guid = guid;
2664 zc.zc_cookie = VDEV_STATE_FAULTED;
2665 zc.zc_obj = aux;
2666
2667 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2668 return (0);
2669
2670 switch (errno) {
2671 case EBUSY:
2672
2673 /*
2674 * There are no other replicas of this device.
2675 */
2676 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2677
2678 default:
2679 return (zpool_standard_error(hdl, errno, msg));
2680 }
2681
2682 }
2683
2684 /*
2685 * Mark the given vdev degraded.
2686 */
2687 int
2688 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2689 {
2690 zfs_cmd_t zc = {"\0"};
2691 char msg[1024];
2692 libzfs_handle_t *hdl = zhp->zpool_hdl;
2693
2694 (void) snprintf(msg, sizeof (msg),
2695 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2696
2697 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2698 zc.zc_guid = guid;
2699 zc.zc_cookie = VDEV_STATE_DEGRADED;
2700 zc.zc_obj = aux;
2701
2702 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2703 return (0);
2704
2705 return (zpool_standard_error(hdl, errno, msg));
2706 }
2707
2708 /*
2709 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2710 * a hot spare.
2711 */
2712 static boolean_t
2713 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2714 {
2715 nvlist_t **child;
2716 uint_t c, children;
2717 char *type;
2718
2719 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2720 &children) == 0) {
2721 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2722 &type) == 0);
2723
2724 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2725 children == 2 && child[which] == tgt)
2726 return (B_TRUE);
2727
2728 for (c = 0; c < children; c++)
2729 if (is_replacing_spare(child[c], tgt, which))
2730 return (B_TRUE);
2731 }
2732
2733 return (B_FALSE);
2734 }
2735
2736 /*
2737 * Attach new_disk (fully described by nvroot) to old_disk.
2738 * If 'replacing' is specified, the new disk will replace the old one.
2739 */
2740 int
2741 zpool_vdev_attach(zpool_handle_t *zhp,
2742 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2743 {
2744 zfs_cmd_t zc = {"\0"};
2745 char msg[1024];
2746 int ret;
2747 nvlist_t *tgt;
2748 boolean_t avail_spare, l2cache, islog;
2749 uint64_t val;
2750 char *newname;
2751 nvlist_t **child;
2752 uint_t children;
2753 nvlist_t *config_root;
2754 libzfs_handle_t *hdl = zhp->zpool_hdl;
2755 boolean_t rootpool = zpool_is_bootable(zhp);
2756
2757 if (replacing)
2758 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2759 "cannot replace %s with %s"), old_disk, new_disk);
2760 else
2761 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2762 "cannot attach %s to %s"), new_disk, old_disk);
2763
2764 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2765 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2766 &islog)) == 0)
2767 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2768
2769 if (avail_spare)
2770 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2771
2772 if (l2cache)
2773 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2774
2775 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2776 zc.zc_cookie = replacing;
2777
2778 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2779 &child, &children) != 0 || children != 1) {
2780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2781 "new device must be a single disk"));
2782 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2783 }
2784
2785 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2786 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2787
2788 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2789 return (-1);
2790
2791 /*
2792 * If the target is a hot spare that has been swapped in, we can only
2793 * replace it with another hot spare.
2794 */
2795 if (replacing &&
2796 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2797 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2798 NULL) == NULL || !avail_spare) &&
2799 is_replacing_spare(config_root, tgt, 1)) {
2800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2801 "can only be replaced by another hot spare"));
2802 free(newname);
2803 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2804 }
2805
2806 free(newname);
2807
2808 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2809 return (-1);
2810
2811 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2812
2813 zcmd_free_nvlists(&zc);
2814
2815 if (ret == 0) {
2816 if (rootpool) {
2817 /*
2818 * XXX need a better way to prevent user from
2819 * booting up a half-baked vdev.
2820 */
2821 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2822 "sure to wait until resilver is done "
2823 "before rebooting.\n"));
2824 }
2825 return (0);
2826 }
2827
2828 switch (errno) {
2829 case ENOTSUP:
2830 /*
2831 * Can't attach to or replace this type of vdev.
2832 */
2833 if (replacing) {
2834 uint64_t version = zpool_get_prop_int(zhp,
2835 ZPOOL_PROP_VERSION, NULL);
2836
2837 if (islog)
2838 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2839 "cannot replace a log with a spare"));
2840 else if (version >= SPA_VERSION_MULTI_REPLACE)
2841 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2842 "already in replacing/spare config; wait "
2843 "for completion or use 'zpool detach'"));
2844 else
2845 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2846 "cannot replace a replacing device"));
2847 } else {
2848 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2849 "can only attach to mirrors and top-level "
2850 "disks"));
2851 }
2852 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2853 break;
2854
2855 case EINVAL:
2856 /*
2857 * The new device must be a single disk.
2858 */
2859 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2860 "new device must be a single disk"));
2861 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2862 break;
2863
2864 case EBUSY:
2865 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2866 new_disk);
2867 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2868 break;
2869
2870 case EOVERFLOW:
2871 /*
2872 * The new device is too small.
2873 */
2874 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2875 "device is too small"));
2876 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2877 break;
2878
2879 case EDOM:
2880 /*
2881 * The new device has a different optimal sector size.
2882 */
2883 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2884 "new device has a different optimal sector size; use the "
2885 "option '-o ashift=N' to override the optimal size"));
2886 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2887 break;
2888
2889 case ENAMETOOLONG:
2890 /*
2891 * The resulting top-level vdev spec won't fit in the label.
2892 */
2893 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2894 break;
2895
2896 default:
2897 (void) zpool_standard_error(hdl, errno, msg);
2898 }
2899
2900 return (-1);
2901 }
2902
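/*
 * Illustrative sketch (not part of the library): building the minimal
 * 'nvroot' this function expects (a root vdev with exactly one disk
 * child) and replacing old_disk with it.  Real callers such as the
 * zpool command construct this via their own vdev-building helpers;
 * the hand-rolled version below is only to show the required nvlist
 * shape, and "/dev/sdb" is a placeholder path.
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/sdb") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	if (zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot, 1) != 0)
 *		... handle error ...
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */
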
2903 /*
2904 * Detach the specified device.
2905 */
2906 int
2907 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2908 {
2909 zfs_cmd_t zc = {"\0"};
2910 char msg[1024];
2911 nvlist_t *tgt;
2912 boolean_t avail_spare, l2cache;
2913 libzfs_handle_t *hdl = zhp->zpool_hdl;
2914
2915 (void) snprintf(msg, sizeof (msg),
2916 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2917
2918 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2919 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2920 NULL)) == 0)
2921 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2922
2923 if (avail_spare)
2924 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2925
2926 if (l2cache)
2927 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2928
2929 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2930
2931 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2932 return (0);
2933
2934 switch (errno) {
2935
2936 case ENOTSUP:
2937 /*
2938 * Can't detach from this type of vdev.
2939 */
2940 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2941 "applicable to mirror and replacing vdevs"));
2942 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2943 break;
2944
2945 case EBUSY:
2946 /*
2947 * There are no other replicas of this device.
2948 */
2949 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2950 break;
2951
2952 default:
2953 (void) zpool_standard_error(hdl, errno, msg);
2954 }
2955
2956 return (-1);
2957 }
2958
2959 /*
2960 * Find a mirror vdev in the source nvlist.
2961 *
2962 * The mchild array contains a list of disks in one of the top-level mirrors
2963 * of the source pool. The schild array contains a list of disks that the
2964 * user specified on the command line. We loop over the mchild array to
2965 * see if any entry in the schild array matches.
2966 *
2967 * If a disk in the mchild array is found in the schild array, we return
2968 * the index of that entry. Otherwise we return -1.
2969 */
2970 static int
2971 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2972 nvlist_t **schild, uint_t schildren)
2973 {
2974 uint_t mc;
2975
2976 for (mc = 0; mc < mchildren; mc++) {
2977 uint_t sc;
2978 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2979 mchild[mc], 0);
2980
2981 for (sc = 0; sc < schildren; sc++) {
2982 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2983 schild[sc], 0);
2984 boolean_t result = (strcmp(mpath, spath) == 0);
2985
2986 free(spath);
2987 if (result) {
2988 free(mpath);
2989 return (mc);
2990 }
2991 }
2992
2993 free(mpath);
2994 }
2995
2996 return (-1);
2997 }
2998
2999 /*
3000 * Split a mirror pool. If *newroot is NULL, then a new nvlist
3001 * is generated and it is the responsibility of the caller to free it.
3002 */
3003 int
3004 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3005 nvlist_t *props, splitflags_t flags)
3006 {
3007 zfs_cmd_t zc = {"\0"};
3008 char msg[1024];
3009 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3010 nvlist_t **varray = NULL, *zc_props = NULL;
3011 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3012 libzfs_handle_t *hdl = zhp->zpool_hdl;
3013 uint64_t vers;
3014 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3015 int retval = 0;
3016
3017 (void) snprintf(msg, sizeof (msg),
3018 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3019
3020 if (!zpool_name_valid(hdl, B_FALSE, newname))
3021 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3022
3023 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3024 (void) fprintf(stderr, gettext("Internal error: unable to "
3025 "retrieve pool configuration\n"));
3026 return (-1);
3027 }
3028
3029 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3030 == 0);
3031 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3032
3033 if (props) {
3034 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3035 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3036 props, vers, flags, msg)) == NULL)
3037 return (-1);
3038 }
3039
3040 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3041 &children) != 0) {
3042 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3043 "Source pool is missing vdev tree"));
3044 nvlist_free(zc_props);
3045 return (-1);
3046 }
3047
3048 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3049 vcount = 0;
3050
3051 if (*newroot == NULL ||
3052 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3053 &newchild, &newchildren) != 0)
3054 newchildren = 0;
3055
3056 for (c = 0; c < children; c++) {
3057 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3058 char *type;
3059 nvlist_t **mchild, *vdev;
3060 uint_t mchildren;
3061 int entry;
3062
3063 /*
3064 * Unlike cache & spares, slogs are stored in the
3065 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3066 */
3067 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3068 &is_log);
3069 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3070 &is_hole);
3071 if (is_log || is_hole) {
3072 /*
3073 * Create a hole vdev and put it in the config.
3074 */
3075 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3076 goto out;
3077 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3078 VDEV_TYPE_HOLE) != 0)
3079 goto out;
3080 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3081 1) != 0)
3082 goto out;
3083 if (lastlog == 0)
3084 lastlog = vcount;
3085 varray[vcount++] = vdev;
3086 continue;
3087 }
3088 lastlog = 0;
3089 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3090 == 0);
3091 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3092 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3093 "Source pool must be composed only of mirrors\n"));
3094 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3095 goto out;
3096 }
3097
3098 verify(nvlist_lookup_nvlist_array(child[c],
3099 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3100
3101 /* find or add an entry for this top-level vdev */
3102 if (newchildren > 0 &&
3103 (entry = find_vdev_entry(zhp, mchild, mchildren,
3104 newchild, newchildren)) >= 0) {
3105 /* We found a disk that the user specified. */
3106 vdev = mchild[entry];
3107 ++found;
3108 } else {
3109 /* User didn't specify a disk for this vdev. */
3110 vdev = mchild[mchildren - 1];
3111 }
3112
3113 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3114 goto out;
3115 }
3116
3117 /* did we find every disk the user specified? */
3118 if (found != newchildren) {
3119 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3120 "include at most one disk from each mirror"));
3121 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3122 goto out;
3123 }
3124
3125 /* Prepare the nvlist for populating. */
3126 if (*newroot == NULL) {
3127 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3128 goto out;
3129 freelist = B_TRUE;
3130 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3131 VDEV_TYPE_ROOT) != 0)
3132 goto out;
3133 } else {
3134 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3135 }
3136
3137 /* Add all the children we found */
3138 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3139 lastlog == 0 ? vcount : lastlog) != 0)
3140 goto out;
3141
3142 /*
3143 * If we're just doing a dry run, exit now with success.
3144 */
3145 if (flags.dryrun) {
3146 memory_err = B_FALSE;
3147 freelist = B_FALSE;
3148 goto out;
3149 }
3150
3151 /* now build up the config list & call the ioctl */
3152 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3153 goto out;
3154
3155 if (nvlist_add_nvlist(newconfig,
3156 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3157 nvlist_add_string(newconfig,
3158 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3159 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3160 goto out;
3161
3162 /*
3163 * The new pool is automatically part of the namespace unless we
3164 * explicitly export it.
3165 */
3166 if (!flags.import)
3167 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3168 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3169 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3170 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3171 goto out;
3172 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3173 goto out;
3174
3175 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3176 retval = zpool_standard_error(hdl, errno, msg);
3177 goto out;
3178 }
3179
3180 freelist = B_FALSE;
3181 memory_err = B_FALSE;
3182
3183 out:
3184 if (varray != NULL) {
3185 int v;
3186
3187 for (v = 0; v < vcount; v++)
3188 nvlist_free(varray[v]);
3189 free(varray);
3190 }
3191 zcmd_free_nvlists(&zc);
3192 nvlist_free(zc_props);
3193 nvlist_free(newconfig);
3194 if (freelist) {
3195 nvlist_free(*newroot);
3196 *newroot = NULL;
3197 }
3198
3199 if (retval != 0)
3200 return (retval);
3201
3202 if (memory_err)
3203 return (no_memory(hdl));
3204
3205 return (0);
3206 }
3207
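/*
 * Illustrative sketch (not part of the library): a dry-run split that
 * only computes the new pool's vdev tree.  With flags.dryrun set the
 * function returns before issuing ZFS_IOC_VDEV_SPLIT, and the caller
 * owns the generated *newroot.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		nvlist_print(stdout, newroot);
 *		nvlist_free(newroot);
 *	}
 */
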
3208 /*
3209 * Remove the given device. Currently, this is supported only for hot spares,
3210 * cache, and log devices.
3211 */
3212 int
3213 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3214 {
3215 zfs_cmd_t zc = {"\0"};
3216 char msg[1024];
3217 nvlist_t *tgt;
3218 boolean_t avail_spare, l2cache, islog;
3219 libzfs_handle_t *hdl = zhp->zpool_hdl;
3220 uint64_t version;
3221
3222 (void) snprintf(msg, sizeof (msg),
3223 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3224
3225 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3226 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3227 &islog)) == 0)
3228 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3229 /*
3230 * XXX - this should just go away.
3231 */
3232 if (!avail_spare && !l2cache && !islog) {
3233 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3234 "only inactive hot spares, cache, "
3235 "or log devices can be removed"));
3236 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3237 }
3238
3239 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3240 if (islog && version < SPA_VERSION_HOLES) {
3241 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3242 "pool must be upgrade to support log removal"));
3243 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3244 }
3245
3246 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3247
3248 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3249 return (0);
3250
3251 return (zpool_standard_error(hdl, errno, msg));
3252 }
3253
3254 /*
3255 * Clear the errors for the pool, or the particular device if specified.
3256 */
3257 int
3258 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3259 {
3260 zfs_cmd_t zc = {"\0"};
3261 char msg[1024];
3262 nvlist_t *tgt;
3263 zpool_rewind_policy_t policy;
3264 boolean_t avail_spare, l2cache;
3265 libzfs_handle_t *hdl = zhp->zpool_hdl;
3266 nvlist_t *nvi = NULL;
3267 int error;
3268
3269 if (path)
3270 (void) snprintf(msg, sizeof (msg),
3271 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3272 path);
3273 else
3274 (void) snprintf(msg, sizeof (msg),
3275 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3276 zhp->zpool_name);
3277
3278 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3279 if (path) {
3280 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3281 &l2cache, NULL)) == 0)
3282 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3283
3284 /*
3285 * Don't allow error clearing for hot spares. Do allow
3286 * error clearing for l2cache devices.
3287 */
3288 if (avail_spare)
3289 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3290
3291 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3292 &zc.zc_guid) == 0);
3293 }
3294
3295 zpool_get_rewind_policy(rewindnvl, &policy);
3296 zc.zc_cookie = policy.zrp_request;
3297
3298 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3299 return (-1);
3300
3301 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3302 return (-1);
3303
3304 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3305 errno == ENOMEM) {
3306 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3307 zcmd_free_nvlists(&zc);
3308 return (-1);
3309 }
3310 }
3311
3312 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3313 errno != EPERM && errno != EACCES)) {
3314 if (policy.zrp_request &
3315 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3316 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3317 zpool_rewind_exclaim(hdl, zc.zc_name,
3318 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3319 nvi);
3320 nvlist_free(nvi);
3321 }
3322 zcmd_free_nvlists(&zc);
3323 return (0);
3324 }
3325
3326 zcmd_free_nvlists(&zc);
3327 return (zpool_standard_error(hdl, errno, msg));
3328 }
3329
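/*
 * Illustrative sketch (not part of the library): clearing all errors on
 * a pool with an explicit "no rewind" policy nvlist, as the zpool
 * command does.  ZPOOL_REWIND_REQUEST selects among ZPOOL_NO_REWIND,
 * ZPOOL_TRY_REWIND, and ZPOOL_DO_REWIND.
 *
 *	nvlist_t *policy = NULL;
 *
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
 *	    ZPOOL_NO_REWIND) == 0);
 *	if (zpool_clear(zhp, NULL, policy) != 0)
 *		... handle error ...
 *	nvlist_free(policy);
 */
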
3330 /*
3331 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3332 */
3333 int
3334 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3335 {
3336 zfs_cmd_t zc = {"\0"};
3337 char msg[1024];
3338 libzfs_handle_t *hdl = zhp->zpool_hdl;
3339
3340 (void) snprintf(msg, sizeof (msg),
3341 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3342 (u_longlong_t)guid);
3343
3344 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3345 zc.zc_guid = guid;
3346 zc.zc_cookie = ZPOOL_NO_REWIND;
3347
3348 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3349 return (0);
3350
3351 return (zpool_standard_error(hdl, errno, msg));
3352 }
3353
3354 /*
3355 * Change the GUID for a pool.
3356 */
3357 int
3358 zpool_reguid(zpool_handle_t *zhp)
3359 {
3360 char msg[1024];
3361 libzfs_handle_t *hdl = zhp->zpool_hdl;
3362 zfs_cmd_t zc = {"\0"};
3363
3364 (void) snprintf(msg, sizeof (msg),
3365 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3366
3367 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3368 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3369 return (0);
3370
3371 return (zpool_standard_error(hdl, errno, msg));
3372 }
3373
3374 /*
3375 * Reopen the pool.
3376 */
3377 int
3378 zpool_reopen_one(zpool_handle_t *zhp, void *data)
3379 {
3380 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3381 const char *pool_name = zpool_get_name(zhp);
3382 boolean_t *scrub_restart = data;
3383 int error;
3384
3385 error = lzc_reopen(pool_name, *scrub_restart);
3386 if (error) {
3387 return (zpool_standard_error_fmt(hdl, error,
3388 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
3389 }
3390
3391 return (0);
3392 }
3393
3394 /* call into libzfs_core to execute the sync IOCTL per pool */
3395 int
3396 zpool_sync_one(zpool_handle_t *zhp, void *data)
3397 {
3398 int ret;
3399 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3400 const char *pool_name = zpool_get_name(zhp);
3401 boolean_t *force = data;
3402 nvlist_t *innvl = fnvlist_alloc();
3403
3404 fnvlist_add_boolean_value(innvl, "force", *force);
3405 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3406 nvlist_free(innvl);
3407 return (zpool_standard_error_fmt(hdl, ret,
3408 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3409 }
3410 nvlist_free(innvl);
3411
3412 return (0);
3413 }
3414
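/*
 * Illustrative sketch (not part of the library): zpool_sync_one() and
 * zpool_reopen_one() are shaped as zpool_iter() callbacks, so syncing
 * every imported pool is a single call.
 *
 *	boolean_t force = B_FALSE;
 *
 *	if (zpool_iter(hdl, zpool_sync_one, &force) != 0)
 *		... at least one pool failed to sync ...
 */
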
3415 #if defined(__sun__) || defined(__sun)
3416 /*
3417 * Convert from a devid string to a path.
3418 */
3419 static char *
3420 devid_to_path(char *devid_str)
3421 {
3422 ddi_devid_t devid;
3423 char *minor;
3424 char *path;
3425 devid_nmlist_t *list = NULL;
3426 int ret;
3427
3428 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3429 return (NULL);
3430
3431 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3432
3433 devid_str_free(minor);
3434 devid_free(devid);
3435
3436 if (ret != 0)
3437 return (NULL);
3438
3439 /*
3440 * In case the strdup() fails, we will just return NULL below.
3441 */
3442 path = strdup(list[0].devname);
3443
3444 devid_free_nmlist(list);
3445
3446 return (path);
3447 }
3448
3449 /*
3450 * Convert from a path to a devid string.
3451 */
3452 static char *
3453 path_to_devid(const char *path)
3454 {
3455 int fd;
3456 ddi_devid_t devid;
3457 char *minor, *ret;
3458
3459 if ((fd = open(path, O_RDONLY)) < 0)
3460 return (NULL);
3461
3462 minor = NULL;
3463 ret = NULL;
3464 if (devid_get(fd, &devid) == 0) {
3465 if (devid_get_minor_name(fd, &minor) == 0)
3466 ret = devid_str_encode(devid, minor);
3467 if (minor != NULL)
3468 devid_str_free(minor);
3469 devid_free(devid);
3470 }
3471 (void) close(fd);
3472
3473 return (ret);
3474 }
3475
3476 /*
3477 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3478 * ignore any failure here, since a common case is for an unprivileged user to
3479 * type 'zpool status', and we'll display the correct information anyway.
3480 */
3481 static void
3482 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3483 {
3484 zfs_cmd_t zc = {"\0"};
3485
3486 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3487 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3488 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3489 &zc.zc_guid) == 0);
3490
3491 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3492 }
3493 #endif /* sun */
3494
3495 /*
3496 * Remove partition suffix from a vdev path. Partition suffixes may take three
3497 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3498 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3499 * third case only occurs when preceded by a string matching the regular
3500 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3501 *
3502 * The caller must free the returned string.
3503 */
3504 char *
3505 zfs_strip_partition(char *path)
3506 {
3507 char *tmp = strdup(path);
3508 char *part = NULL, *d = NULL;
3509 if (!tmp)
3510 return (NULL);
3511
3512 if ((part = strstr(tmp, "-part")) && part != tmp) {
3513 d = part + 5;
3514 } else if ((part = strrchr(tmp, 'p')) &&
3515 part > tmp + 1 && isdigit(*(part-1))) {
3516 d = part + 1;
3517 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3518 tmp[1] == 'd') {
3519 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3520 } else if (strncmp("xvd", tmp, 3) == 0) {
3521 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3522 }
3523 if (part && d && *d != '\0') {
3524 for (; isdigit(*d); d++) { }
3525 if (*d == '\0')
3526 *part = '\0';
3527 }
3528
3529 return (tmp);
3530 }
3531
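/*
 * Illustrative examples (not part of the library) of the three suffix
 * forms handled above; in each case the caller frees the result:
 *
 *	zfs_strip_partition("sda1")		returns "sda"
 *	zfs_strip_partition("md0p1")		returns "md0"
 *	zfs_strip_partition("vdc-part3")	returns "vdc"
 */
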
3532 /*
3533 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3534 *
3535 * path: /dev/sda1
3536 * returns: /dev/sda
3537 *
3538 * Returned string must be freed.
3539 */
3540 char *
3541 zfs_strip_partition_path(char *path)
3542 {
3543 char *newpath = strdup(path);
3544 char *sd_offset;
3545 char *new_sd;
3546
3547 if (!newpath)
3548 return (NULL);
3549
3550 /* Point to "sda1" part of "/dev/sda1" */
3551 sd_offset = strrchr(newpath, '/') + 1;
3552
3553 /* Get our new name "sda" */
3554 new_sd = zfs_strip_partition(sd_offset);
3555 if (!new_sd) {
3556 free(newpath);
3557 return (NULL);
3558 }
3559
3560 /* Paste the "sda" where "sda1" was */
3561 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3562
3563 /* Free temporary "sda" */
3564 free(new_sd);
3565
3566 return (newpath);
3567 }
3568
3569 #define PATH_BUF_LEN 64
3570
3571 /*
3572 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3573 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3574 * We also check if this is a whole disk, in which case we strip off the
3575 * trailing 's0' slice name.
3576 *
3577 * This routine is also responsible for identifying when disks have been
3578 * reconfigured in a new location. The kernel will have opened the device by
3579 * devid, but the path will still refer to the old location. To catch this, we
3580 * first do a path -> devid translation (which is fast for the common case). If
3581 * the devid matches, we're done. If not, we do a reverse devid -> path
3582 * translation and issue the appropriate ioctl() to update the path of the vdev.
3583 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3584 * of these checks.
3585 */
3586 char *
3587 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3588 int name_flags)
3589 {
3590 char *path, *type, *env;
3591 uint64_t value;
3592 char buf[PATH_BUF_LEN];
3593 char tmpbuf[PATH_BUF_LEN];
3594
3595 /*
3596 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
3597 * zpool name that will be displayed to the user.
3598 */
3599 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3600 if (zhp != NULL && strcmp(type, "root") == 0)
3601 return (zfs_strdup(hdl, zpool_get_name(zhp)));
3602
3603 env = getenv("ZPOOL_VDEV_NAME_PATH");
3604 if (env && (strtoul(env, NULL, 0) > 0 ||
3605 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3606 name_flags |= VDEV_NAME_PATH;
3607
3608 env = getenv("ZPOOL_VDEV_NAME_GUID");
3609 if (env && (strtoul(env, NULL, 0) > 0 ||
3610 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3611 name_flags |= VDEV_NAME_GUID;
3612
3613 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3614 if (env && (strtoul(env, NULL, 0) > 0 ||
3615 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3616 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3617
3618 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3619 name_flags & VDEV_NAME_GUID) {
3620 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3621 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3622 path = buf;
3623 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3624 #if defined(__sun__) || defined(__sun)
3625 /*
3626 * Live VDEV path updates to a kernel VDEV during a
3627 * zpool_vdev_name lookup are not supported on Linux.
3628 */
3629 char *devid;
3630 vdev_stat_t *vs;
3631 uint_t vsc;
3632
3633 /*
3634 * If the device is dead (faulted, offline, etc) then don't
3635 * bother opening it. Otherwise we may be forcing the user to
3636 * open a misbehaving device, which can have undesirable
3637 * effects.
3638 */
3639 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3640 (uint64_t **)&vs, &vsc) != 0 ||
3641 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3642 zhp != NULL &&
3643 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3644 /*
3645 * Determine if the current path is correct.
3646 */
3647 char *newdevid = path_to_devid(path);
3648
3649 if (newdevid == NULL ||
3650 strcmp(devid, newdevid) != 0) {
3651 char *newpath;
3652
3653 if ((newpath = devid_to_path(devid)) != NULL) {
3654 /*
3655 * Update the path appropriately.
3656 */
3657 set_path(zhp, nv, newpath);
3658 if (nvlist_add_string(nv,
3659 ZPOOL_CONFIG_PATH, newpath) == 0)
3660 verify(nvlist_lookup_string(nv,
3661 ZPOOL_CONFIG_PATH,
3662 &path) == 0);
3663 free(newpath);
3664 }
3665 }
3666
3667 if (newdevid)
3668 devid_str_free(newdevid);
3669 }
3670 #endif /* sun */
3671
3672 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3673 char *rp = realpath(path, NULL);
3674 if (rp) {
3675 strlcpy(buf, rp, sizeof (buf));
3676 path = buf;
3677 free(rp);
3678 }
3679 }
3680
3681 /*
3682 * For a block device, only use the name.
3683 */
3684 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3685 !(name_flags & VDEV_NAME_PATH)) {
3686 path = strrchr(path, '/');
3687 path++;
3688 }
3689
3690 /*
3691 * Remove the partition from the path if this is a whole disk.
3692 */
3693 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3694 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3695 return (zfs_strip_partition(path));
3696 }
3697 } else {
3698 path = type;
3699
3700 /*
3701 * If it's a raidz device, we need to stick in the parity level.
3702 */
3703 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3704 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3705 &value) == 0);
3706 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3707 (u_longlong_t)value);
3708 path = buf;
3709 }
3710
3711 /*
3712 * We identify each top-level vdev by using a <type-id>
3713 * naming convention.
3714 */
3715 if (name_flags & VDEV_NAME_TYPE_ID) {
3716 uint64_t id;
3717 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3718 &id) == 0);
3719 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3720 path, (u_longlong_t)id);
3721 path = tmpbuf;
3722 }
3723 }
3724
3725 return (zfs_strdup(hdl, path));
3726 }
3727
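/*
 * Illustrative sketch (not part of the library): printing the display
 * name of each top-level vdev using the <type-id> convention described
 * above.  Assumes 'nvroot' is the pool's ZPOOL_CONFIG_VDEV_TREE and
 * 'hdl'/'zhp' are open handles.
 *
 *	nvlist_t **child;
 *	uint_t c, children;
 *
 *	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *	for (c = 0; c < children; c++) {
 *		char *name = zpool_vdev_name(hdl, zhp, child[c],
 *		    VDEV_NAME_TYPE_ID);
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */
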
3728 static int
3729 zbookmark_mem_compare(const void *a, const void *b)
3730 {
3731 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3732 }
3733
3734 /*
3735 * Retrieve the persistent error log, uniquify the members, and return to the
3736 * caller.
3737 */
3738 int
3739 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3740 {
3741 zfs_cmd_t zc = {"\0"};
3742 libzfs_handle_t *hdl = zhp->zpool_hdl;
3743 uint64_t count;
3744 zbookmark_phys_t *zb = NULL;
3745 int i;
3746
3747 /*
3748 * Retrieve the raw error list from the kernel. If the number of errors
3749 * has increased, allocate more space and continue until we get the
3750 * entire list.
3751 */
3752 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3753 &count) == 0);
3754 if (count == 0)
3755 return (0);
3756 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3757 count * sizeof (zbookmark_phys_t));
3758 zc.zc_nvlist_dst_size = count;
3759 (void) strcpy(zc.zc_name, zhp->zpool_name);
3760 for (;;) {
3761 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3762 &zc) != 0) {
3763 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3764 if (errno == ENOMEM) {
3765 void *dst;
3766
3767 count = zc.zc_nvlist_dst_size;
3768 dst = zfs_alloc(zhp->zpool_hdl, count *
3769 sizeof (zbookmark_phys_t));
3770 zc.zc_nvlist_dst = (uintptr_t)dst;
3771 } else {
3772 return (zpool_standard_error_fmt(hdl, errno,
3773 dgettext(TEXT_DOMAIN, "errors: List of "
3774 "errors unavailable")));
3775 }
3776 } else {
3777 break;
3778 }
3779 }
3780
3781 /*
3782 * Sort the resulting bookmarks. This is a little confusing due to the
3783 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3784 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3785 * _not_ copied as part of the process. So we point the start of our
3786 * array appropriately and decrement the total number of elements.
3787 */
3788 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3789 zc.zc_nvlist_dst_size;
3790 count -= zc.zc_nvlist_dst_size;
3791
3792 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3793
3794 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3795
3796 /*
3797 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3798 */
3799 for (i = 0; i < count; i++) {
3800 nvlist_t *nv;
3801
3802 /* ignoring zb_blkid and zb_level for now */
3803 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3804 zb[i-1].zb_object == zb[i].zb_object)
3805 continue;
3806
3807 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3808 goto nomem;
3809 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3810 zb[i].zb_objset) != 0) {
3811 nvlist_free(nv);
3812 goto nomem;
3813 }
3814 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3815 zb[i].zb_object) != 0) {
3816 nvlist_free(nv);
3817 goto nomem;
3818 }
3819 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3820 nvlist_free(nv);
3821 goto nomem;
3822 }
3823 nvlist_free(nv);
3824 }
3825
3826 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3827 return (0);
3828
3829 nomem:
3830 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3831 return (no_memory(zhp->zpool_hdl));
3832 }
3833
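/*
 * Illustrative sketch (not part of the library): walking the error list
 * produced above.  Each member is an nvlist holding ZPOOL_ERR_DATASET
 * and ZPOOL_ERR_OBJECT, suitable for zpool_obj_to_path() below.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) != 0)
 *		return (-1);
 *	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *		nvlist_t *nv;
 *		uint64_t dsobj, obj;
 *
 *		verify(nvpair_value_nvlist(elem, &nv) == 0);
 *		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *		    &dsobj) == 0);
 *		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *		    &obj) == 0);
 *	}
 *	nvlist_free(nverrlist);
 */
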
3834 /*
3835 * Upgrade a ZFS pool to the latest on-disk version.
3836 */
3837 int
3838 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3839 {
3840 zfs_cmd_t zc = {"\0"};
3841 libzfs_handle_t *hdl = zhp->zpool_hdl;
3842
3843 (void) strcpy(zc.zc_name, zhp->zpool_name);
3844 zc.zc_cookie = new_version;
3845
3846 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3847 return (zpool_standard_error_fmt(hdl, errno,
3848 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3849 zhp->zpool_name));
3850 return (0);
3851 }
3852
3853 void
3854 zfs_save_arguments(int argc, char **argv, char *string, int len)
3855 {
3856 int i;
3857
3858 (void) strlcpy(string, basename(argv[0]), len);
3859 for (i = 1; i < argc; i++) {
3860 (void) strlcat(string, " ", len);
3861 (void) strlcat(string, argv[i], len);
3862 }
3863 }
3864
3865 int
3866 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3867 {
3868 zfs_cmd_t zc = {"\0"};
3869 nvlist_t *args;
3870 int err;
3871
3872 args = fnvlist_alloc();
3873 fnvlist_add_string(args, "message", message);
3874 err = zcmd_write_src_nvlist(hdl, &zc, args);
3875 if (err == 0)
3876 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3877 nvlist_free(args);
3878 zcmd_free_nvlists(&zc);
3879 return (err);
3880 }
3881
3882 /*
3883 * Perform ioctl to get some command history of a pool.
3884 *
3885 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3886 * logical offset of the history buffer to start reading from.
3887 *
3888 * Upon return, 'off' is the next logical offset to read from and
3889 * 'len' is the actual amount of bytes read into 'buf'.
3890 */
3891 static int
3892 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3893 {
3894 zfs_cmd_t zc = {"\0"};
3895 libzfs_handle_t *hdl = zhp->zpool_hdl;
3896
3897 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3898
3899 zc.zc_history = (uint64_t)(uintptr_t)buf;
3900 zc.zc_history_len = *len;
3901 zc.zc_history_offset = *off;
3902
3903 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3904 switch (errno) {
3905 case EPERM:
3906 return (zfs_error_fmt(hdl, EZFS_PERM,
3907 dgettext(TEXT_DOMAIN,
3908 "cannot show history for pool '%s'"),
3909 zhp->zpool_name));
3910 case ENOENT:
3911 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3912 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3913 "'%s'"), zhp->zpool_name));
3914 case ENOTSUP:
3915 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3916 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3917 "'%s', pool must be upgraded"), zhp->zpool_name));
3918 default:
3919 return (zpool_standard_error_fmt(hdl, errno,
3920 dgettext(TEXT_DOMAIN,
3921 "cannot get history for '%s'"), zhp->zpool_name));
3922 }
3923 }
3924
3925 *len = zc.zc_history_len;
3926 *off = zc.zc_history_offset;
3927
3928 return (0);
3929 }
3930
3931 /*
3932 * Process the buffer of nvlists, unpacking and storing each nvlist record
3933 * into 'records'. 'leftover' is set to the number of trailing bytes that
3934 * weren't processed because they didn't contain a complete record.
3935 */
3936 int
3937 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3938 nvlist_t ***records, uint_t *numrecords)
3939 {
3940 uint64_t reclen;
3941 nvlist_t *nv;
3942 int i;
3943 void *tmp;
3944
3945 while (bytes_read > sizeof (reclen)) {
3946
3947 /* get length of packed record (stored as little endian) */
3948 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3949 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3950
3951 if (bytes_read < sizeof (reclen) + reclen)
3952 break;
3953
3954 /* unpack record */
3955 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3956 return (ENOMEM);
3957 bytes_read -= sizeof (reclen) + reclen;
3958 buf += sizeof (reclen) + reclen;
3959
3960 /* add record to nvlist array */
3961 (*numrecords)++;
3962 if (ISP2(*numrecords + 1)) {
3963 tmp = realloc(*records,
3964 *numrecords * 2 * sizeof (nvlist_t *));
3965 if (tmp == NULL) {
3966 nvlist_free(nv);
3967 (*numrecords)--;
3968 return (ENOMEM);
3969 }
3970 *records = tmp;
3971 }
3972 (*records)[*numrecords - 1] = nv;
3973 }
3974
3975 *leftover = bytes_read;
3976 return (0);
3977 }
3978
3979 /*
3980 * Retrieve the command history of a pool.
3981 */
3982 int
3983 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3984 {
3985 char *buf;
3986 int buflen = 128 * 1024;
3987 uint64_t off = 0;
3988 nvlist_t **records = NULL;
3989 uint_t numrecords = 0;
3990 int err, i;
3991
3992 buf = malloc(buflen);
3993 if (buf == NULL)
3994 return (ENOMEM);
3995 do {
3996 uint64_t bytes_read = buflen;
3997 uint64_t leftover;
3998
3999 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
4000 break;
4001
4002 /* if nothing else was read in, we're at EOF, just return */
4003 if (!bytes_read)
4004 break;
4005
4006 if ((err = zpool_history_unpack(buf, bytes_read,
4007 &leftover, &records, &numrecords)) != 0)
4008 break;
4009 off -= leftover;
4010 if (leftover == bytes_read) {
4011 /*
4012 * no progress made, because buffer is not big enough
4013 * to hold this record; resize and retry.
4014 */
4015 buflen *= 2;
4016 free(buf);
4017 buf = malloc(buflen);
4018 if (buf == NULL)
4019 return (ENOMEM);
4020 }
4021
4022 /* CONSTCOND */
4023 } while (1);
4024
4025 free(buf);
4026
4027 if (!err) {
4028 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4029 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4030 records, numrecords) == 0);
4031 }
4032 for (i = 0; i < numrecords; i++)
4033 nvlist_free(records[i]);
4034 free(records);
4035
4036 return (err);
4037 }
4038
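/*
 * Illustrative sketch (not part of the library): dumping the command
 * strings out of the history records returned above.  Each record is
 * an nvlist; administrative commands carry a ZPOOL_HIST_CMD string.
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **records;
 *	uint_t numrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) != 0)
 *		return (-1);
 *	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *	    &records, &numrecords) == 0);
 *	for (i = 0; i < numrecords; i++) {
 *		char *cmd;
 *
 *		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *		    &cmd) == 0)
 *			(void) printf("%s\n", cmd);
 *	}
 *	nvlist_free(nvhis);
 */
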
4039 /*
4040 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4041 * If there is a new event available 'nvp' will contain a newly allocated
4042 * nvlist and 'dropped' will be set to the number of missed events since
4043 * the last call to this function. When 'nvp' is set to NULL it indicates
4044 * no new events are available. In either case the function returns 0 and
4045 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4046 * function will return a non-zero value. When the function is called in
4047 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4048 * it will not return until a new event is available.
4049 */
4050 int
4051 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4052 int *dropped, unsigned flags, int zevent_fd)
4053 {
4054 zfs_cmd_t zc = {"\0"};
4055 int error = 0;
4056
4057 *nvp = NULL;
4058 *dropped = 0;
4059 zc.zc_cleanup_fd = zevent_fd;
4060
4061 if (flags & ZEVENT_NONBLOCK)
4062 zc.zc_guid = ZEVENT_NONBLOCK;
4063
4064 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4065 return (-1);
4066
4067 retry:
4068 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4069 switch (errno) {
4070 case ESHUTDOWN:
4071 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4072 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4073 goto out;
4074 case ENOENT:
/* ENOENT is expected in non-blocking mode; report it otherwise */
4076 if (!(flags & ZEVENT_NONBLOCK))
4077 error = zpool_standard_error_fmt(hdl, errno,
4078 dgettext(TEXT_DOMAIN, "cannot get event"));
4079
4080 goto out;
4081 case ENOMEM:
4082 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4083 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4084 dgettext(TEXT_DOMAIN, "cannot get event"));
4085 goto out;
4086 } else {
4087 goto retry;
4088 }
4089 default:
4090 error = zpool_standard_error_fmt(hdl, errno,
4091 dgettext(TEXT_DOMAIN, "cannot get event"));
4092 goto out;
4093 }
4094 }
4095
4096 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4097 if (error != 0)
4098 goto out;
4099
4100 *dropped = (int)zc.zc_cookie;
4101 out:
4102 zcmd_free_nvlists(&zc);
4103
4104 return (error);
4105 }
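
/*
 * Illustrative sketch (hypothetical caller): drain every queued event
 * without blocking. 'zevent_fd' is assumed to come from opening
 * ZFS_DEV ("/dev/zfs"); error handling is simplified for the example.
 */
#if 0
static void
drain_events(libzfs_handle_t *hdl, int zevent_fd)
{
	nvlist_t *nvp;
	int dropped;

	for (;;) {
		if (zpool_events_next(hdl, &nvp, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0)
			break;	/* fatal error */
		if (nvp == NULL)
			break;	/* queue is drained */
		if (dropped > 0)
			(void) fprintf(stderr, "missed %d events\n", dropped);
		nvlist_print(stdout, nvp);
		nvlist_free(nvp);
	}
}
#endif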
4106
4107 /*
4108 * Clear all events.
4109 */
4110 int
4111 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4112 {
4113 zfs_cmd_t zc = {"\0"};
4114 char msg[1024];
4115
4116 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4117 "cannot clear events"));
4118
4119 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4120 return (zpool_standard_error_fmt(hdl, errno, msg));
4121
4122 if (count != NULL)
4123 *count = (int)zc.zc_cookie; /* # of events cleared */
4124
4125 return (0);
4126 }
4127
4128 /*
4129 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
* the passed zevent_fd file descriptor. On success zero is returned,
4131 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4132 */
4133 int
4134 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4135 {
4136 zfs_cmd_t zc = {"\0"};
4137 int error = 0;
4138
4139 zc.zc_guid = eid;
4140 zc.zc_cleanup_fd = zevent_fd;
4141
4142 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4143 switch (errno) {
4144 case ENOENT:
4145 error = zfs_error_fmt(hdl, EZFS_NOENT,
4146 dgettext(TEXT_DOMAIN, "cannot get event"));
4147 break;
4148
4149 case ENOMEM:
4150 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4151 dgettext(TEXT_DOMAIN, "cannot get event"));
4152 break;
4153
4154 default:
4155 error = zpool_standard_error_fmt(hdl, errno,
4156 dgettext(TEXT_DOMAIN, "cannot get event"));
4157 break;
4158 }
4159 }
4160
4161 return (error);
4162 }
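
/*
 * Illustrative sketch (hypothetical): rewind the event stream so a
 * following zpool_events_next() loop replays it from the beginning.
 */
#if 0
static void
replay_events_from_start(libzfs_handle_t *hdl, int zevent_fd)
{
	if (zpool_events_seek(hdl, ZEVENT_SEEK_START, zevent_fd) != 0)
		(void) fprintf(stderr, "%s\n",
		    libzfs_error_description(hdl));
}
#endif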
4163
4164 void
4165 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4166 char *pathname, size_t len)
4167 {
4168 zfs_cmd_t zc = {"\0"};
4169 boolean_t mounted = B_FALSE;
4170 char *mntpnt = NULL;
4171 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4172
4173 if (dsobj == 0) {
4174 /* special case for the MOS */
4175 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4176 (longlong_t)obj);
4177 return;
4178 }
4179
4180 /* get the dataset's name */
4181 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4182 zc.zc_obj = dsobj;
4183 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4184 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4185 /* just write out a path of two object numbers */
4186 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4187 (longlong_t)dsobj, (longlong_t)obj);
4188 return;
4189 }
4190 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4191
4192 /* find out if the dataset is mounted */
4193 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4194
4195 /* get the corrupted object's path */
4196 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4197 zc.zc_obj = obj;
4198 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4199 &zc) == 0) {
4200 if (mounted) {
4201 (void) snprintf(pathname, len, "%s%s", mntpnt,
4202 zc.zc_value);
4203 } else {
4204 (void) snprintf(pathname, len, "%s:%s",
4205 dsname, zc.zc_value);
4206 }
4207 } else {
4208 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4209 (longlong_t)obj);
4210 }
4211 free(mntpnt);
4212 }
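
/*
 * Illustrative sketch (hypothetical caller): format a human-readable
 * path for a <dataset object, object> pair such as one reported in a
 * pool's persistent error log.
 */
#if 0
static void
print_error_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj)
{
	char pathname[MAXPATHLEN * 2];

	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
	(void) printf("%s\n", pathname);
}
#endif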
4213
4214 /*
* Read the EFI label from the config. If a label does not exist, pass the
* error back to the caller. If the caller has passed a non-NULL diskaddr
* argument, set it to the starting address of the EFI partition.
4219 */
4220 static int
4221 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4222 {
4223 char *path;
4224 int fd;
4225 char diskname[MAXPATHLEN];
4226 int err = -1;
4227
4228 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4229 return (err);
4230
4231 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4232 strrchr(path, '/'));
4233 if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
4234 struct dk_gpt *vtoc;
4235
4236 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4237 if (sb != NULL)
4238 *sb = vtoc->efi_parts[0].p_start;
4239 efi_free(vtoc);
4240 }
4241 (void) close(fd);
4242 }
4243 return (err);
4244 }
4245
4246 /*
4247 * determine where a partition starts on a disk in the current
4248 * configuration
4249 */
4250 static diskaddr_t
4251 find_start_block(nvlist_t *config)
4252 {
4253 nvlist_t **child;
4254 uint_t c, children;
4255 diskaddr_t sb = MAXOFFSET_T;
4256 uint64_t wholedisk;
4257
4258 if (nvlist_lookup_nvlist_array(config,
4259 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4260 if (nvlist_lookup_uint64(config,
4261 ZPOOL_CONFIG_WHOLE_DISK,
4262 &wholedisk) != 0 || !wholedisk) {
4263 return (MAXOFFSET_T);
4264 }
4265 if (read_efi_label(config, &sb) < 0)
4266 sb = MAXOFFSET_T;
4267 return (sb);
4268 }
4269
4270 for (c = 0; c < children; c++) {
4271 sb = find_start_block(child[c]);
4272 if (sb != MAXOFFSET_T) {
4273 return (sb);
4274 }
4275 }
4276 return (MAXOFFSET_T);
4277 }
4278
4279 static int
4280 zpool_label_disk_check(char *path)
4281 {
4282 struct dk_gpt *vtoc;
4283 int fd, err;
4284
4285 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
4286 return (errno);
4287
4288 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4289 (void) close(fd);
4290 return (err);
4291 }
4292
4293 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4294 efi_free(vtoc);
4295 (void) close(fd);
4296 return (EIDRM);
4297 }
4298
4299 efi_free(vtoc);
4300 (void) close(fd);
4301 return (0);
4302 }
4303
4304 /*
4305 * Generate a unique partition name for the ZFS member. Partitions must
4306 * have unique names to ensure udev will be able to create symlinks under
* /dev/disk/by-partlabel/ for all pool members. The partition names are
* of the form zfs-<unique-id>, where the id is a random 64-bit value.
4309 */
4310 static void
4311 zpool_label_name(char *label_name, int label_size)
4312 {
4313 uint64_t id = 0;
4314 int fd;
4315
4316 fd = open("/dev/urandom", O_RDONLY);
4317 if (fd >= 0) {
4318 if (read(fd, &id, sizeof (id)) != sizeof (id))
4319 id = 0;
4320
4321 close(fd);
4322 }
4323
4324 if (id == 0)
4325 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4326
4327 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
4328 }
4329
4330 /*
4331 * Label an individual disk. The name provided is the short name,
4332 * stripped of any leading /dev path.
4333 */
4334 int
4335 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4336 {
4337 char path[MAXPATHLEN];
4338 struct dk_gpt *vtoc;
4339 int rval, fd;
4340 size_t resv = EFI_MIN_RESV_SIZE;
4341 uint64_t slice_size;
4342 diskaddr_t start_block;
4343 char errbuf[1024];
4344
4345 /* prepare an error message just in case */
4346 (void) snprintf(errbuf, sizeof (errbuf),
4347 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4348
4349 if (zhp) {
4350 nvlist_t *nvroot;
4351
4352 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4353 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4354
4355 if (zhp->zpool_start_block == 0)
4356 start_block = find_start_block(nvroot);
4357 else
4358 start_block = zhp->zpool_start_block;
4359 zhp->zpool_start_block = start_block;
4360 } else {
4361 /* new pool */
4362 start_block = NEW_START_BLOCK;
4363 }
4364
4365 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4366
4367 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4368 /*
4369 * This shouldn't happen. We've long since verified that this
4370 * is a valid device.
4371 */
4372 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4373 "label '%s': unable to open device: %d"), path, errno);
4374 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4375 }
4376
4377 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4378 /*
4379 * The only way this can fail is if we run out of memory, or we
* were unable to read the disk's capacity.
4381 */
4382 if (errno == ENOMEM)
4383 (void) no_memory(hdl);
4384
4385 (void) close(fd);
4386 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4387 "label '%s': unable to read disk capacity"), path);
4388
4389 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4390 }
4391
4392 slice_size = vtoc->efi_last_u_lba + 1;
4393 slice_size -= EFI_MIN_RESV_SIZE;
4394 if (start_block == MAXOFFSET_T)
4395 start_block = NEW_START_BLOCK;
4396 slice_size -= start_block;
4397 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4398
4399 vtoc->efi_parts[0].p_start = start_block;
4400 vtoc->efi_parts[0].p_size = slice_size;
4401
4402 /*
4403 * Why we use V_USR: V_BACKUP confuses users, and is considered
4404 * disposable by some EFI utilities (since EFI doesn't have a backup
4405 * slice). V_UNASSIGNED is supposed to be used only for zero size
4406 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4407 * etc. were all pretty specific. V_USR is as close to reality as we
4408 * can get, in the absence of V_OTHER.
4409 */
4410 vtoc->efi_parts[0].p_tag = V_USR;
4411 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4412
4413 vtoc->efi_parts[8].p_start = slice_size + start_block;
4414 vtoc->efi_parts[8].p_size = resv;
4415 vtoc->efi_parts[8].p_tag = V_RESERVED;
4416
4417 rval = efi_write(fd, vtoc);
4418
4419 /* Flush the buffers to disk and invalidate the page cache. */
4420 (void) fsync(fd);
4421 (void) ioctl(fd, BLKFLSBUF);
4422
4423 if (rval == 0)
4424 rval = efi_rescan(fd);
4425
4426 /*
4427 * Some block drivers (like pcata) may not support EFI GPT labels.
4428 * Print out a helpful error message directing the user to manually
4429 * label the disk and give a specific slice.
4430 */
4431 if (rval != 0) {
4432 (void) close(fd);
4433 efi_free(vtoc);
4434
4435 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4436 "parted(8) and then provide a specific slice: %d"), rval);
4437 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4438 }
4439
4440 (void) close(fd);
4441 efi_free(vtoc);
4442
4443 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4444 (void) zfs_append_partition(path, MAXPATHLEN);
4445
/* Wait for udev to signal that the device has settled. */
4447 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4448 if (rval) {
4449 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4450 "detect device partitions on '%s': %d"), path, rval);
4451 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4452 }
4453
/* We can't be too paranoid. Read the label back and verify it. */
4455 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4456 rval = zpool_label_disk_check(path);
4457 if (rval) {
4458 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4459 "EFI label on '%s' is damaged. Ensure\nthis device "
"is not in use, and is functioning properly: %d"),
4461 path, rval);
4462 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4463 }
4464
4465 return (0);
4466 }
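
/*
 * Illustrative sketch (hypothetical caller): label a bare disk before
 * building a vdev on it. A NULL zpool_handle_t, as when creating a new
 * pool, makes the data partition start at NEW_START_BLOCK.
 */
#if 0
static int
label_new_disk(libzfs_handle_t *hdl, char *name)
{
	/* 'name' is a short name such as "sdb", without /dev */
	if (zpool_label_disk(hdl, NULL, name) != 0) {
		(void) fprintf(stderr, "%s\n",
		    libzfs_error_description(hdl));
		return (-1);
	}
	return (0);
}
#endif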
4467
4468 /*
4469 * Allocate and return the underlying device name for a device mapper device.
4470 * If a device mapper device maps to multiple devices, return the first device.
4471 *
4472 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4473 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
4474 *
4475 * Returns device name, or NULL on error or no match. If dm_name is not a DM
4476 * device then return NULL.
4477 *
4478 * NOTE: The returned name string must be *freed*.
4479 */
4480 char *
4481 dm_get_underlying_path(char *dm_name)
4482 {
4483 DIR *dp = NULL;
4484 struct dirent *ep;
4485 char *realp;
4486 char *tmp = NULL;
4487 char *path = NULL;
4488 char *dev_str;
4489 int size;
4490
4491 if (dm_name == NULL)
4492 return (NULL);
4493
4494 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4495 realp = realpath(dm_name, NULL);
4496 if (realp == NULL)
4497 return (NULL);
4498
4499 /*
4500 * If they preface 'dev' with a path (like "/dev") then strip it off.
4501 * We just want the 'dm-N' part.
4502 */
4503 tmp = strrchr(realp, '/');
4504 if (tmp != NULL)
4505 dev_str = tmp + 1; /* +1 since we want the chr after '/' */
else
dev_str = realp; /* no '/', use the whole resolved name */
4508
4509 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
if (size == -1) {
tmp = NULL; /* 'tmp' is undefined after a failed asprintf() */
goto end;
}
4512
4513 dp = opendir(tmp);
4514 if (dp == NULL)
4515 goto end;
4516
/* Return the first slave entry in /sys/block/dm-N/slaves/ */
4518 while ((ep = readdir(dp))) {
4519 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4520 size = asprintf(&path, "/dev/%s", ep->d_name);
4521 break;
4522 }
4523 }
4524
4525 end:
4526 if (dp != NULL)
4527 closedir(dp);
4528 free(tmp);
4529 free(realp);
4530 return (path);
4531 }
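
/*
 * Illustrative sketch (hypothetical): resolve a DM device to its first
 * backing disk. "/dev/dm-0" is an assumed example device node.
 */
#if 0
static void
show_dm_slave(void)
{
	char dm[] = "/dev/dm-0";
	char *dev = dm_get_underlying_path(dm);

	if (dev != NULL) {
		(void) printf("%s is backed by %s\n", dm, dev);
		free(dev);	/* caller owns the returned string */
	}
}
#endif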
4532
4533 /*
4534 * Return 1 if device is a device mapper or multipath device.
4535 * Return 0 if not.
4536 */
4537 int
4538 zfs_dev_is_dm(char *dev_name)
{
char *tmp;

tmp = dm_get_underlying_path(dev_name);
4543 if (tmp == NULL)
4544 return (0);
4545
4546 free(tmp);
4547 return (1);
4548 }
4549
4550 /*
4551 * By "whole disk" we mean an entire physical disk (something we can
4552 * label, toggle the write cache on, etc.) as opposed to the full
4553 * capacity of a pseudo-device such as lofi or did. We act as if we
4554 * are labeling the disk, which should be a pretty good test of whether
* it's a viable device or not. Returns 1 if it is and 0 if
* it isn't.
4557 */
4558 int
4559 zfs_dev_is_whole_disk(char *dev_name)
4560 {
4561 struct dk_gpt *label;
4562 int fd;
4563
4564 if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
4565 return (0);
4566
4567 if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
4568 (void) close(fd);
4569 return (0);
4570 }
4571
4572 efi_free(label);
4573 (void) close(fd);
4574
4575 return (1);
4576 }
4577
4578 /*
4579 * Lookup the underlying device for a device name
4580 *
4581 * Often you'll have a symlink to a device, a partition device,
4582 * or a multipath device, and want to look up the underlying device.
4583 * This function returns the underlying device name. If the device
4584 * name is already the underlying device, then just return the same
4585 * name. If the device is a DM device with multiple underlying devices
4586 * then return the first one.
4587 *
4588 * For example:
4589 *
4590 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
4591 * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
4592 * returns: /dev/sda
4593 *
4594 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
4595 * dev_name: /dev/mapper/mpatha
4596 * returns: /dev/sda (first device)
4597 *
4598 * 3. /dev/sda (already the underlying device)
4599 * dev_name: /dev/sda
4600 * returns: /dev/sda
4601 *
4602 * 4. /dev/dm-3 (mapped to /dev/sda)
4603 * dev_name: /dev/dm-3
4604 * returns: /dev/sda
4605 *
4606 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
4607 * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
4608 * returns: /dev/sdb
4609 *
4610 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
4611 * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
4612 * returns: /dev/sda
4613 *
4614 * Returns underlying device name, or NULL on error or no match.
4615 *
4616 * NOTE: The returned name string must be *freed*.
4617 */
4618 char *
4619 zfs_get_underlying_path(char *dev_name)
4620 {
4621 char *name = NULL;
4622 char *tmp;
4623
4624 if (dev_name == NULL)
4625 return (NULL);
4626
4627 tmp = dm_get_underlying_path(dev_name);
4628
4629 /* dev_name not a DM device, so just un-symlinkize it */
4630 if (tmp == NULL)
4631 tmp = realpath(dev_name, NULL);
4632
4633 if (tmp != NULL) {
4634 name = zfs_strip_partition_path(tmp);
4635 free(tmp);
4636 }
4637
4638 return (name);
4639 }
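
/*
 * Illustrative sketch (hypothetical): normalize a user-supplied device
 * name -- a symlink, a partition, or a DM device -- to its underlying
 * whole-disk name. The by-id path is an assumed example input.
 */
#if 0
static void
show_underlying(void)
{
	char dev[] = "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001";
	char *real = zfs_get_underlying_path(dev);

	if (real != NULL) {
		(void) printf("%s -> %s\n", dev, real);
		free(real);	/* caller owns the returned string */
	}
}
#endif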
4640
4641 /*
4642 * Given a dev name like "sda", return the full enclosure sysfs path to
4643 * the disk. You can also pass in the name with "/dev" prepended
4644 * to it (like /dev/sda).
4645 *
4646 * For example, disk "sda" in enclosure slot 1:
4647 * dev: "sda"
4648 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4649 *
4650 * 'dev' must be a non-devicemapper device.
4651 *
4652 * Returned string must be freed.
4653 */
4654 char *
4655 zfs_get_enclosure_sysfs_path(char *dev_name)
4656 {
4657 DIR *dp = NULL;
4658 struct dirent *ep;
4659 char buf[MAXPATHLEN];
4660 char *tmp1 = NULL;
4661 char *tmp2 = NULL;
4662 char *tmp3 = NULL;
4663 char *path = NULL;
4664 size_t size;
4665 int tmpsize;
4666
4667 if (dev_name == NULL)
4668 return (NULL);
4669
4670 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4671 tmp1 = strrchr(dev_name, '/');
4672 if (tmp1 != NULL)
4673 dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
4674
4675 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4676 if (tmpsize == -1 || tmp1 == NULL) {
4677 tmp1 = NULL;
4678 goto end;
4679 }
4680
4681 dp = opendir(tmp1);
if (dp == NULL)
goto end; /* 'tmp1' is freed at the end label */
4686
4687 /*
* Look through all sysfs entries in /sys/block/<dev>/device for
4689 * the enclosure symlink.
4690 */
4691 while ((ep = readdir(dp))) {
4692 /* Ignore everything that's not our enclosure_device link */
4693 if (strstr(ep->d_name, "enclosure_device") == NULL)
4694 continue;
4695
if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1) {
/* 'tmp2' is undefined after a failed asprintf() */
tmp2 = NULL;
break;
}
4699
4700 size = readlink(tmp2, buf, sizeof (buf));
4701
4702 /* Did readlink fail or crop the link name? */
4703 if (size == -1 || size >= sizeof (buf)) {
4704 free(tmp2);
4705 tmp2 = NULL; /* To make free() at the end a NOP */
4706 break;
4707 }
4708
4709 /*
4710 * We got a valid link. readlink() doesn't terminate strings
4711 * so we have to do it.
4712 */
4713 buf[size] = '\0';
4714
4715 /*
4716 * Our link will look like:
4717 *
4718 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4719 *
4720 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4721 */
4722 tmp3 = strstr(buf, "enclosure");
4723 if (tmp3 == NULL)
4724 break;
4725
if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
/* If asprintf() fails, 'path' is undefined */
path = NULL;
break;
}

/* Take the first matching enclosure link */
break;
4734 }
4735
4736 end:
4737 free(tmp2);
4738 free(tmp1);
4739
4740 if (dp != NULL)
4741 closedir(dp);
4742
4743 return (path);
4744 }
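
/*
 * Illustrative sketch (hypothetical): report which enclosure slot a
 * disk sits in, if any. "sda" is an assumed example device.
 */
#if 0
static void
show_enclosure_slot(void)
{
	char dev[] = "sda";
	char *slot = zfs_get_enclosure_sysfs_path(dev);

	if (slot != NULL) {
		(void) printf("%s is at %s\n", dev, slot);
		free(slot);	/* caller owns the returned string */
	}
}
#endif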