1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 */
28
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <fcntl.h>
33 #include <libintl.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <unistd.h>
38 #include <libgen.h>
39 #include <zone.h>
40 #include <sys/stat.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <dlfcn.h>
45
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
51
52 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
53
54 typedef struct prop_flags {
55 int create:1; /* Validate property on creation */
56 int import:1; /* Validate property on import */
57 } prop_flags_t;
58
59 /*
60 * ====================================================================
61 * zpool property functions
62 * ====================================================================
63 */
64
65 static int
66 zpool_get_all_props(zpool_handle_t *zhp)
67 {
68 zfs_cmd_t zc = {"\0"};
69 libzfs_handle_t *hdl = zhp->zpool_hdl;
70
71 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
72
73 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
74 return (-1);
75
76 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
77 if (errno == ENOMEM) {
78 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
79 zcmd_free_nvlists(&zc);
80 return (-1);
81 }
82 } else {
83 zcmd_free_nvlists(&zc);
84 return (-1);
85 }
86 }
87
88 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
89 zcmd_free_nvlists(&zc);
90 return (-1);
91 }
92
93 zcmd_free_nvlists(&zc);
94
95 return (0);
96 }
97
98 static int
99 zpool_props_refresh(zpool_handle_t *zhp)
100 {
101 nvlist_t *old_props;
102
103 old_props = zhp->zpool_props;
104
105 if (zpool_get_all_props(zhp) != 0)
106 return (-1);
107
108 nvlist_free(old_props);
109 return (0);
110 }
111
112 static char *
113 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
114 zprop_source_t *src)
115 {
116 nvlist_t *nv, *nvl;
117 uint64_t ival;
118 char *value;
119 zprop_source_t source;
120
121 nvl = zhp->zpool_props;
122 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
123 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 source = ival;
125 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 } else {
127 source = ZPROP_SRC_DEFAULT;
128 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
129 value = "-";
130 }
131
132 if (src)
133 *src = source;
134
135 return (value);
136 }
137
138 uint64_t
139 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
140 {
141 nvlist_t *nv, *nvl;
142 uint64_t value;
143 zprop_source_t source;
144
145 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
146 /*
147 * zpool_get_all_props() has most likely failed because
148 * the pool is faulted, but if all we need is the top level
149 * vdev's guid then get it from the zhp config nvlist.
150 */
151 if ((prop == ZPOOL_PROP_GUID) &&
152 (nvlist_lookup_nvlist(zhp->zpool_config,
153 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
154 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
155 == 0)) {
156 return (value);
157 }
158 return (zpool_prop_default_numeric(prop));
159 }
160
161 nvl = zhp->zpool_props;
162 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
163 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 source = value;
165 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 } else {
167 source = ZPROP_SRC_DEFAULT;
168 value = zpool_prop_default_numeric(prop);
169 }
170
171 if (src)
172 *src = source;
173
174 return (value);
175 }
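/*
 * Illustrative sketch (not part of the original source): reading a
 * numeric pool property.  The GUID fallback above means the guid can
 * be obtained even for a faulted pool whose properties cannot be
 * fetched.
 *
 *	uint64_t version, guid;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
 *	guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
 */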
176
177 /*
178 * Map VDEV STATE to printed strings.
179 */
180 char *
181 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
182 {
183 switch (state) {
184 case VDEV_STATE_CLOSED:
185 case VDEV_STATE_OFFLINE:
186 return (gettext("OFFLINE"));
187 case VDEV_STATE_REMOVED:
188 return (gettext("REMOVED"));
189 case VDEV_STATE_CANT_OPEN:
190 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
191 return (gettext("FAULTED"));
192 else if (aux == VDEV_AUX_SPLIT_POOL)
193 return (gettext("SPLIT"));
194 else
195 return (gettext("UNAVAIL"));
196 case VDEV_STATE_FAULTED:
197 return (gettext("FAULTED"));
198 case VDEV_STATE_DEGRADED:
199 return (gettext("DEGRADED"));
200 case VDEV_STATE_HEALTHY:
201 return (gettext("ONLINE"));
202
203 default:
204 break;
205 }
206
207 return (gettext("UNKNOWN"));
208 }
209
210 /*
211 * Map POOL STATE to printed strings.
212 */
213 const char *
214 zpool_pool_state_to_name(pool_state_t state)
215 {
216 switch (state) {
217 default:
218 break;
219 case POOL_STATE_ACTIVE:
220 return (gettext("ACTIVE"));
221 case POOL_STATE_EXPORTED:
222 return (gettext("EXPORTED"));
223 case POOL_STATE_DESTROYED:
224 return (gettext("DESTROYED"));
225 case POOL_STATE_SPARE:
226 return (gettext("SPARE"));
227 case POOL_STATE_L2CACHE:
228 return (gettext("L2CACHE"));
229 case POOL_STATE_UNINITIALIZED:
230 return (gettext("UNINITIALIZED"));
231 case POOL_STATE_UNAVAIL:
232 return (gettext("UNAVAIL"));
233 case POOL_STATE_POTENTIALLY_ACTIVE:
234 return (gettext("POTENTIALLY_ACTIVE"));
235 }
236
237 return (gettext("UNKNOWN"));
238 }
239
240 /*
241 * Get a zpool property value for 'prop' and return the value in
242 * a pre-allocated buffer.
243 */
244 int
245 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
246 size_t len, zprop_source_t *srctype, boolean_t literal)
247 {
248 uint64_t intval;
249 const char *strval;
250 zprop_source_t src = ZPROP_SRC_NONE;
251 nvlist_t *nvroot;
252 vdev_stat_t *vs;
253 uint_t vsc;
254
255 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
256 switch (prop) {
257 case ZPOOL_PROP_NAME:
258 (void) strlcpy(buf, zpool_get_name(zhp), len);
259 break;
260
261 case ZPOOL_PROP_HEALTH:
262 (void) strlcpy(buf, "FAULTED", len);
263 break;
264
265 case ZPOOL_PROP_GUID:
266 intval = zpool_get_prop_int(zhp, prop, &src);
267 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
268 break;
269
270 case ZPOOL_PROP_ALTROOT:
271 case ZPOOL_PROP_CACHEFILE:
272 case ZPOOL_PROP_COMMENT:
273 if (zhp->zpool_props != NULL ||
274 zpool_get_all_props(zhp) == 0) {
275 (void) strlcpy(buf,
276 zpool_get_prop_string(zhp, prop, &src),
277 len);
278 break;
279 }
280 /* FALLTHROUGH */
281 default:
282 (void) strlcpy(buf, "-", len);
283 break;
284 }
285
286 if (srctype != NULL)
287 *srctype = src;
288 return (0);
289 }
290
291 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
292 prop != ZPOOL_PROP_NAME)
293 return (-1);
294
295 switch (zpool_prop_get_type(prop)) {
296 case PROP_TYPE_STRING:
297 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
298 len);
299 break;
300
301 case PROP_TYPE_NUMBER:
302 intval = zpool_get_prop_int(zhp, prop, &src);
303
304 switch (prop) {
305 case ZPOOL_PROP_SIZE:
306 case ZPOOL_PROP_ALLOCATED:
307 case ZPOOL_PROP_FREE:
308 case ZPOOL_PROP_FREEING:
309 case ZPOOL_PROP_LEAKED:
310 case ZPOOL_PROP_ASHIFT:
311 if (literal)
312 (void) snprintf(buf, len, "%llu",
313 (u_longlong_t)intval);
314 else
315 (void) zfs_nicenum(intval, buf, len);
316 break;
317
318 case ZPOOL_PROP_EXPANDSZ:
319 if (intval == 0) {
320 (void) strlcpy(buf, "-", len);
321 } else if (literal) {
322 (void) snprintf(buf, len, "%llu",
323 (u_longlong_t)intval);
324 } else {
325 (void) zfs_nicenum(intval, buf, len);
326 }
327 break;
328
329 case ZPOOL_PROP_CAPACITY:
330 if (literal) {
331 (void) snprintf(buf, len, "%llu",
332 (u_longlong_t)intval);
333 } else {
334 (void) snprintf(buf, len, "%llu%%",
335 (u_longlong_t)intval);
336 }
337 break;
338
339 case ZPOOL_PROP_FRAGMENTATION:
340 if (intval == UINT64_MAX) {
341 (void) strlcpy(buf, "-", len);
342 } else if (literal) {
343 (void) snprintf(buf, len, "%llu",
344 (u_longlong_t)intval);
345 } else {
346 (void) snprintf(buf, len, "%llu%%",
347 (u_longlong_t)intval);
348 }
349 break;
350
351 case ZPOOL_PROP_DEDUPRATIO:
352 if (literal)
353 (void) snprintf(buf, len, "%llu.%02llu",
354 (u_longlong_t)(intval / 100),
355 (u_longlong_t)(intval % 100));
356 else
357 (void) snprintf(buf, len, "%llu.%02llux",
358 (u_longlong_t)(intval / 100),
359 (u_longlong_t)(intval % 100));
360 break;
361
362 case ZPOOL_PROP_HEALTH:
363 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
364 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
365 verify(nvlist_lookup_uint64_array(nvroot,
366 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
367 == 0);
368
369 (void) strlcpy(buf, zpool_state_to_name(intval,
370 vs->vs_aux), len);
371 break;
372 case ZPOOL_PROP_VERSION:
373 if (intval >= SPA_VERSION_FEATURES) {
374 (void) snprintf(buf, len, "-");
375 break;
376 }
377 /* FALLTHROUGH */
378 default:
379 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
380 }
381 break;
382
383 case PROP_TYPE_INDEX:
384 intval = zpool_get_prop_int(zhp, prop, &src);
385 if (zpool_prop_index_to_string(prop, intval, &strval)
386 != 0)
387 return (-1);
388 (void) strlcpy(buf, strval, len);
389 break;
390
391 default:
392 abort();
393 }
394
395 if (srctype)
396 *srctype = src;
397
398 return (0);
399 }
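/*
 * Illustrative sketch (not part of the original source): a typical
 * caller supplies a pre-allocated buffer; literal = B_TRUE requests
 * the raw numeric form instead of the human-readable one.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 *
 * With literal == B_FALSE the capacity is rendered as, e.g., "42%".
 */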
400
401 /*
402  * Check that the bootfs name refers to a dataset within the pool it
403  * is being set on. Assumes bootfs is a valid dataset name.
404 */
405 static boolean_t
406 bootfs_name_valid(const char *pool, char *bootfs)
407 {
408 int len = strlen(pool);
409
410 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
411 return (B_FALSE);
412
413 if (strncmp(pool, bootfs, len) == 0 &&
414 (bootfs[len] == '/' || bootfs[len] == '\0'))
415 return (B_TRUE);
416
417 return (B_FALSE);
418 }
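/*
 * Illustrative example (not part of the original source): for a pool
 * named "tank", both "tank" and "tank/ROOT/default" are accepted,
 * while "tankers/root" is rejected because the character after the
 * matched prefix is neither '/' nor '\0', and "rpool/ROOT" is
 * rejected for naming a different pool.
 */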
419
420 boolean_t
421 zpool_is_bootable(zpool_handle_t *zhp)
422 {
423 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
424
425 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
426 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
427 sizeof (bootfs)) != 0);
428 }
429
430
431 /*
432 * Given an nvlist of zpool properties to be set, validate that they are
433  * correct, and parse any numeric properties (index, boolean, etc.) if they are
434 * specified as strings.
435 */
436 static nvlist_t *
437 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
438 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
439 {
440 nvpair_t *elem;
441 nvlist_t *retprops;
442 zpool_prop_t prop;
443 char *strval;
444 uint64_t intval;
445 char *slash, *check;
446 struct stat64 statbuf;
447 zpool_handle_t *zhp;
448
449 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
450 (void) no_memory(hdl);
451 return (NULL);
452 }
453
454 elem = NULL;
455 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
456 const char *propname = nvpair_name(elem);
457
458 prop = zpool_name_to_prop(propname);
459 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
460 int err;
461 char *fname = strchr(propname, '@') + 1;
462
463 err = zfeature_lookup_name(fname, NULL);
464 if (err != 0) {
465 ASSERT3U(err, ==, ENOENT);
466 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
467 "invalid feature '%s'"), fname);
468 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
469 goto error;
470 }
471
472 if (nvpair_type(elem) != DATA_TYPE_STRING) {
473 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
474 "'%s' must be a string"), propname);
475 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
476 goto error;
477 }
478
479 (void) nvpair_value_string(elem, &strval);
480 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
481 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
482 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483 "property '%s' can only be set to "
484 "'enabled' or 'disabled'"), propname);
485 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486 goto error;
487 }
488
489 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
490 (void) no_memory(hdl);
491 goto error;
492 }
493 continue;
494 }
495
496 /*
497 * Make sure this property is valid and applies to this type.
498 */
499 if (prop == ZPROP_INVAL) {
500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 "invalid property '%s'"), propname);
502 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
503 goto error;
504 }
505
506 if (zpool_prop_readonly(prop)) {
507 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
508 "is readonly"), propname);
509 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
510 goto error;
511 }
512
513 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
514 &strval, &intval, errbuf) != 0)
515 goto error;
516
517 /*
518 * Perform additional checking for specific properties.
519 */
520 switch (prop) {
521 case ZPOOL_PROP_VERSION:
522 if (intval < version ||
523 !SPA_VERSION_IS_SUPPORTED(intval)) {
524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 "property '%s' number %d is invalid."),
526 propname, intval);
527 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
528 goto error;
529 }
530 break;
531
532 case ZPOOL_PROP_ASHIFT:
533 if (!flags.create) {
534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 "property '%s' can only be set at "
536 "creation time"), propname);
537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
538 goto error;
539 }
540
541 if (intval != 0 && (intval < 9 || intval > 13)) {
542 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
543 "property '%s' number %d is invalid."),
544 propname, intval);
545 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
546 goto error;
547 }
548 break;
549
550 case ZPOOL_PROP_BOOTFS:
551 if (flags.create || flags.import) {
552 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
553 "property '%s' cannot be set at creation "
554 "or import time"), propname);
555 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
556 goto error;
557 }
558
559 if (version < SPA_VERSION_BOOTFS) {
560 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
561 "pool must be upgraded to support "
562 "'%s' property"), propname);
563 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
564 goto error;
565 }
566
567 /*
568                          * The bootfs property value must be a dataset name, and
569                          * the dataset must reside in the pool on which it is set.
570 */
571 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
572 strval)) {
573 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
574 "is an invalid name"), strval);
575 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
576 goto error;
577 }
578
579 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
581 "could not open pool '%s'"), poolname);
582 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
583 goto error;
584 }
585 zpool_close(zhp);
586 break;
587
588 case ZPOOL_PROP_ALTROOT:
589 if (!flags.create && !flags.import) {
590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
591 "property '%s' can only be set during pool "
592 "creation or import"), propname);
593 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
594 goto error;
595 }
596
597 if (strval[0] != '/') {
598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
599 "bad alternate root '%s'"), strval);
600 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
601 goto error;
602 }
603 break;
604
605 case ZPOOL_PROP_CACHEFILE:
606 if (strval[0] == '\0')
607 break;
608
609 if (strcmp(strval, "none") == 0)
610 break;
611
612 if (strval[0] != '/') {
613 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
614 "property '%s' must be empty, an "
615 "absolute path, or 'none'"), propname);
616 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
617 goto error;
618 }
619
620 slash = strrchr(strval, '/');
621
622 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
623 strcmp(slash, "/..") == 0) {
624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
625 "'%s' is not a valid file"), strval);
626 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
627 goto error;
628 }
629
630 *slash = '\0';
631
632 if (strval[0] != '\0' &&
633 (stat64(strval, &statbuf) != 0 ||
634 !S_ISDIR(statbuf.st_mode))) {
635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
636 "'%s' is not a valid directory"),
637 strval);
638 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
639 goto error;
640 }
641
642 *slash = '/';
643 break;
644
645 case ZPOOL_PROP_COMMENT:
646 for (check = strval; *check != '\0'; check++) {
647 if (!isprint(*check)) {
648 zfs_error_aux(hdl,
649 dgettext(TEXT_DOMAIN,
650 "comment may only have printable "
651 "characters"));
652 (void) zfs_error(hdl, EZFS_BADPROP,
653 errbuf);
654 goto error;
655 }
656 }
657 if (strlen(strval) > ZPROP_MAX_COMMENT) {
658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
659 "comment must not exceed %d characters"),
660 ZPROP_MAX_COMMENT);
661 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
662 goto error;
663 }
664 break;
665 case ZPOOL_PROP_READONLY:
666 if (!flags.import) {
667 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
668 "property '%s' can only be set at "
669 "import time"), propname);
670 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
671 goto error;
672 }
673 break;
674 case ZPOOL_PROP_TNAME:
675 if (!flags.create) {
676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
677 "property '%s' can only be set at "
678 "creation time"), propname);
679 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
680 goto error;
681 }
682 break;
683
684 default:
685 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
686 "property '%s'(%d) not defined"), propname, prop);
687 break;
688 }
689 }
690
691 return (retprops);
692 error:
693 nvlist_free(retprops);
694 return (NULL);
695 }
696
697 /*
698 * Set zpool property : propname=propval.
699 */
700 int
701 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
702 {
703 zfs_cmd_t zc = {"\0"};
704 int ret = -1;
705 char errbuf[1024];
706 nvlist_t *nvl = NULL;
707 nvlist_t *realprops;
708 uint64_t version;
709 prop_flags_t flags = { 0 };
710
711 (void) snprintf(errbuf, sizeof (errbuf),
712 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
713 zhp->zpool_name);
714
715 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
716 return (no_memory(zhp->zpool_hdl));
717
718 if (nvlist_add_string(nvl, propname, propval) != 0) {
719 nvlist_free(nvl);
720 return (no_memory(zhp->zpool_hdl));
721 }
722
723 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
724 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
725 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
726 nvlist_free(nvl);
727 return (-1);
728 }
729
730 nvlist_free(nvl);
731 nvl = realprops;
732
733 /*
734 * Execute the corresponding ioctl() to set this property.
735 */
736 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
737
738 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
739 nvlist_free(nvl);
740 return (-1);
741 }
742
743 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
744
745 zcmd_free_nvlists(&zc);
746 nvlist_free(nvl);
747
748 if (ret)
749 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
750 else
751 (void) zpool_props_refresh(zhp);
752
753 return (ret);
754 }
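/*
 * Illustrative sketch (not part of the original source, and "backup
 * pool" is a hypothetical value): setting a single property;
 * validation and the ioctl are handled internally.
 *
 *	if (zpool_set_prop(zhp, "comment", "backup pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */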
755
756 int
757 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
758 {
759 libzfs_handle_t *hdl = zhp->zpool_hdl;
760 zprop_list_t *entry;
761 char buf[ZFS_MAXPROPLEN];
762 nvlist_t *features = NULL;
763 nvpair_t *nvp;
764 zprop_list_t **last;
765 boolean_t firstexpand = (NULL == *plp);
766 int i;
767
768 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
769 return (-1);
770
771 last = plp;
772 while (*last != NULL)
773 last = &(*last)->pl_next;
774
775 if ((*plp)->pl_all)
776 features = zpool_get_features(zhp);
777
778 if ((*plp)->pl_all && firstexpand) {
779 for (i = 0; i < SPA_FEATURES; i++) {
780 zprop_list_t *entry = zfs_alloc(hdl,
781 sizeof (zprop_list_t));
782 entry->pl_prop = ZPROP_INVAL;
783 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
784 spa_feature_table[i].fi_uname);
785 entry->pl_width = strlen(entry->pl_user_prop);
786 entry->pl_all = B_TRUE;
787
788 *last = entry;
789 last = &entry->pl_next;
790 }
791 }
792
793 /* add any unsupported features */
794 for (nvp = nvlist_next_nvpair(features, NULL);
795 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
796 char *propname;
797 boolean_t found;
798 zprop_list_t *entry;
799
800 if (zfeature_is_supported(nvpair_name(nvp)))
801 continue;
802
803 propname = zfs_asprintf(hdl, "unsupported@%s",
804 nvpair_name(nvp));
805
806 /*
807              * Before adding the property to the list, make sure that no
808 * other pool already added the same property.
809 */
810 found = B_FALSE;
811 entry = *plp;
812 while (entry != NULL) {
813 if (entry->pl_user_prop != NULL &&
814 strcmp(propname, entry->pl_user_prop) == 0) {
815 found = B_TRUE;
816 break;
817 }
818 entry = entry->pl_next;
819 }
820 if (found) {
821 free(propname);
822 continue;
823 }
824
825 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
826 entry->pl_prop = ZPROP_INVAL;
827 entry->pl_user_prop = propname;
828 entry->pl_width = strlen(entry->pl_user_prop);
829 entry->pl_all = B_TRUE;
830
831 *last = entry;
832 last = &entry->pl_next;
833 }
834
835 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
836
837 if (entry->pl_fixed)
838 continue;
839
840 if (entry->pl_prop != ZPROP_INVAL &&
841 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
842 NULL, B_FALSE) == 0) {
843 if (strlen(buf) > entry->pl_width)
844 entry->pl_width = strlen(buf);
845 }
846 }
847
848 return (0);
849 }
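/*
 * Illustrative sketch (not part of the original source): starting from
 * a NULL list expands every pool property plus all feature@ entries;
 * the list is assumed to be released with zprop_free_list().
 *
 *	zprop_list_t *plp = NULL;
 *
 *	if (zpool_expand_proplist(zhp, &plp) == 0) {
 *		... walk the list via plp->pl_next ...
 *		zprop_free_list(plp);
 *	}
 */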
850
851 /*
852 * Get the state for the given feature on the given ZFS pool.
853 */
854 int
855 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
856 size_t len)
857 {
858 uint64_t refcount;
859 boolean_t found = B_FALSE;
860 nvlist_t *features = zpool_get_features(zhp);
861 boolean_t supported;
862 const char *feature = strchr(propname, '@') + 1;
863
864 supported = zpool_prop_feature(propname);
865 ASSERT(supported || zpool_prop_unsupported(propname));
866
867 /*
868 * Convert from feature name to feature guid. This conversion is
869 * unnecessary for unsupported@... properties because they already
870 * use guids.
871 */
872 if (supported) {
873 int ret;
874 spa_feature_t fid;
875
876 ret = zfeature_lookup_name(feature, &fid);
877 if (ret != 0) {
878 (void) strlcpy(buf, "-", len);
879 return (ENOTSUP);
880 }
881 feature = spa_feature_table[fid].fi_guid;
882 }
883
884 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
885 found = B_TRUE;
886
887 if (supported) {
888 if (!found) {
889 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
890 } else {
891 if (refcount == 0)
892 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
893 else
894 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
895 }
896 } else {
897 if (found) {
898 if (refcount == 0) {
899 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
900 } else {
901 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
902 }
903 } else {
904 (void) strlcpy(buf, "-", len);
905 return (ENOTSUP);
906 }
907 }
908
909 return (0);
910 }
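/*
 * Illustrative example (not part of the original source):
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy",
 *	    state, sizeof (state)) == 0)
 *		(void) printf("async_destroy is %s\n", state);
 *
 * yields "disabled", "enabled", or "active" for a supported feature.
 */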
911
912 /*
913 * Don't start the slice at the default block of 34; many storage
914  * devices use a stripe width of 128k, while others prefer a 1m
915 * alignment. It is best to play it safe and ensure a 1m alignment
916 * given 512B blocks. When the block size is larger by a power of 2
917 * we will still be 1m aligned. Some devices are sensitive to the
918 * partition ending alignment as well.
919 */
920 #define NEW_START_BLOCK 2048
921 #define PARTITION_END_ALIGNMENT 2048
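/*
 * Worked example (not part of the original source): with 512-byte
 * sectors, 2048 blocks * 512 B = 1,048,576 B, i.e. exactly 1 MiB, so
 * both the start block and the end alignment fall on 1m boundaries.
 * With a larger power-of-2 block size (e.g. 4k sectors), 2048 blocks
 * is 8 MiB, which is still 1m aligned.
 */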
922
923 /*
924  * Validate the given pool name, optionally reporting an extended
925  * error message via 'hdl'.
926 */
927 boolean_t
928 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
929 {
930 namecheck_err_t why;
931 char what;
932 int ret;
933
934 ret = pool_namecheck(pool, &why, &what);
935
936 /*
937 * The rules for reserved pool names were extended at a later point.
938 * But we need to support users with existing pools that may now be
939 * invalid. So we only check for this expanded set of names during a
940 * create (or import), and only in userland.
941 */
942 if (ret == 0 && !isopen &&
943 (strncmp(pool, "mirror", 6) == 0 ||
944 strncmp(pool, "raidz", 5) == 0 ||
945 strncmp(pool, "spare", 5) == 0 ||
946 strcmp(pool, "log") == 0)) {
947 if (hdl != NULL)
948 zfs_error_aux(hdl,
949 dgettext(TEXT_DOMAIN, "name is reserved"));
950 return (B_FALSE);
951 }
952
953
954 if (ret != 0) {
955 if (hdl != NULL) {
956 switch (why) {
957 case NAME_ERR_TOOLONG:
958 zfs_error_aux(hdl,
959 dgettext(TEXT_DOMAIN, "name is too long"));
960 break;
961
962 case NAME_ERR_INVALCHAR:
963 zfs_error_aux(hdl,
964 dgettext(TEXT_DOMAIN, "invalid character "
965 "'%c' in pool name"), what);
966 break;
967
968 case NAME_ERR_NOLETTER:
969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
970 "name must begin with a letter"));
971 break;
972
973 case NAME_ERR_RESERVED:
974 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
975 "name is reserved"));
976 break;
977
978 case NAME_ERR_DISKLIKE:
979 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
980 "pool name is reserved"));
981 break;
982
983 case NAME_ERR_LEADING_SLASH:
984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
985 "leading slash in name"));
986 break;
987
988 case NAME_ERR_EMPTY_COMPONENT:
989 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
990 "empty component in name"));
991 break;
992
993 case NAME_ERR_TRAILING_SLASH:
994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
995 "trailing slash in name"));
996 break;
997
998 case NAME_ERR_MULTIPLE_DELIMITERS:
999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1000 "multiple '@' and/or '#' delimiters in "
1001 "name"));
1002 break;
1003
1004 case NAME_ERR_NO_AT:
1005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1006 "permission set is missing '@'"));
1007 break;
1008
1009 default:
1010 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1011 "(%d) not defined"), why);
1012 break;
1013 }
1014 }
1015 return (B_FALSE);
1016 }
1017
1018 return (B_TRUE);
1019 }
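/*
 * Illustrative example (not part of the original source): because the
 * reserved-prefix check only runs when isopen is B_FALSE, a name such
 * as "mirrorpool" is rejected at create/import time, but a pre-existing
 * pool by that name can still be opened.
 */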
1020
1021 /*
1022 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1023 * state.
1024 */
1025 zpool_handle_t *
1026 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1027 {
1028 zpool_handle_t *zhp;
1029 boolean_t missing;
1030
1031 /*
1032 * Make sure the pool name is valid.
1033 */
1034 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1035 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1036 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1037 pool);
1038 return (NULL);
1039 }
1040
1041 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1042 return (NULL);
1043
1044 zhp->zpool_hdl = hdl;
1045 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1046
1047 if (zpool_refresh_stats(zhp, &missing) != 0) {
1048 zpool_close(zhp);
1049 return (NULL);
1050 }
1051
1052 if (missing) {
1053 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1054 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1055 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1056 zpool_close(zhp);
1057 return (NULL);
1058 }
1059
1060 return (zhp);
1061 }
1062
1063 /*
1064 * Like the above, but silent on error. Used when iterating over pools (because
1065 * the configuration cache may be out of date).
1066 */
1067 int
1068 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1069 {
1070 zpool_handle_t *zhp;
1071 boolean_t missing;
1072
1073 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1074 return (-1);
1075
1076 zhp->zpool_hdl = hdl;
1077 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1078
1079 if (zpool_refresh_stats(zhp, &missing) != 0) {
1080 zpool_close(zhp);
1081 return (-1);
1082 }
1083
1084 if (missing) {
1085 zpool_close(zhp);
1086 *ret = NULL;
1087 return (0);
1088 }
1089
1090 *ret = zhp;
1091 return (0);
1092 }
1093
1094 /*
1095 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1096 * state.
1097 */
1098 zpool_handle_t *
1099 zpool_open(libzfs_handle_t *hdl, const char *pool)
1100 {
1101 zpool_handle_t *zhp;
1102
1103 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1104 return (NULL);
1105
1106 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1107 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1108 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1109 zpool_close(zhp);
1110 return (NULL);
1111 }
1112
1113 return (zhp);
1114 }
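/*
 * Illustrative sketch (not part of the original source): the usual
 * open/use/close pattern; "tank" is a hypothetical pool name.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		... operate on zhp ...
 *		zpool_close(zhp);
 *	}
 */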
1115
1116 /*
1117 * Close the handle. Simply frees the memory associated with the handle.
1118 */
1119 void
1120 zpool_close(zpool_handle_t *zhp)
1121 {
1122 nvlist_free(zhp->zpool_config);
1123 nvlist_free(zhp->zpool_old_config);
1124 nvlist_free(zhp->zpool_props);
1125 free(zhp);
1126 }
1127
1128 /*
1129 * Return the name of the pool.
1130 */
1131 const char *
1132 zpool_get_name(zpool_handle_t *zhp)
1133 {
1134 return (zhp->zpool_name);
1135 }
1136
1137
1138 /*
1139 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1140 */
1141 int
1142 zpool_get_state(zpool_handle_t *zhp)
1143 {
1144 return (zhp->zpool_state);
1145 }
1146
1147 /*
1148 * Create the named pool, using the provided vdev list. It is assumed
1149 * that the consumer has already validated the contents of the nvlist, so we
1150 * don't have to worry about error semantics.
1151 */
1152 int
1153 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1154 nvlist_t *props, nvlist_t *fsprops)
1155 {
1156 zfs_cmd_t zc = {"\0"};
1157 nvlist_t *zc_fsprops = NULL;
1158 nvlist_t *zc_props = NULL;
1159 char msg[1024];
1160 int ret = -1;
1161
1162 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1163 "cannot create '%s'"), pool);
1164
1165 if (!zpool_name_valid(hdl, B_FALSE, pool))
1166 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1167
1168 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1169 return (-1);
1170
1171 if (props) {
1172 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1173
1174 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1175 SPA_VERSION_1, flags, msg)) == NULL) {
1176 goto create_failed;
1177 }
1178 }
1179
1180 if (fsprops) {
1181 uint64_t zoned;
1182 char *zonestr;
1183
1184 zoned = ((nvlist_lookup_string(fsprops,
1185 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1186 strcmp(zonestr, "on") == 0);
1187
1188 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1189 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1190 goto create_failed;
1191 }
1192 if (!zc_props &&
1193 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1194 goto create_failed;
1195 }
1196 if (nvlist_add_nvlist(zc_props,
1197 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1198 goto create_failed;
1199 }
1200 }
1201
1202 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1203 goto create_failed;
1204
1205 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1206
1207 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1208
1209 zcmd_free_nvlists(&zc);
1210 nvlist_free(zc_props);
1211 nvlist_free(zc_fsprops);
1212
1213 switch (errno) {
1214 case EBUSY:
1215 /*
1216 * This can happen if the user has specified the same
1217 * device multiple times. We can't reliably detect this
1218 * until we try to add it and see we already have a
1219                          * label. This can also happen if the device is
1220 * part of an active md or lvm device.
1221 */
1222 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1223 "one or more vdevs refer to the same device, or "
1224 "one of\nthe devices is part of an active md or "
1225 "lvm device"));
1226 return (zfs_error(hdl, EZFS_BADDEV, msg));
1227
1228 case ERANGE:
1229 /*
1230                          * This happens if the record size is outside the
1231                          * allowed size range, or is not a power of 2.
1232 *
1233 * NOTE: although zfs_valid_proplist is called earlier,
1234 * this case may have slipped through since the
1235 * pool does not exist yet and it is therefore
1236 * impossible to read properties e.g. max blocksize
1237 * from the pool.
1238 */
1239 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1240 "record size invalid"));
1241 return (zfs_error(hdl, EZFS_BADPROP, msg));
1242
1243 case EOVERFLOW:
1244 /*
1245 * This occurs when one of the devices is below
1246 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1247 * device was the problem device since there's no
1248 * reliable way to determine device size from userland.
1249 */
1250 {
1251 char buf[64];
1252
1253 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1254
1255 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1256 "one or more devices is less than the "
1257 "minimum size (%s)"), buf);
1258 }
1259 return (zfs_error(hdl, EZFS_BADDEV, msg));
1260
1261 case ENOSPC:
1262 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1263 "one or more devices is out of space"));
1264 return (zfs_error(hdl, EZFS_BADDEV, msg));
1265
1266 case ENOTBLK:
1267 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1268 "cache device must be a disk or disk slice"));
1269 return (zfs_error(hdl, EZFS_BADDEV, msg));
1270
1271 default:
1272 return (zpool_standard_error(hdl, errno, msg));
1273 }
1274 }
1275
1276 create_failed:
1277 zcmd_free_nvlists(&zc);
1278 nvlist_free(zc_props);
1279 nvlist_free(zc_fsprops);
1280 return (ret);
1281 }
1282
1283 /*
1284 * Destroy the given pool. It is up to the caller to ensure that there are no
1285 * datasets left in the pool.
1286 */
1287 int
1288 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1289 {
1290 zfs_cmd_t zc = {"\0"};
1291 zfs_handle_t *zfp = NULL;
1292 libzfs_handle_t *hdl = zhp->zpool_hdl;
1293 char msg[1024];
1294
1295 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1296 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1297 return (-1);
1298
1299 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1300 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1301
1302 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1303 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1304 "cannot destroy '%s'"), zhp->zpool_name);
1305
1306 if (errno == EROFS) {
1307 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1308 "one or more devices is read only"));
1309 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1310 } else {
1311 (void) zpool_standard_error(hdl, errno, msg);
1312 }
1313
1314 if (zfp)
1315 zfs_close(zfp);
1316 return (-1);
1317 }
1318
1319 if (zfp) {
1320 remove_mountpoint(zfp);
1321 zfs_close(zfp);
1322 }
1323
1324 return (0);
1325 }
1326
1327 /*
1328 * Add the given vdevs to the pool. The caller must have already performed the
1329 * necessary verification to ensure that the vdev specification is well-formed.
1330 */
1331 int
1332 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1333 {
1334 zfs_cmd_t zc = {"\0"};
1335 int ret;
1336 libzfs_handle_t *hdl = zhp->zpool_hdl;
1337 char msg[1024];
1338 nvlist_t **spares, **l2cache;
1339 uint_t nspares, nl2cache;
1340
1341 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1342 "cannot add to '%s'"), zhp->zpool_name);
1343
1344 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1345 SPA_VERSION_SPARES &&
1346 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1347 &spares, &nspares) == 0) {
1348 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1349 "upgraded to add hot spares"));
1350 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1351 }
1352
1353 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1354 SPA_VERSION_L2CACHE &&
1355 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1356 &l2cache, &nl2cache) == 0) {
1357 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1358 "upgraded to add cache devices"));
1359 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1360 }
1361
1362 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1363 return (-1);
1364 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1365
1366 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1367 switch (errno) {
1368 case EBUSY:
1369 /*
1370 * This can happen if the user has specified the same
1371 * device multiple times. We can't reliably detect this
1372 * until we try to add it and see we already have a
1373 * label.
1374 */
1375 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1376 "one or more vdevs refer to the same device"));
1377 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1378 break;
1379
1380 case EOVERFLOW:
1381 /*
1382                          * This occurs when one of the devices is below
1383 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1384 * device was the problem device since there's no
1385 * reliable way to determine device size from userland.
1386 */
1387 {
1388 char buf[64];
1389
1390 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1391
1392 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1393 "device is less than the minimum "
1394 "size (%s)"), buf);
1395 }
1396 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1397 break;
1398
1399 case ENOTSUP:
1400 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1401 "pool must be upgraded to add these vdevs"));
1402 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1403 break;
1404
1405 case ENOTBLK:
1406 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1407 "cache device must be a disk or disk slice"));
1408 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1409 break;
1410
1411 default:
1412 (void) zpool_standard_error(hdl, errno, msg);
1413 }
1414
1415 ret = -1;
1416 } else {
1417 ret = 0;
1418 }
1419
1420 zcmd_free_nvlists(&zc);
1421
1422 return (ret);
1423 }
1424
1425 /*
1426 * Exports the pool from the system. The caller must ensure that there are no
1427 * mounted datasets in the pool.
1428 */
1429 static int
1430 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1431 const char *log_str)
1432 {
1433 zfs_cmd_t zc = {"\0"};
1434 char msg[1024];
1435
1436 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1437 "cannot export '%s'"), zhp->zpool_name);
1438
1439 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1440 zc.zc_cookie = force;
1441 zc.zc_guid = hardforce;
1442 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1443
1444 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1445 switch (errno) {
1446 case EXDEV:
1447 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1448 "use '-f' to override the following errors:\n"
1449 "'%s' has an active shared spare which could be"
1450 " used by other pools once '%s' is exported."),
1451 zhp->zpool_name, zhp->zpool_name);
1452 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1453 msg));
1454 default:
1455 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1456 msg));
1457 }
1458 }
1459
1460 return (0);
1461 }
1462
1463 int
1464 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1465 {
1466 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1467 }
1468
1469 int
1470 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1471 {
1472 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1473 }
1474
1475 static void
1476 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1477 nvlist_t *config)
1478 {
1479 nvlist_t *nv = NULL;
1480 uint64_t rewindto;
1481 int64_t loss = -1;
1482 struct tm t;
1483 char timestr[128];
1484
1485 if (!hdl->libzfs_printerr || config == NULL)
1486 return;
1487
1488 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1489 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1490 return;
1491 }
1492
1493 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1494 return;
1495 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1496
1497 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1498 strftime(timestr, 128, "%c", &t) != 0) {
1499 if (dryrun) {
1500 (void) printf(dgettext(TEXT_DOMAIN,
1501 "Would be able to return %s "
1502 "to its state as of %s.\n"),
1503 name, timestr);
1504 } else {
1505 (void) printf(dgettext(TEXT_DOMAIN,
1506 "Pool %s returned to its state as of %s.\n"),
1507 name, timestr);
1508 }
1509 if (loss > 120) {
1510 (void) printf(dgettext(TEXT_DOMAIN,
1511 "%s approximately %lld "),
1512 dryrun ? "Would discard" : "Discarded",
1513 ((longlong_t)loss + 30) / 60);
1514 (void) printf(dgettext(TEXT_DOMAIN,
1515 "minutes of transactions.\n"));
1516 } else if (loss > 0) {
1517 (void) printf(dgettext(TEXT_DOMAIN,
1518 "%s approximately %lld "),
1519 dryrun ? "Would discard" : "Discarded",
1520 (longlong_t)loss);
1521 (void) printf(dgettext(TEXT_DOMAIN,
1522 "seconds of transactions.\n"));
1523 }
1524 }
1525 }
1526
1527 void
1528 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1529 nvlist_t *config)
1530 {
1531 nvlist_t *nv = NULL;
1532 int64_t loss = -1;
1533 uint64_t edata = UINT64_MAX;
1534 uint64_t rewindto;
1535 struct tm t;
1536 char timestr[128];
1537
1538 if (!hdl->libzfs_printerr)
1539 return;
1540
1541 if (reason >= 0)
1542 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1543 else
1544 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1545
1546 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1547 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1548 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1549 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1550 goto no_info;
1551
1552 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1553 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1554 &edata);
1555
1556 (void) printf(dgettext(TEXT_DOMAIN,
1557 "Recovery is possible, but will result in some data loss.\n"));
1558
1559 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1560 strftime(timestr, 128, "%c", &t) != 0) {
1561 (void) printf(dgettext(TEXT_DOMAIN,
1562 "\tReturning the pool to its state as of %s\n"
1563 "\tshould correct the problem. "),
1564 timestr);
1565 } else {
1566 (void) printf(dgettext(TEXT_DOMAIN,
1567 "\tReverting the pool to an earlier state "
1568 "should correct the problem.\n\t"));
1569 }
1570
1571 if (loss > 120) {
1572 (void) printf(dgettext(TEXT_DOMAIN,
1573 "Approximately %lld minutes of data\n"
1574 "\tmust be discarded, irreversibly. "),
1575 ((longlong_t)loss + 30) / 60);
1576 } else if (loss > 0) {
1577 (void) printf(dgettext(TEXT_DOMAIN,
1578 "Approximately %lld seconds of data\n"
1579 "\tmust be discarded, irreversibly. "),
1580 (longlong_t)loss);
1581 }
1582 if (edata != 0 && edata != UINT64_MAX) {
1583 if (edata == 1) {
1584 (void) printf(dgettext(TEXT_DOMAIN,
1585 "After rewind, at least\n"
1586 "\tone persistent user-data error will remain. "));
1587 } else {
1588 (void) printf(dgettext(TEXT_DOMAIN,
1589 "After rewind, several\n"
1590 "\tpersistent user-data errors will remain. "));
1591 }
1592 }
1593 (void) printf(dgettext(TEXT_DOMAIN,
1594 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1595 reason >= 0 ? "clear" : "import", name);
1596
1597 (void) printf(dgettext(TEXT_DOMAIN,
1598 "A scrub of the pool\n"
1599 "\tis strongly recommended after recovery.\n"));
1600 return;
1601
1602 no_info:
1603 (void) printf(dgettext(TEXT_DOMAIN,
1604 "Destroy and re-create the pool from\n\ta backup source.\n"));
1605 }
1606
1607 /*
1608 * zpool_import() is a contracted interface. Should be kept the same
1609 * if possible.
1610 *
1611 * Applications should use zpool_import_props() to import a pool with
1612  * new property values to be set.
1613 */
1614 int
1615 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1616 char *altroot)
1617 {
1618 nvlist_t *props = NULL;
1619 int ret;
1620
1621 if (altroot != NULL) {
1622 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1623 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1624 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1625 newname));
1626 }
1627
1628 if (nvlist_add_string(props,
1629 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1630 nvlist_add_string(props,
1631 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1632 nvlist_free(props);
1633 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1634 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1635 newname));
1636 }
1637 }
1638
1639 ret = zpool_import_props(hdl, config, newname, props,
1640 ZFS_IMPORT_NORMAL);
1641 nvlist_free(props);
1642 return (ret);
1643 }
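/*
 * Illustrative sketch (not part of the original source): importing
 * under an alternate root; 'config' is assumed to have come from
 * zpool_find_import(), and "/sysroot" is a hypothetical path.
 *
 *	if (zpool_import(hdl, config, NULL, "/sysroot") != 0)
 *		... report the error ...
 *
 * Passing an altroot also forces cachefile=none, as seen above.
 */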
1644
1645 static void
1646 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1647 int indent)
1648 {
1649 nvlist_t **child;
1650 uint_t c, children;
1651 char *vname;
1652 uint64_t is_log = 0;
1653
1654 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1655 &is_log);
1656
1657 if (name != NULL)
1658 (void) printf("\t%*s%s%s\n", indent, "", name,
1659 is_log ? " [log]" : "");
1660
1661 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1662 &child, &children) != 0)
1663 return;
1664
1665 for (c = 0; c < children; c++) {
1666 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1667 print_vdev_tree(hdl, vname, child[c], indent + 2);
1668 free(vname);
1669 }
1670 }
1671
1672 void
1673 zpool_print_unsup_feat(nvlist_t *config)
1674 {
1675 nvlist_t *nvinfo, *unsup_feat;
1676 nvpair_t *nvp;
1677
1678 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1679 0);
1680 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1681 &unsup_feat) == 0);
1682
1683 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1684 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1685 char *desc;
1686
1687 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1688 verify(nvpair_value_string(nvp, &desc) == 0);
1689
1690 if (strlen(desc) > 0)
1691 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1692 else
1693 (void) printf("\t%s\n", nvpair_name(nvp));
1694 }
1695 }
1696
1697 /*
1698 * Import the given pool using the known configuration and a list of
1699 * properties to be set. The configuration should have come from
1700  * zpool_find_import(). The 'newname' parameter controls whether the pool
1701 * is imported with a different name.
1702 */
1703 int
1704 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1705 nvlist_t *props, int flags)
1706 {
1707 zfs_cmd_t zc = {"\0"};
1708 zpool_rewind_policy_t policy;
1709 nvlist_t *nv = NULL;
1710 nvlist_t *nvinfo = NULL;
1711 nvlist_t *missing = NULL;
1712 char *thename;
1713 char *origname;
1714 int ret;
1715 int error = 0;
1716 char errbuf[1024];
1717
1718 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1719 &origname) == 0);
1720
1721 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1722 "cannot import pool '%s'"), origname);
1723
1724 if (newname != NULL) {
1725 if (!zpool_name_valid(hdl, B_FALSE, newname))
1726 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1727 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1728 newname));
1729 thename = (char *)newname;
1730 } else {
1731 thename = origname;
1732 }
1733
1734 if (props != NULL) {
1735 uint64_t version;
1736 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1737
1738 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1739 &version) == 0);
1740
1741 if ((props = zpool_valid_proplist(hdl, origname,
1742 props, version, flags, errbuf)) == NULL)
1743 return (-1);
1744 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1745 nvlist_free(props);
1746 return (-1);
1747 }
1748 nvlist_free(props);
1749 }
1750
1751 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1752
1753 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1754 &zc.zc_guid) == 0);
1755
1756 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1757 zcmd_free_nvlists(&zc);
1758 return (-1);
1759 }
1760 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1761 zcmd_free_nvlists(&zc);
1762 return (-1);
1763 }
1764
1765 zc.zc_cookie = flags;
1766 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1767 errno == ENOMEM) {
1768 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1769 zcmd_free_nvlists(&zc);
1770 return (-1);
1771 }
1772 }
1773 if (ret != 0)
1774 error = errno;
1775
1776 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1777
1778 zcmd_free_nvlists(&zc);
1779
1780 zpool_get_rewind_policy(config, &policy);
1781
1782 if (error) {
1783 char desc[1024];
1784
1785 /*
1786 * Dry-run failed, but we print out what success
1787                  * looks like if we found a best txg.
1788 */
1789 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1790 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1791 B_TRUE, nv);
1792 nvlist_free(nv);
1793 return (-1);
1794 }
1795
1796 if (newname == NULL)
1797 (void) snprintf(desc, sizeof (desc),
1798 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1799 thename);
1800 else
1801 (void) snprintf(desc, sizeof (desc),
1802 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1803 origname, thename);
1804
1805 switch (error) {
1806 case ENOTSUP:
1807 if (nv != NULL && nvlist_lookup_nvlist(nv,
1808 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1809 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1810 (void) printf(dgettext(TEXT_DOMAIN, "This "
1811 "pool uses the following feature(s) not "
1812 "supported by this system:\n"));
1813 zpool_print_unsup_feat(nv);
1814 if (nvlist_exists(nvinfo,
1815 ZPOOL_CONFIG_CAN_RDONLY)) {
1816 (void) printf(dgettext(TEXT_DOMAIN,
1817 "All unsupported features are only "
1818 "required for writing to the pool."
1819 "\nThe pool can be imported using "
1820 "'-o readonly=on'.\n"));
1821 }
1822 }
1823 /*
1824 * Unsupported version.
1825 */
1826 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1827 break;
1828
1829 case EINVAL:
1830 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1831 break;
1832
1833 case EROFS:
1834 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1835 "one or more devices is read only"));
1836 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1837 break;
1838
1839 case ENXIO:
1840 if (nv && nvlist_lookup_nvlist(nv,
1841 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1842 nvlist_lookup_nvlist(nvinfo,
1843 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1844 (void) printf(dgettext(TEXT_DOMAIN,
1845 "The devices below are missing, use "
1846 "'-m' to import the pool anyway:\n"));
1847 print_vdev_tree(hdl, NULL, missing, 2);
1848 (void) printf("\n");
1849 }
1850 (void) zpool_standard_error(hdl, error, desc);
1851 break;
1852
1853 case EEXIST:
1854 (void) zpool_standard_error(hdl, error, desc);
1855 break;
1856
1857 case EBUSY:
1858 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1859 "one or more devices are already in use\n"));
1860 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1861 break;
1862 case ENAMETOOLONG:
1863 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1864 "new name of at least one dataset is longer than "
1865 "the maximum allowable length"));
1866 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1867 break;
1868 default:
1869 (void) zpool_standard_error(hdl, error, desc);
1870 zpool_explain_recover(hdl,
1871 newname ? origname : thename, -error, nv);
1872 break;
1873 }
1874
1875 nvlist_free(nv);
1876 ret = -1;
1877 } else {
1878 zpool_handle_t *zhp;
1879
1880 /*
1881 * This should never fail, but play it safe anyway.
1882 */
1883 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1884 ret = -1;
1885 else if (zhp != NULL)
1886 zpool_close(zhp);
1887 if (policy.zrp_request &
1888 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1889 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1890 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1891 }
1892 nvlist_free(nv);
1893 return (0);
1894 }
1895
1896 return (ret);
1897 }
1898
1899 /*
1900 * Scan the pool.
1901 */
1902 int
1903 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1904 {
1905 zfs_cmd_t zc = {"\0"};
1906 char msg[1024];
1907 libzfs_handle_t *hdl = zhp->zpool_hdl;
1908
1909 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1910 zc.zc_cookie = func;
1911
1912 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1913 (errno == ENOENT && func != POOL_SCAN_NONE))
1914 return (0);
1915
1916 if (func == POOL_SCAN_SCRUB) {
1917 (void) snprintf(msg, sizeof (msg),
1918 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1919 } else if (func == POOL_SCAN_NONE) {
1920 (void) snprintf(msg, sizeof (msg),
1921 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1922 zc.zc_name);
1923 } else {
1924 assert(!"unexpected result");
1925 }
1926
1927 if (errno == EBUSY) {
1928 nvlist_t *nvroot;
1929 pool_scan_stat_t *ps = NULL;
1930 uint_t psc;
1931
1932 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1933 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1934 (void) nvlist_lookup_uint64_array(nvroot,
1935 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1936 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1937 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1938 else
1939 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1940 } else if (errno == ENOENT) {
1941 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1942 } else {
1943 return (zpool_standard_error(hdl, errno, msg));
1944 }
1945 }
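/*
 * Illustrative sketch (not part of the original source): starting and
 * cancelling a scrub go through the same entry point.
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);	start a scrub
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);		cancel it
 */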
1946
1947 /*
1948  * Find a vdev that matches the specified search criteria. We use the
1949  * nvpair name to determine how we should look for the device.
1950  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1951  * spare, but FALSE if it is an INUSE spare.
1952 */
1953 static nvlist_t *
1954 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1955 boolean_t *l2cache, boolean_t *log)
1956 {
1957 uint_t c, children;
1958 nvlist_t **child;
1959 nvlist_t *ret;
1960 uint64_t is_log;
1961 char *srchkey;
1962 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1963
1964 /* Nothing to look for */
1965 if (search == NULL || pair == NULL)
1966 return (NULL);
1967
1968 /* Obtain the key we will use to search */
1969 srchkey = nvpair_name(pair);
1970
1971 switch (nvpair_type(pair)) {
1972 case DATA_TYPE_UINT64:
1973 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1974 uint64_t srchval, theguid;
1975
1976 verify(nvpair_value_uint64(pair, &srchval) == 0);
1977 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1978 &theguid) == 0);
1979 if (theguid == srchval)
1980 return (nv);
1981 }
1982 break;
1983
1984 case DATA_TYPE_STRING: {
1985 char *srchval, *val;
1986
1987 verify(nvpair_value_string(pair, &srchval) == 0);
1988 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1989 break;
1990
1991 /*
1992 * Search for the requested value. Special cases:
1993 *
1994 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1995 * "-part1", or "p1". The suffix is hidden from the user,
1996 * but included in the string, so this matches around it.
1997          * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
1998 * is used to check all possible expanded paths.
1999 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2000 *
2001 * Otherwise, all other searches are simple string compares.
2002 */
2003 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2004 uint64_t wholedisk = 0;
2005
2006 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2007 &wholedisk);
2008 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2009 return (nv);
2010
2011 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2012 char *type, *idx, *end, *p;
2013 uint64_t id, vdev_id;
2014
2015 /*
2016 * Determine our vdev type, keeping in mind
2017 * that the srchval is composed of a type and
2018 * vdev id pair (i.e. mirror-4).
2019 */
2020 if ((type = strdup(srchval)) == NULL)
2021 return (NULL);
2022
2023 if ((p = strrchr(type, '-')) == NULL) {
2024 free(type);
2025 break;
2026 }
2027 idx = p + 1;
2028 *p = '\0';
2029
2030 /*
2031 * If the types don't match then keep looking.
2032 */
2033 if (strncmp(val, type, strlen(val)) != 0) {
2034 free(type);
2035 break;
2036 }
2037
2038 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2039 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2040 strncmp(type, VDEV_TYPE_MIRROR,
2041 strlen(VDEV_TYPE_MIRROR)) == 0);
2042 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2043 &id) == 0);
2044
2045 errno = 0;
2046 vdev_id = strtoull(idx, &end, 10);
2047
2048 free(type);
2049 if (errno != 0)
2050 return (NULL);
2051
2052 /*
2053 * Now verify that we have the correct vdev id.
2054 */
2055 if (vdev_id == id)
2056 return (nv);
2057 }
2058
2059 /*
2060 * Common case
2061 */
2062 if (strcmp(srchval, val) == 0)
2063 return (nv);
2064 break;
2065 }
2066
2067 default:
2068 break;
2069 }
2070
2071 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2072 &child, &children) != 0)
2073 return (NULL);
2074
2075 for (c = 0; c < children; c++) {
2076 if ((ret = vdev_to_nvlist_iter(child[c], search,
2077 avail_spare, l2cache, NULL)) != NULL) {
2078 /*
2079 * The 'is_log' value is only set for the toplevel
2080 * vdev, not the leaf vdevs. So we always lookup the
2081 * log device from the root of the vdev tree (where
2082 * 'log' is non-NULL).
2083 */
2084 if (log != NULL &&
2085 nvlist_lookup_uint64(child[c],
2086 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2087 is_log) {
2088 *log = B_TRUE;
2089 }
2090 return (ret);
2091 }
2092 }
2093
2094 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2095 &child, &children) == 0) {
2096 for (c = 0; c < children; c++) {
2097 if ((ret = vdev_to_nvlist_iter(child[c], search,
2098 avail_spare, l2cache, NULL)) != NULL) {
2099 *avail_spare = B_TRUE;
2100 return (ret);
2101 }
2102 }
2103 }
2104
2105 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2106 &child, &children) == 0) {
2107 for (c = 0; c < children; c++) {
2108 if ((ret = vdev_to_nvlist_iter(child[c], search,
2109 avail_spare, l2cache, NULL)) != NULL) {
2110 *l2cache = B_TRUE;
2111 return (ret);
2112 }
2113 }
2114 }
2115
2116 return (NULL);
2117 }
2118
2119 /*
2120 * Given a physical path (minus the "/devices" prefix), find the
2121 * associated vdev.
2122 */
2123 nvlist_t *
2124 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2125 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2126 {
2127 nvlist_t *search, *nvroot, *ret;
2128
2129 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2130 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2131
2132 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2133 &nvroot) == 0);
2134
2135 *avail_spare = B_FALSE;
2136 *l2cache = B_FALSE;
2137 if (log != NULL)
2138 *log = B_FALSE;
2139 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2140 nvlist_free(search);
2141
2142 return (ret);
2143 }
2144
2145 /*
2146 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2147 */
2148 boolean_t
2149 zpool_vdev_is_interior(const char *name)
2150 {
2151 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2152 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2153 return (B_TRUE);
2154 return (B_FALSE);
2155 }
2156
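/*
 * Illustrative examples (not part of the original source): the prefix
 * comparison above accepts both bare and indexed top-level names.
 *
 *	zpool_vdev_is_interior("mirror-0")	returns B_TRUE
 *	zpool_vdev_is_interior("raidz2-1")	returns B_TRUE
 *	zpool_vdev_is_interior("sda")		returns B_FALSE
 */
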
2157 nvlist_t *
2158 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2159 boolean_t *l2cache, boolean_t *log)
2160 {
2161 char *end;
2162 nvlist_t *nvroot, *search, *ret;
2163 uint64_t guid;
2164
2165 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2166
2167 guid = strtoull(path, &end, 0);
2168 if (guid != 0 && *end == '\0') {
2169 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2170 } else if (zpool_vdev_is_interior(path)) {
2171 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2172 } else {
2173 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2174 }
2175
2176 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2177 &nvroot) == 0);
2178
2179 *avail_spare = B_FALSE;
2180 *l2cache = B_FALSE;
2181 if (log != NULL)
2182 *log = B_FALSE;
2183 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2184 nvlist_free(search);
2185
2186 return (ret);
2187 }
2188
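/*
 * Illustrative sketch (not part of the original source): look up a leaf
 * vdev by path and read its guid; "/dev/sda" is a hypothetical device.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *	uint64_t guid;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "/dev/sda", &spare, &l2cache,
 *	    &log)) != NULL && !spare && !l2cache) {
 *		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
 *		    &guid) == 0);
 *	}
 */
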
2189 static int
2190 vdev_online(nvlist_t *nv)
2191 {
2192 uint64_t ival;
2193
2194 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2195 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2196 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2197 return (0);
2198
2199 return (1);
2200 }
2201
2202 /*
2203 * Helper function for zpool_get_physpaths().
2204 */
2205 static int
2206 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2207 size_t *bytes_written)
2208 {
2209 size_t bytes_left, pos, rsz;
2210 char *tmppath;
2211 const char *format;
2212
2213 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2214 &tmppath) != 0)
2215 return (EZFS_NODEVICE);
2216
2217 pos = *bytes_written;
2218 bytes_left = physpath_size - pos;
2219 format = (pos == 0) ? "%s" : " %s";
2220
2221 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2222 *bytes_written += rsz;
2223
2224 if (rsz >= bytes_left) {
2225 /* if physpath was not copied properly, clear it */
2226 if (bytes_left != 0) {
2227 physpath[pos] = 0;
2228 }
2229 return (EZFS_NOSPC);
2230 }
2231 return (0);
2232 }
2233
2234 static int
2235 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2236 size_t *rsz, boolean_t is_spare)
2237 {
2238 char *type;
2239 int ret;
2240
2241 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2242 return (EZFS_INVALCONFIG);
2243
2244 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2245 /*
2246 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2247 * For a spare vdev, we only want to boot from the active
2248 * spare device.
2249 */
2250 if (is_spare) {
2251 uint64_t spare = 0;
2252 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2253 &spare);
2254 if (!spare)
2255 return (EZFS_INVALCONFIG);
2256 }
2257
2258 if (vdev_online(nv)) {
2259 if ((ret = vdev_get_one_physpath(nv, physpath,
2260 phypath_size, rsz)) != 0)
2261 return (ret);
2262 }
2263 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2264 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2265 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2266 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2267 nvlist_t **child;
2268 uint_t count;
2269 int i, ret;
2270
2271 if (nvlist_lookup_nvlist_array(nv,
2272 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2273 return (EZFS_INVALCONFIG);
2274
2275 for (i = 0; i < count; i++) {
2276 ret = vdev_get_physpaths(child[i], physpath,
2277 phypath_size, rsz, is_spare);
2278 if (ret == EZFS_NOSPC)
2279 return (ret);
2280 }
2281 }
2282
2283 return (EZFS_POOL_INVALARG);
2284 }
2285
2286 /*
2287 * Get phys_path for a root pool config.
2288 * Return 0 on success; non-zero on failure.
2289 */
2290 static int
2291 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2292 {
2293 size_t rsz;
2294 nvlist_t *vdev_root;
2295 nvlist_t **child;
2296 uint_t count;
2297 char *type;
2298
2299 rsz = 0;
2300
2301 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2302 &vdev_root) != 0)
2303 return (EZFS_INVALCONFIG);
2304
2305 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2306 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2307 &child, &count) != 0)
2308 return (EZFS_INVALCONFIG);
2309
2310 /*
2311 * A root pool can only have a single top-level vdev.
2312 */
2313 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2314 return (EZFS_POOL_INVALARG);
2315
2316 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2317 B_FALSE);
2318
2319 /* No online devices */
2320 if (rsz == 0)
2321 return (EZFS_NODEVICE);
2322
2323 return (0);
2324 }
2325
2326 /*
2327 * Get phys_path for a root pool
2328 * Return 0 on success; non-zero on failure.
2329 */
2330 int
2331 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2332 {
2333 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2334 phypath_size));
2335 }
2336
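/*
 * Illustrative sketch (not part of the original source): retrieve the
 * space-separated phys_path list for a bootable pool.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot devices: %s\n", physpath);
 */
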
2337 /*
2338 * If the device has been dynamically expanded then we need to relabel
2339 * the disk to use the new unallocated space.
2340 */
2341 static int
2342 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2343 {
2344 int fd, error;
2345
2346 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2347 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2348 "relabel '%s': unable to open device: %d"), path, errno);
2349 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2350 }
2351
2352 /*
2353 * It's possible that we might encounter an error if the device
2354 * does not have any unallocated space left. If so, we simply
2355 * ignore that error and continue on.
2356 *
2357 * Also, we don't call efi_rescan() - that would just return EBUSY.
2358 * The module will do it for us in vdev_disk_open().
2359 */
2360 error = efi_use_whole_disk(fd);
2361 (void) close(fd);
2362 if (error && error != VT_ENOSPC) {
2363 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2364 "relabel '%s': unable to read disk capacity"), path);
2365 return (zfs_error(hdl, EZFS_NOCAP, msg));
2366 }
2367 return (0);
2368 }
2369
2370 /*
2371 * Bring the specified vdev online. The 'flags' parameter is a set of the
2372 * ZFS_ONLINE_* flags.
2373 */
2374 int
2375 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2376 vdev_state_t *newstate)
2377 {
2378 zfs_cmd_t zc = {"\0"};
2379 char msg[1024];
2380 nvlist_t *tgt;
2381 boolean_t avail_spare, l2cache, islog;
2382 libzfs_handle_t *hdl = zhp->zpool_hdl;
2383 int error;
2384
2385 if (flags & ZFS_ONLINE_EXPAND) {
2386 (void) snprintf(msg, sizeof (msg),
2387 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2388 } else {
2389 (void) snprintf(msg, sizeof (msg),
2390 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2391 }
2392
2393 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2394 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2395 &islog)) == NULL)
2396 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2397
2398 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2399
2400 if (avail_spare)
2401 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2402
2403 if (flags & ZFS_ONLINE_EXPAND ||
2404 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2405 uint64_t wholedisk = 0;
2406
2407 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2408 &wholedisk);
2409
2410 /*
2411 * XXX - L2ARC 1.0 devices can't support expansion.
2412 */
2413 if (l2cache) {
2414 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2415 "cannot expand cache devices"));
2416 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2417 }
2418
2419 if (wholedisk) {
2420 const char *fullpath = path;
2421 char buf[MAXPATHLEN];
2422
2423 if (path[0] != '/') {
2424 error = zfs_resolve_shortname(path, buf,
2425 sizeof (buf));
2426 if (error != 0)
2427 return (zfs_error(hdl, EZFS_NODEVICE,
2428 msg));
2429
2430 fullpath = buf;
2431 }
2432
2433 error = zpool_relabel_disk(hdl, fullpath, msg);
2434 if (error != 0)
2435 return (error);
2436 }
2437 }
2438
2439 zc.zc_cookie = VDEV_STATE_ONLINE;
2440 zc.zc_obj = flags;
2441
2442 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2443 if (errno == EINVAL) {
2444 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2445 "from this pool into a new one. Use '%s' "
2446 "instead"), "zpool detach");
2447 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2448 }
2449 return (zpool_standard_error(hdl, errno, msg));
2450 }
2451
2452 *newstate = zc.zc_cookie;
2453 return (0);
2454 }
2455
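/*
 * Illustrative sketch (not part of the original source): online a
 * hypothetical device and request expansion in the same call.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "/dev/sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device onlined, but not healthy\n");
 */
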
2456 /*
2457 * Take the specified vdev offline
2458 */
2459 int
2460 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2461 {
2462 zfs_cmd_t zc = {"\0"};
2463 char msg[1024];
2464 nvlist_t *tgt;
2465 boolean_t avail_spare, l2cache;
2466 libzfs_handle_t *hdl = zhp->zpool_hdl;
2467
2468 (void) snprintf(msg, sizeof (msg),
2469 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2470
2471 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2472 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2473 NULL)) == NULL)
2474 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2475
2476 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2477
2478 if (avail_spare)
2479 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2480
2481 zc.zc_cookie = VDEV_STATE_OFFLINE;
2482 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2483
2484 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2485 return (0);
2486
2487 switch (errno) {
2488 case EBUSY:
2489
2490 /*
2491 * There are no other replicas of this device.
2492 */
2493 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2494
2495 case EEXIST:
2496 /*
2497 * The log device has unplayed logs
2498 */
2499 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2500
2501 default:
2502 return (zpool_standard_error(hdl, errno, msg));
2503 }
2504 }
2505
2506 /*
2507 * Mark the given vdev faulted.
2508 */
2509 int
2510 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2511 {
2512 zfs_cmd_t zc = {"\0"};
2513 char msg[1024];
2514 libzfs_handle_t *hdl = zhp->zpool_hdl;
2515
2516 (void) snprintf(msg, sizeof (msg),
2517 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2518
2519 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2520 zc.zc_guid = guid;
2521 zc.zc_cookie = VDEV_STATE_FAULTED;
2522 zc.zc_obj = aux;
2523
2524 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2525 return (0);
2526
2527 switch (errno) {
2528 case EBUSY:
2529
2530 /*
2531 * There are no other replicas of this device.
2532 */
2533 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2534
2535 default:
2536 return (zpool_standard_error(hdl, errno, msg));
2537 }
2538
2539 }
2540
2541 /*
2542 * Mark the given vdev degraded.
2543 */
2544 int
2545 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2546 {
2547 zfs_cmd_t zc = {"\0"};
2548 char msg[1024];
2549 libzfs_handle_t *hdl = zhp->zpool_hdl;
2550
2551 (void) snprintf(msg, sizeof (msg),
2552 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2553
2554 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2555 zc.zc_guid = guid;
2556 zc.zc_cookie = VDEV_STATE_DEGRADED;
2557 zc.zc_obj = aux;
2558
2559 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2560 return (0);
2561
2562 return (zpool_standard_error(hdl, errno, msg));
2563 }
2564
2565 /*
2566 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2567 * a hot spare.
2568 */
2569 static boolean_t
2570 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2571 {
2572 nvlist_t **child;
2573 uint_t c, children;
2574 char *type;
2575
2576 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2577 &children) == 0) {
2578 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2579 &type) == 0);
2580
2581 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2582 children == 2 && child[which] == tgt)
2583 return (B_TRUE);
2584
2585 for (c = 0; c < children; c++)
2586 if (is_replacing_spare(child[c], tgt, which))
2587 return (B_TRUE);
2588 }
2589
2590 return (B_FALSE);
2591 }
2592
2593 /*
2594 * Attach new_disk (fully described by nvroot) to old_disk.
2595 * If 'replacing' is specified, the new disk will replace the old one.
2596 */
2597 int
2598 zpool_vdev_attach(zpool_handle_t *zhp,
2599 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2600 {
2601 zfs_cmd_t zc = {"\0"};
2602 char msg[1024];
2603 int ret;
2604 nvlist_t *tgt;
2605 boolean_t avail_spare, l2cache, islog;
2606 uint64_t val;
2607 char *newname;
2608 nvlist_t **child;
2609 uint_t children;
2610 nvlist_t *config_root;
2611 libzfs_handle_t *hdl = zhp->zpool_hdl;
2612 boolean_t rootpool = zpool_is_bootable(zhp);
2613
2614 if (replacing)
2615 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2616 "cannot replace %s with %s"), old_disk, new_disk);
2617 else
2618 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2619 "cannot attach %s to %s"), new_disk, old_disk);
2620
2621 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2622 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2623 &islog)) == 0)
2624 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2625
2626 if (avail_spare)
2627 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2628
2629 if (l2cache)
2630 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2631
2632 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2633 zc.zc_cookie = replacing;
2634
2635 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2636 &child, &children) != 0 || children != 1) {
2637 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2638 "new device must be a single disk"));
2639 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2640 }
2641
2642 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2643 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2644
2645 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2646 return (-1);
2647
2648 /*
2649 * If the target is a hot spare that has been swapped in, we can only
2650 * replace it with another hot spare.
2651 */
2652 if (replacing &&
2653 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2654 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2655 NULL) == NULL || !avail_spare) &&
2656 is_replacing_spare(config_root, tgt, 1)) {
2657 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2658 "can only be replaced by another hot spare"));
2659 free(newname);
2660 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2661 }
2662
2663 free(newname);
2664
2665 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2666 return (-1);
2667
2668 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2669
2670 zcmd_free_nvlists(&zc);
2671
2672 if (ret == 0) {
2673 if (rootpool) {
2674 /*
2675 * XXX need a better way to prevent user from
2676 * booting up a half-baked vdev.
2677 */
2678 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2679 "sure to wait until resilver is done "
2680 "before rebooting.\n"));
2681 }
2682 return (0);
2683 }
2684
2685 switch (errno) {
2686 case ENOTSUP:
2687 /*
2688 * Can't attach to or replace this type of vdev.
2689 */
2690 if (replacing) {
2691 uint64_t version = zpool_get_prop_int(zhp,
2692 ZPOOL_PROP_VERSION, NULL);
2693
2694 if (islog)
2695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2696 "cannot replace a log with a spare"));
2697 else if (version >= SPA_VERSION_MULTI_REPLACE)
2698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2699 "already in replacing/spare config; wait "
2700 "for completion or use 'zpool detach'"));
2701 else
2702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2703 "cannot replace a replacing device"));
2704 } else {
2705 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2706 "can only attach to mirrors and top-level "
2707 "disks"));
2708 }
2709 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2710 break;
2711
2712 case EINVAL:
2713 /*
2714 * The new device must be a single disk.
2715 */
2716 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2717 "new device must be a single disk"));
2718 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2719 break;
2720
2721 case EBUSY:
2722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2723 new_disk);
2724 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2725 break;
2726
2727 case EOVERFLOW:
2728 /*
2729 * The new device is too small.
2730 */
2731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2732 "device is too small"));
2733 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2734 break;
2735
2736 case EDOM:
2737 /*
2738 * The new device has a different optimal sector size.
2739 */
2740 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2741 "new device has a different optimal sector size; use the "
2742 "option '-o ashift=N' to override the optimal size"));
2743 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2744 break;
2745
2746 case ENAMETOOLONG:
2747 /*
2748 * The resulting top-level vdev spec won't fit in the label.
2749 */
2750 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2751 break;
2752
2753 default:
2754 (void) zpool_standard_error(hdl, errno, msg);
2755 }
2756
2757 return (-1);
2758 }
2759
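/*
 * Illustrative sketch (not part of the original source): replace one
 * disk with another. Real callers build 'nvroot' with make_root_vdev()
 * from the zpool command, which also labels the new disk; this minimal
 * hand-built config skips those steps, and the paths are hypothetical.
 *
 *	nvlist_t *child, *nvroot;
 *
 *	verify(nvlist_alloc(&child, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(child, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(child, ZPOOL_CONFIG_PATH,
 *	    "/dev/sdb") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, 1) == 0);
 *
 *	(void) zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot,
 *	    B_TRUE);
 *
 *	nvlist_free(child);
 *	nvlist_free(nvroot);
 */
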
2760 /*
2761 * Detach the specified device.
2762 */
2763 int
2764 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2765 {
2766 zfs_cmd_t zc = {"\0"};
2767 char msg[1024];
2768 nvlist_t *tgt;
2769 boolean_t avail_spare, l2cache;
2770 libzfs_handle_t *hdl = zhp->zpool_hdl;
2771
2772 (void) snprintf(msg, sizeof (msg),
2773 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2774
2775 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2776 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2777 NULL)) == 0)
2778 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2779
2780 if (avail_spare)
2781 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2782
2783 if (l2cache)
2784 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2785
2786 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2787
2788 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2789 return (0);
2790
2791 switch (errno) {
2792
2793 case ENOTSUP:
2794 /*
2795 * Can't detach from this type of vdev.
2796 */
2797 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2798 "applicable to mirror and replacing vdevs"));
2799 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2800 break;
2801
2802 case EBUSY:
2803 /*
2804 * There are no other replicas of this device.
2805 */
2806 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2807 break;
2808
2809 default:
2810 (void) zpool_standard_error(hdl, errno, msg);
2811 }
2812
2813 return (-1);
2814 }
2815
2816 /*
2817 * Find a mirror vdev in the source nvlist.
2818 *
2819 * The mchild array contains a list of disks in one of the top-level mirrors
2820 * of the source pool. The schild array contains a list of disks that the
2821 * user specified on the command line. We loop over the mchild array to
2822 * see if any entry in the schild array matches.
2823 *
2824 * If a disk in the mchild array is found in the schild array, we return
2825 * the index of that entry. Otherwise we return -1.
2826 */
2827 static int
2828 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2829 nvlist_t **schild, uint_t schildren)
2830 {
2831 uint_t mc;
2832
2833 for (mc = 0; mc < mchildren; mc++) {
2834 uint_t sc;
2835 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2836 mchild[mc], 0);
2837
2838 for (sc = 0; sc < schildren; sc++) {
2839 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2840 schild[sc], 0);
2841 boolean_t result = (strcmp(mpath, spath) == 0);
2842
2843 free(spath);
2844 if (result) {
2845 free(mpath);
2846 return (mc);
2847 }
2848 }
2849
2850 free(mpath);
2851 }
2852
2853 return (-1);
2854 }
2855
2856 /*
2857 * Split a mirror pool. If 'newroot' points to NULL, then a new nvlist
2858 * is generated and it is the responsibility of the caller to free it.
2859 */
2860 int
2861 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2862 nvlist_t *props, splitflags_t flags)
2863 {
2864 zfs_cmd_t zc = {"\0"};
2865 char msg[1024];
2866 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2867 nvlist_t **varray = NULL, *zc_props = NULL;
2868 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2869 libzfs_handle_t *hdl = zhp->zpool_hdl;
2870 uint64_t vers;
2871 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2872 int retval = 0;
2873
2874 (void) snprintf(msg, sizeof (msg),
2875 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2876
2877 if (!zpool_name_valid(hdl, B_FALSE, newname))
2878 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2879
2880 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2881 (void) fprintf(stderr, gettext("Internal error: unable to "
2882 "retrieve pool configuration\n"));
2883 return (-1);
2884 }
2885
2886 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2887 == 0);
2888 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2889
2890 if (props) {
2891 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2892 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2893 props, vers, flags, msg)) == NULL)
2894 return (-1);
2895 }
2896
2897 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2898 &children) != 0) {
2899 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2900 "Source pool is missing vdev tree"));
2901 nvlist_free(zc_props);
2902 return (-1);
2903 }
2904
2905 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2906 vcount = 0;
2907
2908 if (*newroot == NULL ||
2909 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2910 &newchild, &newchildren) != 0)
2911 newchildren = 0;
2912
2913 for (c = 0; c < children; c++) {
2914 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2915 char *type;
2916 nvlist_t **mchild, *vdev;
2917 uint_t mchildren;
2918 int entry;
2919
2920 /*
2921 * Unlike cache & spares, slogs are stored in the
2922 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2923 */
2924 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2925 &is_log);
2926 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2927 &is_hole);
2928 if (is_log || is_hole) {
2929 /*
2930 * Create a hole vdev and put it in the config.
2931 */
2932 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2933 goto out;
2934 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2935 VDEV_TYPE_HOLE) != 0)
2936 goto out;
2937 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2938 1) != 0)
2939 goto out;
2940 if (lastlog == 0)
2941 lastlog = vcount;
2942 varray[vcount++] = vdev;
2943 continue;
2944 }
2945 lastlog = 0;
2946 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2947 == 0);
2948 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2949 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2950 "Source pool must be composed only of mirrors\n"));
2951 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2952 goto out;
2953 }
2954
2955 verify(nvlist_lookup_nvlist_array(child[c],
2956 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2957
2958 /* find or add an entry for this top-level vdev */
2959 if (newchildren > 0 &&
2960 (entry = find_vdev_entry(zhp, mchild, mchildren,
2961 newchild, newchildren)) >= 0) {
2962 /* We found a disk that the user specified. */
2963 vdev = mchild[entry];
2964 ++found;
2965 } else {
2966 /* User didn't specify a disk for this vdev. */
2967 vdev = mchild[mchildren - 1];
2968 }
2969
2970 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2971 goto out;
2972 }
2973
2974 /* did we find every disk the user specified? */
2975 if (found != newchildren) {
2976 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2977 "include at most one disk from each mirror"));
2978 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2979 goto out;
2980 }
2981
2982 /* Prepare the nvlist for populating. */
2983 if (*newroot == NULL) {
2984 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2985 goto out;
2986 freelist = B_TRUE;
2987 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2988 VDEV_TYPE_ROOT) != 0)
2989 goto out;
2990 } else {
2991 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2992 }
2993
2994 /* Add all the children we found */
2995 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2996 lastlog == 0 ? vcount : lastlog) != 0)
2997 goto out;
2998
2999 /*
3000 * If we're just doing a dry run, exit now with success.
3001 */
3002 if (flags.dryrun) {
3003 memory_err = B_FALSE;
3004 freelist = B_FALSE;
3005 goto out;
3006 }
3007
3008 /* now build up the config list & call the ioctl */
3009 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3010 goto out;
3011
3012 if (nvlist_add_nvlist(newconfig,
3013 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3014 nvlist_add_string(newconfig,
3015 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3016 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3017 goto out;
3018
3019 /*
3020 * The new pool is automatically part of the namespace unless we
3021 * explicitly export it.
3022 */
3023 if (!flags.import)
3024 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3025 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3026 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3027 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3028 goto out;
3029 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3030 goto out;
3031
3032 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3033 retval = zpool_standard_error(hdl, errno, msg);
3034 goto out;
3035 }
3036
3037 freelist = B_FALSE;
3038 memory_err = B_FALSE;
3039
3040 out:
3041 if (varray != NULL) {
3042 int v;
3043
3044 for (v = 0; v < vcount; v++)
3045 nvlist_free(varray[v]);
3046 free(varray);
3047 }
3048 zcmd_free_nvlists(&zc);
3049 nvlist_free(zc_props);
3050 nvlist_free(newconfig);
3051 if (freelist) {
3052 nvlist_free(*newroot);
3053 *newroot = NULL;
3054 }
3055
3056 if (retval != 0)
3057 return (retval);
3058
3059 if (memory_err)
3060 return (no_memory(hdl));
3061
3062 return (0);
3063 }
3064
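/*
 * Illustrative sketch (not part of the original source): dry-run split
 * of a mirrored pool into a hypothetical pool named "newpool", letting
 * the library pick the last disk of each mirror.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = B_TRUE;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		... inspect 'newroot', e.g. with dump_nvlist() ...
 *		nvlist_free(newroot);
 *	}
 */
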
3065 /*
3066 * Remove the given device. Currently, this is supported only for hot spares,
3067 * cache, and log devices.
3068 */
3069 int
3070 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3071 {
3072 zfs_cmd_t zc = {"\0"};
3073 char msg[1024];
3074 nvlist_t *tgt;
3075 boolean_t avail_spare, l2cache, islog;
3076 libzfs_handle_t *hdl = zhp->zpool_hdl;
3077 uint64_t version;
3078
3079 (void) snprintf(msg, sizeof (msg),
3080 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3081
3082 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3083 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3084 &islog)) == 0)
3085 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3086 /*
3087 * XXX - this should just go away.
3088 */
3089 if (!avail_spare && !l2cache && !islog) {
3090 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3091 "only inactive hot spares, cache, "
3092 "or log devices can be removed"));
3093 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3094 }
3095
3096 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3097 if (islog && version < SPA_VERSION_HOLES) {
3098 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3099 "pool must be upgrade to support log removal"));
3100 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3101 }
3102
3103 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3104
3105 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3106 return (0);
3107
3108 return (zpool_standard_error(hdl, errno, msg));
3109 }
3110
3111 /*
3112 * Clear the errors for the pool, or the particular device if specified.
3113 */
3114 int
3115 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3116 {
3117 zfs_cmd_t zc = {"\0"};
3118 char msg[1024];
3119 nvlist_t *tgt;
3120 zpool_rewind_policy_t policy;
3121 boolean_t avail_spare, l2cache;
3122 libzfs_handle_t *hdl = zhp->zpool_hdl;
3123 nvlist_t *nvi = NULL;
3124 int error;
3125
3126 if (path)
3127 (void) snprintf(msg, sizeof (msg),
3128 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3129 path);
3130 else
3131 (void) snprintf(msg, sizeof (msg),
3132 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3133 zhp->zpool_name);
3134
3135 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3136 if (path) {
3137 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3138 &l2cache, NULL)) == 0)
3139 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3140
3141 /*
3142 * Don't allow error clearing for hot spares. Do allow
3143 * error clearing for l2cache devices.
3144 */
3145 if (avail_spare)
3146 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3147
3148 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3149 &zc.zc_guid) == 0);
3150 }
3151
3152 zpool_get_rewind_policy(rewindnvl, &policy);
3153 zc.zc_cookie = policy.zrp_request;
3154
3155 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3156 return (-1);
3157
3158 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3159 return (-1);
3160
3161 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3162 errno == ENOMEM) {
3163 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3164 zcmd_free_nvlists(&zc);
3165 return (-1);
3166 }
3167 }
3168
3169 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3170 errno != EPERM && errno != EACCES)) {
3171 if (policy.zrp_request &
3172 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3173 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3174 zpool_rewind_exclaim(hdl, zc.zc_name,
3175 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3176 nvi);
3177 nvlist_free(nvi);
3178 }
3179 zcmd_free_nvlists(&zc);
3180 return (0);
3181 }
3182
3183 zcmd_free_nvlists(&zc);
3184 return (zpool_standard_error(hdl, errno, msg));
3185 }
3186
3187 /*
3188 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3189 */
3190 int
3191 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3192 {
3193 zfs_cmd_t zc = {"\0"};
3194 char msg[1024];
3195 libzfs_handle_t *hdl = zhp->zpool_hdl;
3196
3197 (void) snprintf(msg, sizeof (msg),
3198 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3199 (u_longlong_t)guid);
3200
3201 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3202 zc.zc_guid = guid;
3203 zc.zc_cookie = ZPOOL_NO_REWIND;
3204
3205 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3206 return (0);
3207
3208 return (zpool_standard_error(hdl, errno, msg));
3209 }
3210
3211 /*
3212 * Change the GUID for a pool.
3213 */
3214 int
3215 zpool_reguid(zpool_handle_t *zhp)
3216 {
3217 char msg[1024];
3218 libzfs_handle_t *hdl = zhp->zpool_hdl;
3219 zfs_cmd_t zc = {"\0"};
3220
3221 (void) snprintf(msg, sizeof (msg),
3222 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3223
3224 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3225 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3226 return (0);
3227
3228 return (zpool_standard_error(hdl, errno, msg));
3229 }
3230
3231 /*
3232 * Reopen the pool.
3233 */
3234 int
3235 zpool_reopen(zpool_handle_t *zhp)
3236 {
3237 zfs_cmd_t zc = {"\0"};
3238 char msg[1024];
3239 libzfs_handle_t *hdl = zhp->zpool_hdl;
3240
3241 (void) snprintf(msg, sizeof (msg),
3242 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3243 zhp->zpool_name);
3244
3245 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3246 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3247 return (0);
3248 return (zpool_standard_error(hdl, errno, msg));
3249 }
3250
3251 #if defined(__sun__) || defined(__sun)
3252 /*
3253 * Convert from a devid string to a path.
3254 */
3255 static char *
3256 devid_to_path(char *devid_str)
3257 {
3258 ddi_devid_t devid;
3259 char *minor;
3260 char *path;
3261 devid_nmlist_t *list = NULL;
3262 int ret;
3263
3264 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3265 return (NULL);
3266
3267 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3268
3269 devid_str_free(minor);
3270 devid_free(devid);
3271
3272 if (ret != 0)
3273 return (NULL);
3274
3275 /*
3276 * In case the strdup() fails, we will just return NULL below.
3277 */
3278 path = strdup(list[0].devname);
3279
3280 devid_free_nmlist(list);
3281
3282 return (path);
3283 }
3284
3285 /*
3286 * Convert from a path to a devid string.
3287 */
3288 static char *
3289 path_to_devid(const char *path)
3290 {
3291 int fd;
3292 ddi_devid_t devid;
3293 char *minor, *ret;
3294
3295 if ((fd = open(path, O_RDONLY)) < 0)
3296 return (NULL);
3297
3298 minor = NULL;
3299 ret = NULL;
3300 if (devid_get(fd, &devid) == 0) {
3301 if (devid_get_minor_name(fd, &minor) == 0)
3302 ret = devid_str_encode(devid, minor);
3303 if (minor != NULL)
3304 devid_str_free(minor);
3305 devid_free(devid);
3306 }
3307 (void) close(fd);
3308
3309 return (ret);
3310 }
3311
3312 /*
3313 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3314 * ignore any failure here, since a common case is for an unprivileged user to
3315 * type 'zpool status', and we'll display the correct information anyway.
3316 */
3317 static void
3318 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3319 {
3320 zfs_cmd_t zc = {"\0"};
3321
3322 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3323 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3324 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3325 &zc.zc_guid) == 0);
3326
3327 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3328 }
3329 #endif /* sun */
3330
3331 /*
3332 * Remove partition suffix from a vdev path. Partition suffixes may take three
3333 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3334 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3335 * third case only occurs when preceded by a string matching the regular
3336 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3337 *
3338 * The caller must free the returned string.
3339 */
3340 char *
3341 zfs_strip_partition(char *path)
3342 {
3343 char *tmp = strdup(path);
3344 char *part = NULL, *d = NULL;
3345 if (!tmp)
3346 return (NULL);
3347
3348 if ((part = strstr(tmp, "-part")) && part != tmp) {
3349 d = part + 5;
3350 } else if ((part = strrchr(tmp, 'p')) &&
3351 part > tmp + 1 && isdigit(*(part-1))) {
3352 d = part + 1;
3353 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3354 tmp[1] == 'd') {
3355 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3356 } else if (strncmp("xvd", tmp, 3) == 0) {
3357 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3358 }
3359 if (part && d && *d != '\0') {
3360 for (; isdigit(*d); d++) { }
3361 if (*d == '\0')
3362 *part = '\0';
3363 }
3364
3365 return (tmp);
3366 }
3367
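/*
 * Illustrative examples (not part of the original source) of the three
 * suffix forms handled above:
 *
 *	zfs_strip_partition("sda3")		returns "sda"
 *	zfs_strip_partition("md0p1")		returns "md0"
 *	zfs_strip_partition("mpatha-part2")	returns "mpatha"
 *
 * The result is a fresh allocation which the caller must free().
 */
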
3368 /*
3369 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3370 *
3371 * path: /dev/sda1
3372 * returns: /dev/sda
3373 *
3374 * Returned string must be freed.
3375 */
3376 char *
3377 zfs_strip_partition_path(char *path)
3378 {
3379 char *newpath = strdup(path);
3380 char *sd_offset;
3381 char *new_sd;
3382
3383 if (!newpath)
3384 return (NULL);
3385
3386 /* Point to "sda1" part of "/dev/sda1" */
3387 sd_offset = strrchr(newpath, '/') + 1;
3388
3389 /* Get our new name "sda" */
3390 new_sd = zfs_strip_partition(sd_offset);
3391 if (!new_sd) {
3392 free(newpath);
3393 return (NULL);
3394 }
3395
3396 /* Paste the "sda" where "sda1" was */
3397 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3398
3399 /* Free temporary "sda" */
3400 free(new_sd);
3401
3402 return (newpath);
3403 }
3404
3405 #define PATH_BUF_LEN 64
3406
3407 /*
3408 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3409 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3410 * We also check if this is a whole disk, in which case we strip off the
3411 * trailing 's0' slice name.
3412 *
3413 * This routine is also responsible for identifying when disks have been
3414 * reconfigured in a new location. The kernel will have opened the device by
3415 * devid, but the path will still refer to the old location. To catch this, we
3416 * first do a path -> devid translation (which is fast for the common case). If
3417 * the devid matches, we're done. If not, we do a reverse devid -> path
3418 * translation and issue the appropriate ioctl() to update the path of the vdev.
3419 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3420 * of these checks.
3421 */
3422 char *
3423 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3424 int name_flags)
3425 {
3426 char *path, *type, *env;
3427 uint64_t value;
3428 char buf[PATH_BUF_LEN];
3429 char tmpbuf[PATH_BUF_LEN];
3430
3431 env = getenv("ZPOOL_VDEV_NAME_PATH");
3432 if (env && (strtoul(env, NULL, 0) > 0 ||
3433 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3434 name_flags |= VDEV_NAME_PATH;
3435
3436 env = getenv("ZPOOL_VDEV_NAME_GUID");
3437 if (env && (strtoul(env, NULL, 0) > 0 ||
3438 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3439 name_flags |= VDEV_NAME_GUID;
3440
3441 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3442 if (env && (strtoul(env, NULL, 0) > 0 ||
3443 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3444 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3445
3446 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3447 name_flags & VDEV_NAME_GUID) {
3448 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3449 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3450 path = buf;
3451 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3452 #if defined(__sun__) || defined(__sun)
3453 /*
3454 * Live VDEV path updates to a kernel VDEV during a
3455 * zpool_vdev_name lookup are not supported on Linux.
3456 */
3457 char *devid;
3458 vdev_stat_t *vs;
3459 uint_t vsc;
3460
3461 /*
3462 * If the device is dead (faulted, offline, etc) then don't
3463 * bother opening it. Otherwise we may be forcing the user to
3464 * open a misbehaving device, which can have undesirable
3465 * effects.
3466 */
3467 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3468 (uint64_t **)&vs, &vsc) != 0 ||
3469 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3470 zhp != NULL &&
3471 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3472 /*
3473 * Determine if the current path is correct.
3474 */
3475 char *newdevid = path_to_devid(path);
3476
3477 if (newdevid == NULL ||
3478 strcmp(devid, newdevid) != 0) {
3479 char *newpath;
3480
3481 if ((newpath = devid_to_path(devid)) != NULL) {
3482 /*
3483 * Update the path appropriately.
3484 */
3485 set_path(zhp, nv, newpath);
3486 if (nvlist_add_string(nv,
3487 ZPOOL_CONFIG_PATH, newpath) == 0)
3488 verify(nvlist_lookup_string(nv,
3489 ZPOOL_CONFIG_PATH,
3490 &path) == 0);
3491 free(newpath);
3492 }
3493 }
3494
3495 if (newdevid)
3496 devid_str_free(newdevid);
3497 }
3498 #endif /* sun */
3499
3500 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3501 char *rp = realpath(path, NULL);
3502 if (rp) {
3503 strlcpy(buf, rp, sizeof (buf));
3504 path = buf;
3505 free(rp);
3506 }
3507 }
3508
3509 /*
3510 * For a block device only use the name.
3511 */
3512 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3513 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3514 !(name_flags & VDEV_NAME_PATH)) {
3515 path = strrchr(path, '/');
3516 path++;
3517 }
3518
3519 /*
3520 * Remove the partition from the path if this is a whole disk.
3521 */
3522 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3523 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3524 return (zfs_strip_partition(path));
3525 }
3526 } else {
3527 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3528
3529 /*
3530 * If it's a raidz device, we need to stick in the parity level.
3531 */
3532 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3533 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3534 &value) == 0);
3535 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3536 (u_longlong_t)value);
3537 path = buf;
3538 }
3539
3540 /*
3541 * We identify each top-level vdev by using a <type-id>
3542 * naming convention.
3543 */
3544 if (name_flags & VDEV_NAME_TYPE_ID) {
3545 uint64_t id;
3546 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3547 &id) == 0);
3548 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3549 path, (u_longlong_t)id);
3550 path = tmpbuf;
3551 }
3552 }
3553
3554 return (zfs_strdup(hdl, path));
3555 }
3556
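/*
 * Illustrative sketch (not part of the original source): request full
 * paths and <type-id> names for top-level vdevs. Per the checks above,
 * setting ZPOOL_VDEV_NAME_PATH=YES in the environment has the same
 * effect as passing VDEV_NAME_PATH.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv,
 *	    VDEV_NAME_PATH | VDEV_NAME_TYPE_ID);
 *	...
 *	free(name);
 */
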
3557 static int
3558 zbookmark_mem_compare(const void *a, const void *b)
3559 {
3560 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3561 }
3562
3563 /*
3564 * Retrieve the persistent error log, uniquify the members, and return to the
3565 * caller.
3566 */
3567 int
3568 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3569 {
3570 zfs_cmd_t zc = {"\0"};
3571 uint64_t count;
3572 zbookmark_phys_t *zb = NULL;
3573 int i;
3574
3575 /*
3576 * Retrieve the raw error list from the kernel. If the number of errors
3577 * has increased, allocate more space and continue until we get the
3578 * entire list.
3579 */
3580 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3581 &count) == 0);
3582 if (count == 0)
3583 return (0);
3584 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3585 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3586 return (-1);
3587 zc.zc_nvlist_dst_size = count;
3588 (void) strcpy(zc.zc_name, zhp->zpool_name);
3589 for (;;) {
3590 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3591 &zc) != 0) {
3592 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3593 if (errno == ENOMEM) {
3594 void *dst;
3595
3596 count = zc.zc_nvlist_dst_size;
3597 dst = zfs_alloc(zhp->zpool_hdl, count *
3598 sizeof (zbookmark_phys_t));
3599 if (dst == NULL)
3600 return (-1);
3601 zc.zc_nvlist_dst = (uintptr_t)dst;
3602 } else {
3603 return (-1);
3604 }
3605 } else {
3606 break;
3607 }
3608 }
3609
3610 /*
3611 * Sort the resulting bookmarks. This is a little confusing due to the
3612 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3613 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3614 * _not_ copied as part of the process. So we point the start of our
3615 * array appropriately and decrement the total number of elements.
3616 */
3617 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3618 zc.zc_nvlist_dst_size;
3619 count -= zc.zc_nvlist_dst_size;
3620
3621 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3622
3623 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3624
3625 /*
3626 * Fill in the nverrlistp with nvlists of dataset and object numbers.
3627 */
3628 for (i = 0; i < count; i++) {
3629 nvlist_t *nv;
3630
3631 /* ignoring zb_blkid and zb_level for now */
3632 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3633 zb[i-1].zb_object == zb[i].zb_object)
3634 continue;
3635
3636 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3637 goto nomem;
3638 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3639 zb[i].zb_objset) != 0) {
3640 nvlist_free(nv);
3641 goto nomem;
3642 }
3643 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3644 zb[i].zb_object) != 0) {
3645 nvlist_free(nv);
3646 goto nomem;
3647 }
3648 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3649 nvlist_free(nv);
3650 goto nomem;
3651 }
3652 nvlist_free(nv);
3653 }
3654
3655 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3656 return (0);
3657
3658 nomem:
3659 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3660 return (no_memory(zhp->zpool_hdl));
3661 }
3662
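/*
 * Illustrative sketch (not part of the original source): walk the
 * uniquified error list and print a path for each damaged object.
 * When the pool reports no errors, *nverrlistp is left untouched,
 * hence the NULL initialization.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem))) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *			char pathname[MAXPATHLEN * 2];
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *			    sizeof (pathname));
 *			(void) printf("%s\n", pathname);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */
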
3663 /*
3664 * Upgrade a ZFS pool to the latest on-disk version.
3665 */
3666 int
3667 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3668 {
3669 zfs_cmd_t zc = {"\0"};
3670 libzfs_handle_t *hdl = zhp->zpool_hdl;
3671
3672 (void) strcpy(zc.zc_name, zhp->zpool_name);
3673 zc.zc_cookie = new_version;
3674
3675 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3676 return (zpool_standard_error_fmt(hdl, errno,
3677 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3678 zhp->zpool_name));
3679 return (0);
3680 }
3681
3682 void
3683 zfs_save_arguments(int argc, char **argv, char *string, int len)
3684 {
3685 int i;
3686
3687 (void) strlcpy(string, basename(argv[0]), len);
3688 for (i = 1; i < argc; i++) {
3689 (void) strlcat(string, " ", len);
3690 (void) strlcat(string, argv[i], len);
3691 }
3692 }
3693
3694 int
3695 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3696 {
3697 zfs_cmd_t zc = {"\0"};
3698 nvlist_t *args;
3699 int err;
3700
3701 args = fnvlist_alloc();
3702 fnvlist_add_string(args, "message", message);
3703 err = zcmd_write_src_nvlist(hdl, &zc, args);
3704 if (err == 0)
3705 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3706 nvlist_free(args);
3707 zcmd_free_nvlists(&zc);
3708 return (err);
3709 }
3710
3711 /*
3712 * Perform ioctl to get some command history of a pool.
3713 *
3714 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3715 * logical offset of the history buffer to start reading from.
3716 *
3717 * Upon return, 'off' is the next logical offset to read from and
3718 * 'len' is the actual number of bytes read into 'buf'.
3719 */
3720 static int
3721 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3722 {
3723 zfs_cmd_t zc = {"\0"};
3724 libzfs_handle_t *hdl = zhp->zpool_hdl;
3725
3726 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3727
3728 zc.zc_history = (uint64_t)(uintptr_t)buf;
3729 zc.zc_history_len = *len;
3730 zc.zc_history_offset = *off;
3731
3732 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3733 switch (errno) {
3734 case EPERM:
3735 return (zfs_error_fmt(hdl, EZFS_PERM,
3736 dgettext(TEXT_DOMAIN,
3737 "cannot show history for pool '%s'"),
3738 zhp->zpool_name));
3739 case ENOENT:
3740 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3741 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3742 "'%s'"), zhp->zpool_name));
3743 case ENOTSUP:
3744 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3745 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3746 "'%s', pool must be upgraded"), zhp->zpool_name));
3747 default:
3748 return (zpool_standard_error_fmt(hdl, errno,
3749 dgettext(TEXT_DOMAIN,
3750 "cannot get history for '%s'"), zhp->zpool_name));
3751 }
3752 }
3753
3754 *len = zc.zc_history_len;
3755 *off = zc.zc_history_offset;
3756
3757 return (0);
3758 }
3759
3760 /*
3761 * Process the buffer of nvlists, unpacking and storing each nvlist record
3762 * into 'records'. 'leftover' is set to the number of bytes that weren't
3763 * processed as there wasn't a complete record.
3764 */
3765 int
3766 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3767 nvlist_t ***records, uint_t *numrecords)
3768 {
3769 uint64_t reclen;
3770 nvlist_t *nv;
3771 int i;
3772 void *tmp;
3773
3774 while (bytes_read > sizeof (reclen)) {
3775
3776 /* get length of packed record (stored as little endian) */
3777 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3778 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3779
3780 if (bytes_read < sizeof (reclen) + reclen)
3781 break;
3782
3783 /* unpack record */
3784 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3785 return (ENOMEM);
3786 bytes_read -= sizeof (reclen) + reclen;
3787 buf += sizeof (reclen) + reclen;
3788
3789 /* add record to nvlist array */
3790 (*numrecords)++;
3791 if (ISP2(*numrecords + 1)) {
3792 tmp = realloc(*records,
3793 *numrecords * 2 * sizeof (nvlist_t *));
3794 if (tmp == NULL) {
3795 nvlist_free(nv);
3796 (*numrecords)--;
3797 return (ENOMEM);
3798 }
3799 *records = tmp;
3800 }
3801 (*records)[*numrecords - 1] = nv;
3802 }
3803
3804 *leftover = bytes_read;
3805 return (0);
3806 }
3807
3808 /*
3809 * Retrieve the command history of a pool.
3810 */
3811 int
3812 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3813 {
3814 char *buf;
3815 int buflen = 128 * 1024;
3816 uint64_t off = 0;
3817 nvlist_t **records = NULL;
3818 uint_t numrecords = 0;
3819 int err, i;
3820
3821 buf = malloc(buflen);
3822 if (buf == NULL)
3823 return (ENOMEM);
3824 do {
3825 uint64_t bytes_read = buflen;
3826 uint64_t leftover;
3827
3828 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3829 break;
3830
3831 /* if nothing else was read in, we're at EOF, just return */
3832 if (!bytes_read)
3833 break;
3834
3835 if ((err = zpool_history_unpack(buf, bytes_read,
3836 &leftover, &records, &numrecords)) != 0)
3837 break;
3838 off -= leftover;
3839 if (leftover == bytes_read) {
3840 /*
3841 * no progress was made because the buffer is not big enough
3842 * to hold this record; resize and retry.
3843 */
3844 buflen *= 2;
3845 free(buf);
3846 buf = malloc(buflen);
3847 if (buf == NULL)
3848 return (ENOMEM);
3849 }
3850
3851 /* CONSTCOND */
3852 } while (1);
3853
3854 free(buf);
3855
3856 if (!err) {
3857 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3858 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3859 records, numrecords) == 0);
3860 }
3861 for (i = 0; i < numrecords; i++)
3862 nvlist_free(records[i]);
3863 free(records);
3864
3865 return (err);
3866 }
3867
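/*
 * Illustrative sketch (not part of the original source): print the
 * command line recorded with each history entry. Internal events carry
 * no ZPOOL_HIST_CMD pair and are skipped by the failed lookup.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */
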
3868 /*
3869 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3870 * If there is a new event available 'nvp' will contain a newly allocated
3871 * nvlist and 'dropped' will be set to the number of missed events since
3872 * the last call to this function. When 'nvp' is set to NULL it indicates
3873 * no new events are available. In either case the function returns 0 and
3874 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3875 * function will return a non-zero value. When the function is called in
3876 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3877 * it will not return until a new event is available.
3878 */
3879 int
3880 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3881 int *dropped, unsigned flags, int zevent_fd)
3882 {
3883 zfs_cmd_t zc = {"\0"};
3884 int error = 0;
3885
3886 *nvp = NULL;
3887 *dropped = 0;
3888 zc.zc_cleanup_fd = zevent_fd;
3889
3890 if (flags & ZEVENT_NONBLOCK)
3891 zc.zc_guid = ZEVENT_NONBLOCK;
3892
3893 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3894 return (-1);
3895
3896 retry:
3897 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3898 switch (errno) {
3899 case ESHUTDOWN:
3900 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3901 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3902 goto out;
3903 case ENOENT:
3904 /* Blocking error case should not occur */
3905 if (!(flags & ZEVENT_NONBLOCK))
3906 error = zpool_standard_error_fmt(hdl, errno,
3907 dgettext(TEXT_DOMAIN, "cannot get event"));
3908
3909 goto out;
3910 case ENOMEM:
3911 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3912 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3913 dgettext(TEXT_DOMAIN, "cannot get event"));
3914 goto out;
3915 } else {
3916 goto retry;
3917 }
3918 default:
3919 error = zpool_standard_error_fmt(hdl, errno,
3920 dgettext(TEXT_DOMAIN, "cannot get event"));
3921 goto out;
3922 }
3923 }
3924
3925 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3926 if (error != 0)
3927 goto out;
3928
3929 *dropped = (int)zc.zc_cookie;
3930 out:
3931 zcmd_free_nvlists(&zc);
3932
3933 return (error);
3934 }
3935
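/*
 * Illustrative sketch (not part of the original source): drain any
 * pending events without blocking. ZFS_DEV resolves to "/dev/zfs";
 * each open file descriptor carries its own event cursor.
 *
 *	nvlist_t *event;
 *	int zevent_fd, dropped;
 *
 *	if ((zevent_fd = open(ZFS_DEV, O_RDWR)) < 0)
 *		return;
 *	while (zpool_events_next(hdl, &event, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && event != NULL) {
 *		if (dropped > 0)
 *			(void) printf("missed %d events\n", dropped);
 *		nvlist_free(event);
 *	}
 *	(void) close(zevent_fd);
 */
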
3936 /*
3937 * Clear all events.
3938 */
3939 int
3940 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3941 {
3942 zfs_cmd_t zc = {"\0"};
3943 char msg[1024];
3944
3945 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3946 "cannot clear events"));
3947
3948 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3949 return (zpool_standard_error_fmt(hdl, errno, msg));
3950
3951 if (count != NULL)
3952 *count = (int)zc.zc_cookie; /* # of events cleared */
3953
3954 return (0);
3955 }
3956
3957 /*
3958 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3959 * the passed zevent_fd file handle. On success zero is returned,
3960 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
3961 */
3962 int
3963 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
3964 {
3965 zfs_cmd_t zc = {"\0"};
3966 int error = 0;
3967
3968 zc.zc_guid = eid;
3969 zc.zc_cleanup_fd = zevent_fd;
3970
3971 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
3972 switch (errno) {
3973 case ENOENT:
3974 error = zfs_error_fmt(hdl, EZFS_NOENT,
3975 dgettext(TEXT_DOMAIN, "cannot get event"));
3976 break;
3977
3978 case ENOMEM:
3979 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3980 dgettext(TEXT_DOMAIN, "cannot get event"));
3981 break;
3982
3983 default:
3984 error = zpool_standard_error_fmt(hdl, errno,
3985 dgettext(TEXT_DOMAIN, "cannot get event"));
3986 break;
3987 }
3988 }
3989
3990 return (error);
3991 }
3992
3993 void
3994 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3995 char *pathname, size_t len)
3996 {
3997 zfs_cmd_t zc = {"\0"};
3998 boolean_t mounted = B_FALSE;
3999 char *mntpnt = NULL;
4000 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4001
4002 if (dsobj == 0) {
4003 /* special case for the MOS */
4004 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4005 (longlong_t)obj);
4006 return;
4007 }
4008
4009 /* get the dataset's name */
4010 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4011 zc.zc_obj = dsobj;
4012 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4013 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4014 /* just write out a path of two object numbers */
4015 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4016 (longlong_t)dsobj, (longlong_t)obj);
4017 return;
4018 }
4019 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4020
4021 /* find out if the dataset is mounted */
4022 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4023
4024 /* get the corrupted object's path */
4025 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4026 zc.zc_obj = obj;
4027 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4028 &zc) == 0) {
4029 if (mounted) {
4030 (void) snprintf(pathname, len, "%s%s", mntpnt,
4031 zc.zc_value);
4032 } else {
4033 (void) snprintf(pathname, len, "%s:%s",
4034 dsname, zc.zc_value);
4035 }
4036 } else {
4037 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4038 (longlong_t)obj);
4039 }
4040 free(mntpnt);
4041 }
4042
4043 /*
4044 * Read the EFI label from the config, if a label does not exist then
4045 * pass back the error to the caller. If the caller has passed a non-NULL
4046 * diskaddr argument then we set it to the starting address of the EFI
4047 * partition.
4048 */
4049 static int
4050 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4051 {
4052 char *path;
4053 int fd;
4054 char diskname[MAXPATHLEN];
4055 int err = -1;
4056
4057 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4058 return (err);
4059
4060 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4061 strrchr(path, '/'));
4062 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
4063 struct dk_gpt *vtoc;
4064
4065 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4066 if (sb != NULL)
4067 *sb = vtoc->efi_parts[0].p_start;
4068 efi_free(vtoc);
4069 }
4070 (void) close(fd);
4071 }
4072 return (err);
4073 }
4074
4075 /*
4076 * determine where a partition starts on a disk in the current
4077 * configuration
4078 */
4079 static diskaddr_t
4080 find_start_block(nvlist_t *config)
4081 {
4082 nvlist_t **child;
4083 uint_t c, children;
4084 diskaddr_t sb = MAXOFFSET_T;
4085 uint64_t wholedisk;
4086
4087 if (nvlist_lookup_nvlist_array(config,
4088 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4089 if (nvlist_lookup_uint64(config,
4090 ZPOOL_CONFIG_WHOLE_DISK,
4091 &wholedisk) != 0 || !wholedisk) {
4092 return (MAXOFFSET_T);
4093 }
4094 if (read_efi_label(config, &sb) < 0)
4095 sb = MAXOFFSET_T;
4096 return (sb);
4097 }
4098
4099 for (c = 0; c < children; c++) {
4100 sb = find_start_block(child[c]);
4101 if (sb != MAXOFFSET_T) {
4102 return (sb);
4103 }
4104 }
4105 return (MAXOFFSET_T);
4106 }
4107
4108 static int
4109 zpool_label_disk_check(char *path)
4110 {
4111 struct dk_gpt *vtoc;
4112 int fd, err;
4113
4114 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
4115 return (errno);
4116
4117 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4118 (void) close(fd);
4119 return (err);
4120 }
4121
4122 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4123 efi_free(vtoc);
4124 (void) close(fd);
4125 return (EIDRM);
4126 }
4127
4128 efi_free(vtoc);
4129 (void) close(fd);
4130 return (0);
4131 }
4132
4133 /*
4134 * Generate a unique partition name for the ZFS member. Partitions must
4135 * have unique names to ensure udev will be able to create symlinks under
4136 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4137 * of the form <pool>-<unique-id>.
4138 */
4139 static void
4140 zpool_label_name(char *label_name, int label_size)
4141 {
4142 uint64_t id = 0;
4143 int fd;
4144
4145 fd = open("/dev/urandom", O_RDONLY);
4146 if (fd >= 0) {
4147 if (read(fd, &id, sizeof (id)) != sizeof (id))
4148 id = 0;
4149
4150 close(fd);
4151 }
4152
4153 if (id == 0)
4154 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4155
4156 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
4157 }
4158
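/*
 * Illustrative sketch, not part of libzfs: the generated name is "zfs-"
 * followed by 16 hex digits, sized to fit the EFI partition name field used
 * below.  The helper name is hypothetical.
 */
static void
example_show_label_name(void)
{
	char name[EFI_PART_NAME_LEN];

	zpool_label_name(name, sizeof (name));
	(void) printf("partition label: %s\n", name);	/* e.g. zfs-0fe2... */
}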
4159 /*
4160 * Label an individual disk. The name provided is the short name,
4161 * stripped of any leading /dev path.
4162 */
4163 int
4164 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4165 {
4166 char path[MAXPATHLEN];
4167 struct dk_gpt *vtoc;
4168 int rval, fd;
4169 size_t resv = EFI_MIN_RESV_SIZE;
4170 uint64_t slice_size;
4171 diskaddr_t start_block;
4172 char errbuf[1024];
4173
4174 /* prepare an error message just in case */
4175 (void) snprintf(errbuf, sizeof (errbuf),
4176 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4177
4178 if (zhp) {
4179 nvlist_t *nvroot;
4180
4181 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4182 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4183
4184 if (zhp->zpool_start_block == 0)
4185 start_block = find_start_block(nvroot);
4186 else
4187 start_block = zhp->zpool_start_block;
4188 zhp->zpool_start_block = start_block;
4189 } else {
4190 /* new pool */
4191 start_block = NEW_START_BLOCK;
4192 }
4193
4194 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4195
4196 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4197 /*
4198 * This shouldn't happen. We've long since verified that this
4199 * is a valid device.
4200 */
4201 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4202 "label '%s': unable to open device: %d"), path, errno);
4203 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4204 }
4205
4206 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4207 /*
4208 * The only way this can fail is if we run out of memory, or we
4209 * were unable to read the disk's capacity
4210 */
4211 if (errno == ENOMEM)
4212 (void) no_memory(hdl);
4213
4214 (void) close(fd);
4215 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4216 "label '%s': unable to read disk capacity"), path);
4217
4218 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4219 }
4220
4221 slice_size = vtoc->efi_last_u_lba + 1;
4222 slice_size -= EFI_MIN_RESV_SIZE;
4223 if (start_block == MAXOFFSET_T)
4224 start_block = NEW_START_BLOCK;
4225 slice_size -= start_block;
4226 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4227
4228 vtoc->efi_parts[0].p_start = start_block;
4229 vtoc->efi_parts[0].p_size = slice_size;
4230
4231 /*
4232 * Why we use V_USR: V_BACKUP confuses users, and is considered
4233 * disposable by some EFI utilities (since EFI doesn't have a backup
4234 * slice). V_UNASSIGNED is supposed to be used only for zero size
4235 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4236 * etc. were all pretty specific. V_USR is as close to reality as we
4237 * can get, in the absence of V_OTHER.
4238 */
4239 vtoc->efi_parts[0].p_tag = V_USR;
4240 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4241
4242 vtoc->efi_parts[8].p_start = slice_size + start_block;
4243 vtoc->efi_parts[8].p_size = resv;
4244 vtoc->efi_parts[8].p_tag = V_RESERVED;
4245
4246 if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
4247 /*
4248 * Some block drivers (like pcata) may not support EFI
4249 * GPT labels. Print out a helpful error message directing
4250 * the user to manually label the disk and provide a
4251 * specific slice.
4252 */
4253 (void) close(fd);
4254 efi_free(vtoc);
4255
4256 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4257 "parted(8) and then provide a specific slice: %d"), rval);
4258 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4259 }
4260
4261 (void) close(fd);
4262 efi_free(vtoc);
4263
4264 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4265 (void) zfs_append_partition(path, MAXPATHLEN);
4266
4267 /* Wait for udev to signal the device has settled. */
4268 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4269 if (rval) {
4270 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4271 "detect device partitions on '%s': %d"), path, rval);
4272 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4273 }
4274
4275 /* We can't be too paranoid. Read the label back and verify it. */
4276 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4277 rval = zpool_label_disk_check(path);
4278 if (rval) {
4279 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4280 "EFI label on '%s' is damaged. Ensure\nthis device "
4281 "is not in in use, and is functioning properly: %d"),
4282 path, rval);
4283 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4284 }
4285
4286 return (0);
4287 }
4288
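/*
 * Illustrative sketch, not part of libzfs: labeling a brand new disk that is
 * not yet a member of any pool.  Passing a NULL zpool_handle_t selects
 * NEW_START_BLOCK as the partition start; "sdb" is a made-up device name and
 * example_* is hypothetical.
 */
static int
example_label_new_disk(libzfs_handle_t *hdl)
{
	/* Short name only: any leading /dev/ must already be stripped. */
	return (zpool_label_disk(hdl, NULL, "sdb"));
}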
4289 /*
4290 * Allocate and return the underlying device name for a device mapper device.
4291 * If a device mapper device maps to multiple devices, return the first device.
4292 *
4293 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4294 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
4295 *
4296 * Returns device name, or NULL on error or no match. If dm_name is not a DM
4297 * device then return NULL.
4298 *
4299 * NOTE: The returned name string must be *freed*.
4300 */
4301 char *
4302 dm_get_underlying_path(char *dm_name)
4303 {
4304 DIR *dp = NULL;
4305 struct dirent *ep;
4306 char *realp;
4307 char *tmp = NULL;
4308 char *path = NULL;
4309 char *dev_str;
4310 int size;
4311
4312 if (dm_name == NULL)
4313 return (NULL);
4314
4315 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4316 realp = realpath(dm_name, NULL);
4317 if (realp == NULL)
4318 return (NULL);
4319
4320 /*
4321 * If they preface 'dev' with a path (like "/dev") then strip it off.
4322 * We just want the 'dm-N' part.
4323 */
4324 tmp = strrchr(realp, '/');
4325 if (tmp != NULL)
4326 dev_str = tmp + 1; /* +1 since we want the chr after '/' */
4327 else
4328 dev_str = realp; /* no '/' in the name; use it as-is */
4329
4330 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
4331 if (size == -1 || !tmp)
4332 goto end;
4333
4334 dp = opendir(tmp);
4335 if (dp == NULL)
4336 goto end;
4337
4338 /* Return first sd* entry in /sys/block/dm-N/slaves/ */
4339 while ((ep = readdir(dp))) {
4340 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4341 size = asprintf(&path, "/dev/%s", ep->d_name);
4342 break;
4343 }
4344 }
4345
4346 end:
4347 if (dp != NULL)
4348 closedir(dp);
4349 free(tmp);
4350 free(realp);
4351 return (path);
4352 }
4353
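/*
 * Illustrative sketch, not part of libzfs: resolving a device-mapper node to
 * its first slave device.  "/dev/dm-0" is a made-up example and the helper
 * name is hypothetical; note the caller must free() the returned string.
 */
static void
example_print_dm_slave(void)
{
	char *dev = dm_get_underlying_path("/dev/dm-0");

	if (dev != NULL) {
		(void) printf("/dev/dm-0 is backed by %s\n", dev);
		free(dev);	/* returned name is allocated for the caller */
	}
}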
4354 /*
4355 * Return 1 if device is a device mapper or multipath device.
4356 * Return 0 if not.
4357 */
4358 int
4359 zfs_dev_is_dm(char *dev_name)
4360 {
4361
4362 char *tmp;
4363 tmp = dm_get_underlying_path(dev_name);
4364 if (tmp == NULL)
4365 return (0);
4366
4367 free(tmp);
4368 return (1);
4369 }
4370
4371 /*
4372 * Lookup the underlying device for a device name
4373 *
4374 * Often you'll have a symlink to a device, a partition device,
4375 * or a multipath device, and want to look up the underlying device.
4376 * This function returns the underlying device name. If the device
4377 * name is already the underlying device, then just return the same
4378 * name. If the device is a DM device with multiple underlying devices
4379 * then return the first one.
4380 *
4381 * For example:
4382 *
4383 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
4384 * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
4385 * returns: /dev/sda
4386 *
4387 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
4388 * dev_name: /dev/mapper/mpatha
4389 * returns: /dev/sda (first device)
4390 *
4391 * 3. /dev/sda (already the underlying device)
4392 * dev_name: /dev/sda
4393 * returns: /dev/sda
4394 *
4395 * 4. /dev/dm-3 (mapped to /dev/sda)
4396 * dev_name: /dev/dm-3
4397 * returns: /dev/sda
4398 *
4399 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
4400 * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
4401 * returns: /dev/sdb
4402 *
4403 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
4404 * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
4405 * returns: /dev/sda
4406 *
4407 * Returns underlying device name, or NULL on error or no match.
4408 *
4409 * NOTE: The returned name string must be *freed*.
4410 */
4411 char *
4412 zfs_get_underlying_path(char *dev_name)
4413 {
4414 char *name = NULL;
4415 char *tmp;
4416
4417 if (dev_name == NULL)
4418 return (NULL);
4419
4420 tmp = dm_get_underlying_path(dev_name);
4421
4422 /* dev_name not a DM device, so just un-symlinkize it */
4423 if (tmp == NULL)
4424 tmp = realpath(dev_name, NULL);
4425
4426 if (tmp != NULL) {
4427 name = zfs_strip_partition_path(tmp);
4428 free(tmp);
4429 }
4430
4431 return (name);
4432 }
4433
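/*
 * Illustrative sketch, not part of libzfs: combining zfs_dev_is_dm() and
 * zfs_get_underlying_path() to report what a user-supplied name actually
 * resolves to.  The helper name is hypothetical.
 */
static void
example_report_underlying(char *dev_name)
{
	char *dev = zfs_get_underlying_path(dev_name);

	if (dev == NULL) {
		(void) fprintf(stderr, "cannot resolve '%s'\n", dev_name);
		return;
	}
	(void) printf("%s%s resolves to %s\n", dev_name,
	    zfs_dev_is_dm(dev_name) ? " (device mapper)" : "", dev);
	free(dev);
}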
4434 /*
4435 * Given a dev name like "sda", return the full enclosure sysfs path to
4436 * the disk. You can also pass in the name with "/dev" prepended
4437 * to it (like /dev/sda).
4438 *
4439 * For example, disk "sda" in enclosure slot 1:
4440 * dev: "sda"
4441 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4442 *
4443 * 'dev' must be a non-devicemapper device.
4444 *
4445 * Returned string must be freed.
4446 */
4447 char *
4448 zfs_get_enclosure_sysfs_path(char *dev_name)
4449 {
4450 DIR *dp = NULL;
4451 struct dirent *ep;
4452 char buf[MAXPATHLEN];
4453 char *tmp1 = NULL;
4454 char *tmp2 = NULL;
4455 char *tmp3 = NULL;
4456 char *path = NULL;
4457 size_t size;
4458 int tmpsize;
4459
4460 if (dev_name == NULL)
4461 return (NULL);
4462
4463 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4464 tmp1 = strrchr(dev_name, '/');
4465 if (tmp1 != NULL)
4466 dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
4467
4468 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4469 if (tmpsize == -1 || tmp1 == NULL) {
4470 tmp1 = NULL;
4471 goto end;
4472 }
4473
4474 dp = opendir(tmp1);
4475 if (dp == NULL) {
4476 /* tmp1 is still freed at the 'end' label */
4477 goto end;
4478 }
4479
4480 /*
4481 * Look through all sysfs entries in /sys/block/<dev>/device for
4482 * the enclosure symlink.
4483 */
4484 while ((ep = readdir(dp))) {
4485 /* Ignore everything that's not our enclosure_device link */
4486 if (strstr(ep->d_name, "enclosure_device") == NULL)
4487 continue;
4488
4489 if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
4490 tmp2 == NULL)
4491 break;
4492
4493 size = readlink(tmp2, buf, sizeof (buf));
4494
4495 /* Did readlink fail or crop the link name? */
4496 if (size == -1 || size >= sizeof (buf)) {
4497 free(tmp2);
4498 tmp2 = NULL; /* To make free() at the end a NOP */
4499 break;
4500 }
4501
4502 /*
4503 * We got a valid link. readlink() doesn't NUL-terminate
4504 * strings, so we have to do it ourselves.
4505 */
4506 buf[size] = '\0';
4507
4508 /*
4509 * Our link will look like:
4510 *
4511 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4512 *
4513 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4514 */
4515 tmp3 = strstr(buf, "enclosure");
4516 if (tmp3 == NULL)
4517 break;
4518
4519 if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
4520 /* If asprintf() fails, 'path' is undefined */
4521 path = NULL;
4522 break;
4523 }
4524
4525 /* Found the enclosure link and built the path; stop scanning. */
4526 break;
4527 }
4528
4529 end:
4530 free(tmp2);
4531 free(tmp1);
4532
4533 if (dp != NULL)
4534 closedir(dp);
4535
4536 return (path);
4537 }
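/*
 * Illustrative sketch, not part of libzfs: looking up the enclosure slot for
 * a plain SCSI disk.  "sda" is a made-up device and the helper name is
 * hypothetical; the returned path must be freed, and NULL means the device
 * has no enclosure_device link in sysfs.
 */
static void
example_print_enclosure_slot(void)
{
	char *path = zfs_get_enclosure_sysfs_path("sda");

	if (path != NULL) {
		(void) printf("sda enclosure slot: %s\n", path);
		free(path);
	}
}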