]> git.proxmox.com Git - mirror_zfs.git/blob - cmd/zpool/zpool_main.c
0b55bf21f448d7f73a9519b65bce7ab5e253b3ee
[mirror_zfs.git] / cmd / zpool / zpool_main.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
27 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
28 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
30 * Copyright (c) 2017 Datto Inc.
31 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
32 * Copyright (c) 2017, Intel Corporation.
33 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
34 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
35 * Copyright (c) 2021, Klara Inc.
36 * Copyright [2021] Hewlett Packard Enterprise Development LP
37 */
38
39 #include <assert.h>
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <fcntl.h>
44 #include <getopt.h>
45 #include <libgen.h>
46 #include <libintl.h>
47 #include <libuutil.h>
48 #include <locale.h>
49 #include <pthread.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <time.h>
54 #include <unistd.h>
55 #include <pwd.h>
56 #include <zone.h>
57 #include <sys/wait.h>
58 #include <zfs_prop.h>
59 #include <sys/fs/zfs.h>
60 #include <sys/stat.h>
61 #include <sys/systeminfo.h>
62 #include <sys/fm/fs/zfs.h>
63 #include <sys/fm/util.h>
64 #include <sys/fm/protocol.h>
65 #include <sys/zfs_ioctl.h>
66 #include <sys/mount.h>
67 #include <sys/sysmacros.h>
68
69 #include <math.h>
70
71 #include <libzfs.h>
72 #include <libzutil.h>
73
74 #include "zpool_util.h"
75 #include "zfs_comutil.h"
76 #include "zfeature_common.h"
77
78 #include "statcommon.h"
79
80 libzfs_handle_t *g_zfs;
81
82 static int zpool_do_create(int, char **);
83 static int zpool_do_destroy(int, char **);
84
85 static int zpool_do_add(int, char **);
86 static int zpool_do_remove(int, char **);
87 static int zpool_do_labelclear(int, char **);
88
89 static int zpool_do_checkpoint(int, char **);
90
91 static int zpool_do_list(int, char **);
92 static int zpool_do_iostat(int, char **);
93 static int zpool_do_status(int, char **);
94
95 static int zpool_do_online(int, char **);
96 static int zpool_do_offline(int, char **);
97 static int zpool_do_clear(int, char **);
98 static int zpool_do_reopen(int, char **);
99
100 static int zpool_do_reguid(int, char **);
101
102 static int zpool_do_attach(int, char **);
103 static int zpool_do_detach(int, char **);
104 static int zpool_do_replace(int, char **);
105 static int zpool_do_split(int, char **);
106
107 static int zpool_do_initialize(int, char **);
108 static int zpool_do_scrub(int, char **);
109 static int zpool_do_resilver(int, char **);
110 static int zpool_do_trim(int, char **);
111
112 static int zpool_do_import(int, char **);
113 static int zpool_do_export(int, char **);
114
115 static int zpool_do_upgrade(int, char **);
116
117 static int zpool_do_history(int, char **);
118 static int zpool_do_events(int, char **);
119
120 static int zpool_do_get(int, char **);
121 static int zpool_do_set(int, char **);
122
123 static int zpool_do_sync(int, char **);
124
125 static int zpool_do_version(int, char **);
126
127 static int zpool_do_wait(int, char **);
128
129 static zpool_compat_status_t zpool_do_load_compat(
130 const char *, boolean_t *);
131
132 /*
133 * These libumem hooks provide a reasonable set of defaults for the allocator's
134 * debugging facilities.
135 */
136
137 #ifdef DEBUG
138 const char *
139 _umem_debug_init(void)
140 {
141 return ("default,verbose"); /* $UMEM_DEBUG setting */
142 }
143
144 const char *
145 _umem_logging_init(void)
146 {
147 return ("fail,contents"); /* $UMEM_LOGGING setting */
148 }
149 #endif
150
151 typedef enum {
152 HELP_ADD,
153 HELP_ATTACH,
154 HELP_CLEAR,
155 HELP_CREATE,
156 HELP_CHECKPOINT,
157 HELP_DESTROY,
158 HELP_DETACH,
159 HELP_EXPORT,
160 HELP_HISTORY,
161 HELP_IMPORT,
162 HELP_IOSTAT,
163 HELP_LABELCLEAR,
164 HELP_LIST,
165 HELP_OFFLINE,
166 HELP_ONLINE,
167 HELP_REPLACE,
168 HELP_REMOVE,
169 HELP_INITIALIZE,
170 HELP_SCRUB,
171 HELP_RESILVER,
172 HELP_TRIM,
173 HELP_STATUS,
174 HELP_UPGRADE,
175 HELP_EVENTS,
176 HELP_GET,
177 HELP_SET,
178 HELP_SPLIT,
179 HELP_SYNC,
180 HELP_REGUID,
181 HELP_REOPEN,
182 HELP_VERSION,
183 HELP_WAIT
184 } zpool_help_t;
185
186
187 /*
188 * Flags for stats to display with "zpool iostats"
189 */
190 enum iostat_type {
191 IOS_DEFAULT = 0,
192 IOS_LATENCY = 1,
193 IOS_QUEUES = 2,
194 IOS_L_HISTO = 3,
195 IOS_RQ_HISTO = 4,
196 IOS_COUNT, /* always last element */
197 };
198
199 /* iostat_type entries as bitmasks */
200 #define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
201 #define IOS_LATENCY_M (1ULL << IOS_LATENCY)
202 #define IOS_QUEUES_M (1ULL << IOS_QUEUES)
203 #define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
204 #define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
205
206 /* Mask of all the histo bits */
207 #define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
208
209 /*
210 * Lookup table for iostat flags to nvlist names. Basically a list
211 * of all the nvlists a flag requires. Also specifies the order in
212 * which data gets printed in zpool iostat.
213 */
214 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
215 [IOS_L_HISTO] = {
216 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
217 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
218 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
219 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
220 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
221 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
222 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
223 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
224 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
225 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
226 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
227 NULL},
228 [IOS_LATENCY] = {
229 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
230 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
231 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
232 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
233 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
234 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
235 NULL},
236 [IOS_QUEUES] = {
237 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
238 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
239 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
240 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
241 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
242 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
243 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
244 NULL},
245 [IOS_RQ_HISTO] = {
246 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
247 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
248 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
249 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
250 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
251 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
252 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
253 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
254 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
255 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
256 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
257 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
258 ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
259 ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
260 NULL},
261 };
262
263
264 /*
265 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
266 * Right now, only one histo bit is ever set at one time, so we can
267 * just do a highbit64(a)
268 */
269 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
270
271 typedef struct zpool_command {
272 const char *name;
273 int (*func)(int, char **);
274 zpool_help_t usage;
275 } zpool_command_t;
276
277 /*
278 * Master command table. Each ZFS command has a name, associated function, and
279 * usage message. The usage messages need to be internationalized, so we have
280 * to have a function to return the usage message based on a command index.
281 *
282 * These commands are organized according to how they are displayed in the usage
283 * message. An empty command (one with a NULL name) indicates an empty line in
284 * the generic usage message.
285 */
286 static zpool_command_t command_table[] = {
287 { "version", zpool_do_version, HELP_VERSION },
288 { NULL },
289 { "create", zpool_do_create, HELP_CREATE },
290 { "destroy", zpool_do_destroy, HELP_DESTROY },
291 { NULL },
292 { "add", zpool_do_add, HELP_ADD },
293 { "remove", zpool_do_remove, HELP_REMOVE },
294 { NULL },
295 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
296 { NULL },
297 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
298 { NULL },
299 { "list", zpool_do_list, HELP_LIST },
300 { "iostat", zpool_do_iostat, HELP_IOSTAT },
301 { "status", zpool_do_status, HELP_STATUS },
302 { NULL },
303 { "online", zpool_do_online, HELP_ONLINE },
304 { "offline", zpool_do_offline, HELP_OFFLINE },
305 { "clear", zpool_do_clear, HELP_CLEAR },
306 { "reopen", zpool_do_reopen, HELP_REOPEN },
307 { NULL },
308 { "attach", zpool_do_attach, HELP_ATTACH },
309 { "detach", zpool_do_detach, HELP_DETACH },
310 { "replace", zpool_do_replace, HELP_REPLACE },
311 { "split", zpool_do_split, HELP_SPLIT },
312 { NULL },
313 { "initialize", zpool_do_initialize, HELP_INITIALIZE },
314 { "resilver", zpool_do_resilver, HELP_RESILVER },
315 { "scrub", zpool_do_scrub, HELP_SCRUB },
316 { "trim", zpool_do_trim, HELP_TRIM },
317 { NULL },
318 { "import", zpool_do_import, HELP_IMPORT },
319 { "export", zpool_do_export, HELP_EXPORT },
320 { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
321 { "reguid", zpool_do_reguid, HELP_REGUID },
322 { NULL },
323 { "history", zpool_do_history, HELP_HISTORY },
324 { "events", zpool_do_events, HELP_EVENTS },
325 { NULL },
326 { "get", zpool_do_get, HELP_GET },
327 { "set", zpool_do_set, HELP_SET },
328 { "sync", zpool_do_sync, HELP_SYNC },
329 { NULL },
330 { "wait", zpool_do_wait, HELP_WAIT },
331 };
332
333 #define NCOMMAND (ARRAY_SIZE(command_table))
334
335 #define VDEV_ALLOC_CLASS_LOGS "logs"
336
337 static zpool_command_t *current_command;
338 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
339 static char history_str[HIS_MAX_RECORD_LEN];
340 static boolean_t log_history = B_TRUE;
341 static uint_t timestamp_fmt = NODATE;
342
343 static const char *
344 get_usage(zpool_help_t idx)
345 {
346 switch (idx) {
347 case HELP_ADD:
348 return (gettext("\tadd [-fgLnP] [-o property=value] "
349 "<pool> <vdev> ...\n"));
350 case HELP_ATTACH:
351 return (gettext("\tattach [-fsw] [-o property=value] "
352 "<pool> <device> <new-device>\n"));
353 case HELP_CLEAR:
354 return (gettext("\tclear [-nF] <pool> [device]\n"));
355 case HELP_CREATE:
356 return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
357 "\t [-O file-system-property=value] ... \n"
358 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
359 case HELP_CHECKPOINT:
360 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
361 case HELP_DESTROY:
362 return (gettext("\tdestroy [-f] <pool>\n"));
363 case HELP_DETACH:
364 return (gettext("\tdetach <pool> <device>\n"));
365 case HELP_EXPORT:
366 return (gettext("\texport [-af] <pool> ...\n"));
367 case HELP_HISTORY:
368 return (gettext("\thistory [-il] [<pool>] ...\n"));
369 case HELP_IMPORT:
370 return (gettext("\timport [-d dir] [-D]\n"
371 "\timport [-o mntopts] [-o property=value] ... \n"
372 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
373 "[-R root] [-F [-n]] -a\n"
374 "\timport [-o mntopts] [-o property=value] ... \n"
375 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
376 "[-R root] [-F [-n]]\n"
377 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
378 case HELP_IOSTAT:
379 return (gettext("\tiostat [[[-c [script1,script2,...]"
380 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
381 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
382 " [[-n] interval [count]]\n"));
383 case HELP_LABELCLEAR:
384 return (gettext("\tlabelclear [-f] <vdev>\n"));
385 case HELP_LIST:
386 return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
387 "[-T d|u] [pool] ... \n"
388 "\t [interval [count]]\n"));
389 case HELP_OFFLINE:
390 return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
391 case HELP_ONLINE:
392 return (gettext("\tonline [-e] <pool> <device> ...\n"));
393 case HELP_REPLACE:
394 return (gettext("\treplace [-fsw] [-o property=value] "
395 "<pool> <device> [new-device]\n"));
396 case HELP_REMOVE:
397 return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
398 case HELP_REOPEN:
399 return (gettext("\treopen [-n] <pool>\n"));
400 case HELP_INITIALIZE:
401 return (gettext("\tinitialize [-c | -s] [-w] <pool> "
402 "[<device> ...]\n"));
403 case HELP_SCRUB:
404 return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
405 case HELP_RESILVER:
406 return (gettext("\tresilver <pool> ...\n"));
407 case HELP_TRIM:
408 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
409 "[<device> ...]\n"));
410 case HELP_STATUS:
411 return (gettext("\tstatus [-c [script1,script2,...]] "
412 "[-igLpPstvxD] [-T d|u] [pool] ... \n"
413 "\t [interval [count]]\n"));
414 case HELP_UPGRADE:
415 return (gettext("\tupgrade\n"
416 "\tupgrade -v\n"
417 "\tupgrade [-V version] <-a | pool ...>\n"));
418 case HELP_EVENTS:
419 return (gettext("\tevents [-vHf [pool] | -c]\n"));
420 case HELP_GET:
421 return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
422 "<\"all\" | property[,...]> <pool> ...\n"));
423 case HELP_SET:
424 return (gettext("\tset <property=value> <pool> \n"));
425 case HELP_SPLIT:
426 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
427 "\t [-o property=value] <pool> <newpool> "
428 "[<device> ...]\n"));
429 case HELP_REGUID:
430 return (gettext("\treguid <pool>\n"));
431 case HELP_SYNC:
432 return (gettext("\tsync [pool] ...\n"));
433 case HELP_VERSION:
434 return (gettext("\tversion\n"));
435 case HELP_WAIT:
436 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
437 "<pool> [interval]\n"));
438 default:
439 __builtin_unreachable();
440 }
441 }
442
443 static void
444 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
445 {
446 uint_t children = 0;
447 nvlist_t **child;
448 uint_t i;
449
450 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
451 &child, &children);
452
453 if (children == 0) {
454 char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
455 VDEV_NAME_PATH);
456
457 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
458 strcmp(path, VDEV_TYPE_HOLE) != 0)
459 fnvlist_add_boolean(res, path);
460
461 free(path);
462 return;
463 }
464
465 for (i = 0; i < children; i++) {
466 zpool_collect_leaves(zhp, child[i], res);
467 }
468 }
469
470 /*
471 * Callback routine that will print out a pool property value.
472 */
473 static int
474 print_pool_prop_cb(int prop, void *cb)
475 {
476 FILE *fp = cb;
477
478 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
479
480 if (zpool_prop_readonly(prop))
481 (void) fprintf(fp, " NO ");
482 else
483 (void) fprintf(fp, " YES ");
484
485 if (zpool_prop_values(prop) == NULL)
486 (void) fprintf(fp, "-\n");
487 else
488 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
489
490 return (ZPROP_CONT);
491 }
492
493 /*
494 * Callback routine that will print out a vdev property value.
495 */
496 static int
497 print_vdev_prop_cb(int prop, void *cb)
498 {
499 FILE *fp = cb;
500
501 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
502
503 if (vdev_prop_readonly(prop))
504 (void) fprintf(fp, " NO ");
505 else
506 (void) fprintf(fp, " YES ");
507
508 if (vdev_prop_values(prop) == NULL)
509 (void) fprintf(fp, "-\n");
510 else
511 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
512
513 return (ZPROP_CONT);
514 }
515
516 /*
517 * Display usage message. If we're inside a command, display only the usage for
518 * that command. Otherwise, iterate over the entire command table and display
519 * a complete usage message.
520 */
521 static __attribute__((noreturn)) void
522 usage(boolean_t requested)
523 {
524 FILE *fp = requested ? stdout : stderr;
525
526 if (current_command == NULL) {
527 int i;
528
529 (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
530 (void) fprintf(fp,
531 gettext("where 'command' is one of the following:\n\n"));
532
533 for (i = 0; i < NCOMMAND; i++) {
534 if (command_table[i].name == NULL)
535 (void) fprintf(fp, "\n");
536 else
537 (void) fprintf(fp, "%s",
538 get_usage(command_table[i].usage));
539 }
540 } else {
541 (void) fprintf(fp, gettext("usage:\n"));
542 (void) fprintf(fp, "%s", get_usage(current_command->usage));
543 }
544
545 if (current_command != NULL &&
546 current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
547 ((strcmp(current_command->name, "set") == 0) ||
548 (strcmp(current_command->name, "get") == 0) ||
549 (strcmp(current_command->name, "list") == 0))) {
550
551 (void) fprintf(fp, "%s",
552 gettext("\nthe following properties are supported:\n"));
553
554 (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
555 "PROPERTY", "EDIT", "VALUES");
556
557 /* Iterate over all properties */
558 if (current_prop_type == ZFS_TYPE_POOL) {
559 (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
560 B_TRUE, current_prop_type);
561
562 (void) fprintf(fp, "\t%-19s ", "feature@...");
563 (void) fprintf(fp, "YES "
564 "disabled | enabled | active\n");
565
566 (void) fprintf(fp, gettext("\nThe feature@ properties "
567 "must be appended with a feature name.\n"
568 "See zpool-features(7).\n"));
569 } else if (current_prop_type == ZFS_TYPE_VDEV) {
570 (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
571 B_TRUE, current_prop_type);
572 }
573 }
574
575 /*
576 * See comments at end of main().
577 */
578 if (getenv("ZFS_ABORT") != NULL) {
579 (void) printf("dumping core by request\n");
580 abort();
581 }
582
583 exit(requested ? 0 : 2);
584 }
585
586 /*
587 * zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
588 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
589 * if none specified.
590 *
591 * -c Cancel. Ends active initializing.
592 * -s Suspend. Initializing can then be restarted with no flags.
593 * -w Wait. Blocks until initializing has completed.
594 */
595 int
596 zpool_do_initialize(int argc, char **argv)
597 {
598 int c;
599 char *poolname;
600 zpool_handle_t *zhp;
601 nvlist_t *vdevs;
602 int err = 0;
603 boolean_t wait = B_FALSE;
604
605 struct option long_options[] = {
606 {"cancel", no_argument, NULL, 'c'},
607 {"suspend", no_argument, NULL, 's'},
608 {"wait", no_argument, NULL, 'w'},
609 {0, 0, 0, 0}
610 };
611
612 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
613 while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
614 switch (c) {
615 case 'c':
616 if (cmd_type != POOL_INITIALIZE_START &&
617 cmd_type != POOL_INITIALIZE_CANCEL) {
618 (void) fprintf(stderr, gettext("-c cannot be "
619 "combined with other options\n"));
620 usage(B_FALSE);
621 }
622 cmd_type = POOL_INITIALIZE_CANCEL;
623 break;
624 case 's':
625 if (cmd_type != POOL_INITIALIZE_START &&
626 cmd_type != POOL_INITIALIZE_SUSPEND) {
627 (void) fprintf(stderr, gettext("-s cannot be "
628 "combined with other options\n"));
629 usage(B_FALSE);
630 }
631 cmd_type = POOL_INITIALIZE_SUSPEND;
632 break;
633 case 'w':
634 wait = B_TRUE;
635 break;
636 case '?':
637 if (optopt != 0) {
638 (void) fprintf(stderr,
639 gettext("invalid option '%c'\n"), optopt);
640 } else {
641 (void) fprintf(stderr,
642 gettext("invalid option '%s'\n"),
643 argv[optind - 1]);
644 }
645 usage(B_FALSE);
646 }
647 }
648
649 argc -= optind;
650 argv += optind;
651
652 if (argc < 1) {
653 (void) fprintf(stderr, gettext("missing pool name argument\n"));
654 usage(B_FALSE);
655 return (-1);
656 }
657
658 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
659 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
660 "-s\n"));
661 usage(B_FALSE);
662 }
663
664 poolname = argv[0];
665 zhp = zpool_open(g_zfs, poolname);
666 if (zhp == NULL)
667 return (-1);
668
669 vdevs = fnvlist_alloc();
670 if (argc == 1) {
671 /* no individual leaf vdevs specified, so add them all */
672 nvlist_t *config = zpool_get_config(zhp, NULL);
673 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
674 ZPOOL_CONFIG_VDEV_TREE);
675 zpool_collect_leaves(zhp, nvroot, vdevs);
676 } else {
677 for (int i = 1; i < argc; i++) {
678 fnvlist_add_boolean(vdevs, argv[i]);
679 }
680 }
681
682 if (wait)
683 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
684 else
685 err = zpool_initialize(zhp, cmd_type, vdevs);
686
687 fnvlist_free(vdevs);
688 zpool_close(zhp);
689
690 return (err);
691 }
692
693 /*
694 * print a pool vdev config for dry runs
695 */
696 static void
697 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
698 const char *match, int name_flags)
699 {
700 nvlist_t **child;
701 uint_t c, children;
702 char *vname;
703 boolean_t printed = B_FALSE;
704
705 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
706 &child, &children) != 0) {
707 if (name != NULL)
708 (void) printf("\t%*s%s\n", indent, "", name);
709 return;
710 }
711
712 for (c = 0; c < children; c++) {
713 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
714 char *class = (char *)"";
715
716 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
717 &is_hole);
718
719 if (is_hole == B_TRUE) {
720 continue;
721 }
722
723 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
724 &is_log);
725 if (is_log)
726 class = (char *)VDEV_ALLOC_BIAS_LOG;
727 (void) nvlist_lookup_string(child[c],
728 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
729 if (strcmp(match, class) != 0)
730 continue;
731
732 if (!printed && name != NULL) {
733 (void) printf("\t%*s%s\n", indent, "", name);
734 printed = B_TRUE;
735 }
736 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
737 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
738 name_flags);
739 free(vname);
740 }
741 }
742
743 /*
744 * Print the list of l2cache devices for dry runs.
745 */
746 static void
747 print_cache_list(nvlist_t *nv, int indent)
748 {
749 nvlist_t **child;
750 uint_t c, children;
751
752 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
753 &child, &children) == 0 && children > 0) {
754 (void) printf("\t%*s%s\n", indent, "", "cache");
755 } else {
756 return;
757 }
758 for (c = 0; c < children; c++) {
759 char *vname;
760
761 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
762 (void) printf("\t%*s%s\n", indent + 2, "", vname);
763 free(vname);
764 }
765 }
766
767 /*
768 * Print the list of spares for dry runs.
769 */
770 static void
771 print_spare_list(nvlist_t *nv, int indent)
772 {
773 nvlist_t **child;
774 uint_t c, children;
775
776 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
777 &child, &children) == 0 && children > 0) {
778 (void) printf("\t%*s%s\n", indent, "", "spares");
779 } else {
780 return;
781 }
782 for (c = 0; c < children; c++) {
783 char *vname;
784
785 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
786 (void) printf("\t%*s%s\n", indent + 2, "", vname);
787 free(vname);
788 }
789 }
790
791 static boolean_t
792 prop_list_contains_feature(nvlist_t *proplist)
793 {
794 nvpair_t *nvp;
795 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
796 nvp = nvlist_next_nvpair(proplist, nvp)) {
797 if (zpool_prop_feature(nvpair_name(nvp)))
798 return (B_TRUE);
799 }
800 return (B_FALSE);
801 }
802
803 /*
804 * Add a property pair (name, string-value) into a property nvlist.
805 */
806 static int
807 add_prop_list(const char *propname, const char *propval, nvlist_t **props,
808 boolean_t poolprop)
809 {
810 zpool_prop_t prop = ZPOOL_PROP_INVAL;
811 nvlist_t *proplist;
812 const char *normnm;
813 char *strval;
814
815 if (*props == NULL &&
816 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
817 (void) fprintf(stderr,
818 gettext("internal error: out of memory\n"));
819 return (1);
820 }
821
822 proplist = *props;
823
824 if (poolprop) {
825 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
826 const char *cname =
827 zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
828
829 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
830 (!zpool_prop_feature(propname) &&
831 !zpool_prop_vdev(propname))) {
832 (void) fprintf(stderr, gettext("property '%s' is "
833 "not a valid pool or vdev property\n"), propname);
834 return (2);
835 }
836
837 /*
838 * feature@ properties and version should not be specified
839 * at the same time.
840 */
841 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
842 nvlist_exists(proplist, vname)) ||
843 (prop == ZPOOL_PROP_VERSION &&
844 prop_list_contains_feature(proplist))) {
845 (void) fprintf(stderr, gettext("'feature@' and "
846 "'version' properties cannot be specified "
847 "together\n"));
848 return (2);
849 }
850
851 /*
852 * if version is specified, only "legacy" compatibility
853 * may be requested
854 */
855 if ((prop == ZPOOL_PROP_COMPATIBILITY &&
856 strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
857 nvlist_exists(proplist, vname)) ||
858 (prop == ZPOOL_PROP_VERSION &&
859 nvlist_exists(proplist, cname) &&
860 strcmp(fnvlist_lookup_string(proplist, cname),
861 ZPOOL_COMPAT_LEGACY) != 0)) {
862 (void) fprintf(stderr, gettext("when 'version' is "
863 "specified, the 'compatibility' feature may only "
864 "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
865 return (2);
866 }
867
868 if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
869 normnm = propname;
870 else
871 normnm = zpool_prop_to_name(prop);
872 } else {
873 zfs_prop_t fsprop = zfs_name_to_prop(propname);
874
875 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
876 B_FALSE)) {
877 normnm = zfs_prop_to_name(fsprop);
878 } else if (zfs_prop_user(propname) ||
879 zfs_prop_userquota(propname)) {
880 normnm = propname;
881 } else {
882 (void) fprintf(stderr, gettext("property '%s' is "
883 "not a valid filesystem property\n"), propname);
884 return (2);
885 }
886 }
887
888 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
889 prop != ZPOOL_PROP_CACHEFILE) {
890 (void) fprintf(stderr, gettext("property '%s' "
891 "specified multiple times\n"), propname);
892 return (2);
893 }
894
895 if (nvlist_add_string(proplist, normnm, propval) != 0) {
896 (void) fprintf(stderr, gettext("internal "
897 "error: out of memory\n"));
898 return (1);
899 }
900
901 return (0);
902 }
903
904 /*
905 * Set a default property pair (name, string-value) in a property nvlist
906 */
907 static int
908 add_prop_list_default(const char *propname, const char *propval,
909 nvlist_t **props)
910 {
911 char *pval;
912
913 if (nvlist_lookup_string(*props, propname, &pval) == 0)
914 return (0);
915
916 return (add_prop_list(propname, propval, props, B_TRUE));
917 }
918
919 /*
920 * zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
921 *
922 * -f Force addition of devices, even if they appear in use
923 * -g Display guid for individual vdev name.
924 * -L Follow links when resolving vdev path name.
925 * -n Do not add the devices, but display the resulting layout if
926 * they were to be added.
927 * -o Set property=value.
928 * -P Display full path for vdev name.
929 *
930 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
931 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
932 * libzfs.
933 */
934 int
935 zpool_do_add(int argc, char **argv)
936 {
937 boolean_t force = B_FALSE;
938 boolean_t dryrun = B_FALSE;
939 int name_flags = 0;
940 int c;
941 nvlist_t *nvroot;
942 char *poolname;
943 int ret;
944 zpool_handle_t *zhp;
945 nvlist_t *config;
946 nvlist_t *props = NULL;
947 char *propval;
948
949 /* check options */
950 while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
951 switch (c) {
952 case 'f':
953 force = B_TRUE;
954 break;
955 case 'g':
956 name_flags |= VDEV_NAME_GUID;
957 break;
958 case 'L':
959 name_flags |= VDEV_NAME_FOLLOW_LINKS;
960 break;
961 case 'n':
962 dryrun = B_TRUE;
963 break;
964 case 'o':
965 if ((propval = strchr(optarg, '=')) == NULL) {
966 (void) fprintf(stderr, gettext("missing "
967 "'=' for -o option\n"));
968 usage(B_FALSE);
969 }
970 *propval = '\0';
971 propval++;
972
973 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
974 (add_prop_list(optarg, propval, &props, B_TRUE)))
975 usage(B_FALSE);
976 break;
977 case 'P':
978 name_flags |= VDEV_NAME_PATH;
979 break;
980 case '?':
981 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
982 optopt);
983 usage(B_FALSE);
984 }
985 }
986
987 argc -= optind;
988 argv += optind;
989
990 /* get pool name and check number of arguments */
991 if (argc < 1) {
992 (void) fprintf(stderr, gettext("missing pool name argument\n"));
993 usage(B_FALSE);
994 }
995 if (argc < 2) {
996 (void) fprintf(stderr, gettext("missing vdev specification\n"));
997 usage(B_FALSE);
998 }
999
1000 poolname = argv[0];
1001
1002 argc--;
1003 argv++;
1004
1005 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1006 return (1);
1007
1008 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1009 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1010 poolname);
1011 zpool_close(zhp);
1012 return (1);
1013 }
1014
1015 /* unless manually specified use "ashift" pool property (if set) */
1016 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1017 int intval;
1018 zprop_source_t src;
1019 char strval[ZPOOL_MAXPROPLEN];
1020
1021 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1022 if (src != ZPROP_SRC_DEFAULT) {
1023 (void) sprintf(strval, "%" PRId32, intval);
1024 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1025 &props, B_TRUE) == 0);
1026 }
1027 }
1028
1029 /* pass off to make_root_vdev for processing */
1030 nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
1031 argc, argv);
1032 if (nvroot == NULL) {
1033 zpool_close(zhp);
1034 return (1);
1035 }
1036
1037 if (dryrun) {
1038 nvlist_t *poolnvroot;
1039 nvlist_t **l2child, **sparechild;
1040 uint_t l2children, sparechildren, c;
1041 char *vname;
1042 boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1043
1044 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1045 &poolnvroot) == 0);
1046
1047 (void) printf(gettext("would update '%s' to the following "
1048 "configuration:\n\n"), zpool_get_name(zhp));
1049
1050 /* print original main pool and new tree */
1051 print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1052 name_flags | VDEV_NAME_TYPE_ID);
1053 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1054
1055 /* print other classes: 'dedup', 'special', and 'log' */
1056 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1057 print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1058 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1059 print_vdev_tree(zhp, NULL, nvroot, 0,
1060 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1061 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1062 print_vdev_tree(zhp, "dedup", nvroot, 0,
1063 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1064 }
1065
1066 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1067 print_vdev_tree(zhp, "special", poolnvroot, 0,
1068 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1069 print_vdev_tree(zhp, NULL, nvroot, 0,
1070 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1071 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1072 print_vdev_tree(zhp, "special", nvroot, 0,
1073 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1074 }
1075
1076 if (num_logs(poolnvroot) > 0) {
1077 print_vdev_tree(zhp, "logs", poolnvroot, 0,
1078 VDEV_ALLOC_BIAS_LOG, name_flags);
1079 print_vdev_tree(zhp, NULL, nvroot, 0,
1080 VDEV_ALLOC_BIAS_LOG, name_flags);
1081 } else if (num_logs(nvroot) > 0) {
1082 print_vdev_tree(zhp, "logs", nvroot, 0,
1083 VDEV_ALLOC_BIAS_LOG, name_flags);
1084 }
1085
1086 /* Do the same for the caches */
1087 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1088 &l2child, &l2children) == 0 && l2children) {
1089 hadcache = B_TRUE;
1090 (void) printf(gettext("\tcache\n"));
1091 for (c = 0; c < l2children; c++) {
1092 vname = zpool_vdev_name(g_zfs, NULL,
1093 l2child[c], name_flags);
1094 (void) printf("\t %s\n", vname);
1095 free(vname);
1096 }
1097 }
1098 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1099 &l2child, &l2children) == 0 && l2children) {
1100 if (!hadcache)
1101 (void) printf(gettext("\tcache\n"));
1102 for (c = 0; c < l2children; c++) {
1103 vname = zpool_vdev_name(g_zfs, NULL,
1104 l2child[c], name_flags);
1105 (void) printf("\t %s\n", vname);
1106 free(vname);
1107 }
1108 }
1109 /* And finally the spares */
1110 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1111 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1112 hadspare = B_TRUE;
1113 (void) printf(gettext("\tspares\n"));
1114 for (c = 0; c < sparechildren; c++) {
1115 vname = zpool_vdev_name(g_zfs, NULL,
1116 sparechild[c], name_flags);
1117 (void) printf("\t %s\n", vname);
1118 free(vname);
1119 }
1120 }
1121 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1122 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1123 if (!hadspare)
1124 (void) printf(gettext("\tspares\n"));
1125 for (c = 0; c < sparechildren; c++) {
1126 vname = zpool_vdev_name(g_zfs, NULL,
1127 sparechild[c], name_flags);
1128 (void) printf("\t %s\n", vname);
1129 free(vname);
1130 }
1131 }
1132
1133 ret = 0;
1134 } else {
1135 ret = (zpool_add(zhp, nvroot) != 0);
1136 }
1137
1138 nvlist_free(props);
1139 nvlist_free(nvroot);
1140 zpool_close(zhp);
1141
1142 return (ret);
1143 }
1144
1145 /*
1146 * zpool remove [-npsw] <pool> <vdev> ...
1147 *
1148 * Removes the given vdev from the pool.
1149 */
1150 int
1151 zpool_do_remove(int argc, char **argv)
1152 {
1153 char *poolname;
1154 int i, ret = 0;
1155 zpool_handle_t *zhp = NULL;
1156 boolean_t stop = B_FALSE;
1157 int c;
1158 boolean_t noop = B_FALSE;
1159 boolean_t parsable = B_FALSE;
1160 boolean_t wait = B_FALSE;
1161
1162 /* check options */
1163 while ((c = getopt(argc, argv, "npsw")) != -1) {
1164 switch (c) {
1165 case 'n':
1166 noop = B_TRUE;
1167 break;
1168 case 'p':
1169 parsable = B_TRUE;
1170 break;
1171 case 's':
1172 stop = B_TRUE;
1173 break;
1174 case 'w':
1175 wait = B_TRUE;
1176 break;
1177 case '?':
1178 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1179 optopt);
1180 usage(B_FALSE);
1181 }
1182 }
1183
1184 argc -= optind;
1185 argv += optind;
1186
1187 /* get pool name and check number of arguments */
1188 if (argc < 1) {
1189 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1190 usage(B_FALSE);
1191 }
1192
1193 poolname = argv[0];
1194
1195 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1196 return (1);
1197
1198 if (stop && noop) {
1199 zpool_close(zhp);
1200 (void) fprintf(stderr, gettext("stop request ignored\n"));
1201 return (0);
1202 }
1203
1204 if (stop) {
1205 if (argc > 1) {
1206 (void) fprintf(stderr, gettext("too many arguments\n"));
1207 usage(B_FALSE);
1208 }
1209 if (zpool_vdev_remove_cancel(zhp) != 0)
1210 ret = 1;
1211 if (wait) {
1212 (void) fprintf(stderr, gettext("invalid option "
1213 "combination: -w cannot be used with -s\n"));
1214 usage(B_FALSE);
1215 }
1216 } else {
1217 if (argc < 2) {
1218 (void) fprintf(stderr, gettext("missing device\n"));
1219 usage(B_FALSE);
1220 }
1221
1222 for (i = 1; i < argc; i++) {
1223 if (noop) {
1224 uint64_t size;
1225
1226 if (zpool_vdev_indirect_size(zhp, argv[i],
1227 &size) != 0) {
1228 ret = 1;
1229 break;
1230 }
1231 if (parsable) {
1232 (void) printf("%s %llu\n",
1233 argv[i], (unsigned long long)size);
1234 } else {
1235 char valstr[32];
1236 zfs_nicenum(size, valstr,
1237 sizeof (valstr));
1238 (void) printf("Memory that will be "
1239 "used after removing %s: %s\n",
1240 argv[i], valstr);
1241 }
1242 } else {
1243 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1244 ret = 1;
1245 }
1246 }
1247
1248 if (ret == 0 && wait)
1249 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1250 }
1251 zpool_close(zhp);
1252
1253 return (ret);
1254 }
1255
1256 /*
1257 * Return 1 if a vdev is active (being used in a pool)
1258 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
1259 *
1260 * This is useful for checking if a disk in an active pool is offlined or
1261 * faulted.
1262 */
1263 static int
1264 vdev_is_active(char *vdev_path)
1265 {
1266 int fd;
1267 fd = open(vdev_path, O_EXCL);
1268 if (fd < 0) {
1269 return (1); /* cant open O_EXCL - disk is active */
1270 }
1271
1272 close(fd);
1273 return (0); /* disk is inactive in the pool */
1274 }
1275
1276 /*
1277 * zpool labelclear [-f] <vdev>
1278 *
1279 * -f Force clearing the label for the vdevs which are members of
1280 * the exported or foreign pools.
1281 *
1282 * Verifies that the vdev is not active and zeros out the label information
1283 * on the device.
1284 */
1285 int
1286 zpool_do_labelclear(int argc, char **argv)
1287 {
1288 char vdev[MAXPATHLEN];
1289 char *name = NULL;
1290 struct stat st;
1291 int c, fd = -1, ret = 0;
1292 nvlist_t *config;
1293 pool_state_t state;
1294 boolean_t inuse = B_FALSE;
1295 boolean_t force = B_FALSE;
1296
1297 /* check options */
1298 while ((c = getopt(argc, argv, "f")) != -1) {
1299 switch (c) {
1300 case 'f':
1301 force = B_TRUE;
1302 break;
1303 default:
1304 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1305 optopt);
1306 usage(B_FALSE);
1307 }
1308 }
1309
1310 argc -= optind;
1311 argv += optind;
1312
1313 /* get vdev name */
1314 if (argc < 1) {
1315 (void) fprintf(stderr, gettext("missing vdev name\n"));
1316 usage(B_FALSE);
1317 }
1318 if (argc > 1) {
1319 (void) fprintf(stderr, gettext("too many arguments\n"));
1320 usage(B_FALSE);
1321 }
1322
1323 /*
1324 * Check if we were given absolute path and use it as is.
1325 * Otherwise if the provided vdev name doesn't point to a file,
1326 * try prepending expected disk paths and partition numbers.
1327 */
1328 (void) strlcpy(vdev, argv[0], sizeof (vdev));
1329 if (vdev[0] != '/' && stat(vdev, &st) != 0) {
1330 int error;
1331
1332 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1333 if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1334 if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1335 error = ENOENT;
1336 }
1337
1338 if (error || (stat(vdev, &st) != 0)) {
1339 (void) fprintf(stderr, gettext(
1340 "failed to find device %s, try specifying absolute "
1341 "path instead\n"), argv[0]);
1342 return (1);
1343 }
1344 }
1345
1346 if ((fd = open(vdev, O_RDWR)) < 0) {
1347 (void) fprintf(stderr, gettext("failed to open %s: %s\n"),
1348 vdev, strerror(errno));
1349 return (1);
1350 }
1351
1352 /*
1353 * Flush all dirty pages for the block device. This should not be
1354 * fatal when the device does not support BLKFLSBUF as would be the
1355 * case for a file vdev.
1356 */
1357 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1358 (void) fprintf(stderr, gettext("failed to invalidate "
1359 "cache for %s: %s\n"), vdev, strerror(errno));
1360
1361 if (zpool_read_label(fd, &config, NULL) != 0) {
1362 (void) fprintf(stderr,
1363 gettext("failed to read label from %s\n"), vdev);
1364 ret = 1;
1365 goto errout;
1366 }
1367 nvlist_free(config);
1368
1369 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1370 if (ret != 0) {
1371 (void) fprintf(stderr,
1372 gettext("failed to check state for %s\n"), vdev);
1373 ret = 1;
1374 goto errout;
1375 }
1376
1377 if (!inuse)
1378 goto wipe_label;
1379
1380 switch (state) {
1381 default:
1382 case POOL_STATE_ACTIVE:
1383 case POOL_STATE_SPARE:
1384 case POOL_STATE_L2CACHE:
1385 /*
1386 * We allow the user to call 'zpool offline -f'
1387 * on an offlined disk in an active pool. We can check if
1388 * the disk is online by calling vdev_is_active().
1389 */
1390 if (force && !vdev_is_active(vdev))
1391 break;
1392
1393 (void) fprintf(stderr, gettext(
1394 "%s is a member (%s) of pool \"%s\""),
1395 vdev, zpool_pool_state_to_name(state), name);
1396
1397 if (force) {
1398 (void) fprintf(stderr, gettext(
1399 ". Offline the disk first to clear its label."));
1400 }
1401 printf("\n");
1402 ret = 1;
1403 goto errout;
1404
1405 case POOL_STATE_EXPORTED:
1406 if (force)
1407 break;
1408 (void) fprintf(stderr, gettext(
1409 "use '-f' to override the following error:\n"
1410 "%s is a member of exported pool \"%s\"\n"),
1411 vdev, name);
1412 ret = 1;
1413 goto errout;
1414
1415 case POOL_STATE_POTENTIALLY_ACTIVE:
1416 if (force)
1417 break;
1418 (void) fprintf(stderr, gettext(
1419 "use '-f' to override the following error:\n"
1420 "%s is a member of potentially active pool \"%s\"\n"),
1421 vdev, name);
1422 ret = 1;
1423 goto errout;
1424
1425 case POOL_STATE_DESTROYED:
1426 /* inuse should never be set for a destroyed pool */
1427 assert(0);
1428 break;
1429 }
1430
1431 wipe_label:
1432 ret = zpool_clear_label(fd);
1433 if (ret != 0) {
1434 (void) fprintf(stderr,
1435 gettext("failed to clear label for %s\n"), vdev);
1436 }
1437
1438 errout:
1439 free(name);
1440 (void) close(fd);
1441
1442 return (ret);
1443 }
1444
1445 /*
1446 * zpool create [-fnd] [-o property=value] ...
1447 * [-O file-system-property=value] ...
1448 * [-R root] [-m mountpoint] <pool> <dev> ...
1449 *
1450 * -f Force creation, even if devices appear in use
1451 * -n Do not create the pool, but display the resulting layout if it
1452 * were to be created.
1453 * -R Create a pool under an alternate root
1454 * -m Set default mountpoint for the root dataset. By default it's
1455 * '/<pool>'
1456 * -o Set property=value.
1457 * -o Set feature@feature=enabled|disabled.
1458 * -d Don't automatically enable all supported pool features
1459 * (individual features can be enabled with -o).
1460 * -O Set fsproperty=value in the pool's root file system
1461 *
1462 * Creates the named pool according to the given vdev specification. The
1463 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1464 * Once we get the nvlist back from make_root_vdev(), we either print out the
1465 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1466 */
1467 int
1468 zpool_do_create(int argc, char **argv)
1469 {
1470 boolean_t force = B_FALSE;
1471 boolean_t dryrun = B_FALSE;
1472 boolean_t enable_pool_features = B_TRUE;
1473
1474 int c;
1475 nvlist_t *nvroot = NULL;
1476 char *poolname;
1477 char *tname = NULL;
1478 int ret = 1;
1479 char *altroot = NULL;
1480 char *compat = NULL;
1481 char *mountpoint = NULL;
1482 nvlist_t *fsprops = NULL;
1483 nvlist_t *props = NULL;
1484 char *propval;
1485
1486 /* check options */
1487 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1488 switch (c) {
1489 case 'f':
1490 force = B_TRUE;
1491 break;
1492 case 'n':
1493 dryrun = B_TRUE;
1494 break;
1495 case 'd':
1496 enable_pool_features = B_FALSE;
1497 break;
1498 case 'R':
1499 altroot = optarg;
1500 if (add_prop_list(zpool_prop_to_name(
1501 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
1502 goto errout;
1503 if (add_prop_list_default(zpool_prop_to_name(
1504 ZPOOL_PROP_CACHEFILE), "none", &props))
1505 goto errout;
1506 break;
1507 case 'm':
1508 /* Equivalent to -O mountpoint=optarg */
1509 mountpoint = optarg;
1510 break;
1511 case 'o':
1512 if ((propval = strchr(optarg, '=')) == NULL) {
1513 (void) fprintf(stderr, gettext("missing "
1514 "'=' for -o option\n"));
1515 goto errout;
1516 }
1517 *propval = '\0';
1518 propval++;
1519
1520 if (add_prop_list(optarg, propval, &props, B_TRUE))
1521 goto errout;
1522
1523 /*
1524 * If the user is creating a pool that doesn't support
1525 * feature flags, don't enable any features.
1526 */
1527 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
1528 char *end;
1529 u_longlong_t ver;
1530
1531 ver = strtoull(propval, &end, 10);
1532 if (*end == '\0' &&
1533 ver < SPA_VERSION_FEATURES) {
1534 enable_pool_features = B_FALSE;
1535 }
1536 }
1537 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
1538 altroot = propval;
1539 if (zpool_name_to_prop(optarg) ==
1540 ZPOOL_PROP_COMPATIBILITY)
1541 compat = propval;
1542 break;
1543 case 'O':
1544 if ((propval = strchr(optarg, '=')) == NULL) {
1545 (void) fprintf(stderr, gettext("missing "
1546 "'=' for -O option\n"));
1547 goto errout;
1548 }
1549 *propval = '\0';
1550 propval++;
1551
1552 /*
1553 * Mountpoints are checked and then added later.
1554 * Uniquely among properties, they can be specified
1555 * more than once, to avoid conflict with -m.
1556 */
1557 if (0 == strcmp(optarg,
1558 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
1559 mountpoint = propval;
1560 } else if (add_prop_list(optarg, propval, &fsprops,
1561 B_FALSE)) {
1562 goto errout;
1563 }
1564 break;
1565 case 't':
1566 /*
1567 * Sanity check temporary pool name.
1568 */
1569 if (strchr(optarg, '/') != NULL) {
1570 (void) fprintf(stderr, gettext("cannot create "
1571 "'%s': invalid character '/' in temporary "
1572 "name\n"), optarg);
1573 (void) fprintf(stderr, gettext("use 'zfs "
1574 "create' to create a dataset\n"));
1575 goto errout;
1576 }
1577
1578 if (add_prop_list(zpool_prop_to_name(
1579 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
1580 goto errout;
1581 if (add_prop_list_default(zpool_prop_to_name(
1582 ZPOOL_PROP_CACHEFILE), "none", &props))
1583 goto errout;
1584 tname = optarg;
1585 break;
1586 case ':':
1587 (void) fprintf(stderr, gettext("missing argument for "
1588 "'%c' option\n"), optopt);
1589 goto badusage;
1590 case '?':
1591 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1592 optopt);
1593 goto badusage;
1594 }
1595 }
1596
1597 argc -= optind;
1598 argv += optind;
1599
1600 /* get pool name and check number of arguments */
1601 if (argc < 1) {
1602 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1603 goto badusage;
1604 }
1605 if (argc < 2) {
1606 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1607 goto badusage;
1608 }
1609
1610 poolname = argv[0];
1611
1612 /*
1613 * As a special case, check for use of '/' in the name, and direct the
1614 * user to use 'zfs create' instead.
1615 */
1616 if (strchr(poolname, '/') != NULL) {
1617 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
1618 "character '/' in pool name\n"), poolname);
1619 (void) fprintf(stderr, gettext("use 'zfs create' to "
1620 "create a dataset\n"));
1621 goto errout;
1622 }
1623
1624 /* pass off to make_root_vdev for bulk processing */
1625 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
1626 argc - 1, argv + 1);
1627 if (nvroot == NULL)
1628 goto errout;
1629
1630 /* make_root_vdev() allows 0 toplevel children if there are spares */
1631 if (!zfs_allocatable_devs(nvroot)) {
1632 (void) fprintf(stderr, gettext("invalid vdev "
1633 "specification: at least one toplevel vdev must be "
1634 "specified\n"));
1635 goto errout;
1636 }
1637
1638 if (altroot != NULL && altroot[0] != '/') {
1639 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
1640 "must be an absolute path\n"), altroot);
1641 goto errout;
1642 }
1643
1644 /*
1645 * Check the validity of the mountpoint and direct the user to use the
1646 * '-m' mountpoint option if it looks like its in use.
1647 */
1648 if (mountpoint == NULL ||
1649 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
1650 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
1651 char buf[MAXPATHLEN];
1652 DIR *dirp;
1653
1654 if (mountpoint && mountpoint[0] != '/') {
1655 (void) fprintf(stderr, gettext("invalid mountpoint "
1656 "'%s': must be an absolute path, 'legacy', or "
1657 "'none'\n"), mountpoint);
1658 goto errout;
1659 }
1660
1661 if (mountpoint == NULL) {
1662 if (altroot != NULL)
1663 (void) snprintf(buf, sizeof (buf), "%s/%s",
1664 altroot, poolname);
1665 else
1666 (void) snprintf(buf, sizeof (buf), "/%s",
1667 poolname);
1668 } else {
1669 if (altroot != NULL)
1670 (void) snprintf(buf, sizeof (buf), "%s%s",
1671 altroot, mountpoint);
1672 else
1673 (void) snprintf(buf, sizeof (buf), "%s",
1674 mountpoint);
1675 }
1676
1677 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
1678 (void) fprintf(stderr, gettext("mountpoint '%s' : "
1679 "%s\n"), buf, strerror(errno));
1680 (void) fprintf(stderr, gettext("use '-m' "
1681 "option to provide a different default\n"));
1682 goto errout;
1683 } else if (dirp) {
1684 int count = 0;
1685
1686 while (count < 3 && readdir(dirp) != NULL)
1687 count++;
1688 (void) closedir(dirp);
1689
1690 if (count > 2) {
1691 (void) fprintf(stderr, gettext("mountpoint "
1692 "'%s' exists and is not empty\n"), buf);
1693 (void) fprintf(stderr, gettext("use '-m' "
1694 "option to provide a "
1695 "different default\n"));
1696 goto errout;
1697 }
1698 }
1699 }
1700
1701 /*
1702 * Now that the mountpoint's validity has been checked, ensure that
1703 * the property is set appropriately prior to creating the pool.
1704 */
1705 if (mountpoint != NULL) {
1706 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1707 mountpoint, &fsprops, B_FALSE);
1708 if (ret != 0)
1709 goto errout;
1710 }
1711
1712 ret = 1;
1713 if (dryrun) {
1714 /*
1715 * For a dry run invocation, print out a basic message and run
1716 * through all the vdevs in the list and print out in an
1717 * appropriate hierarchy.
1718 */
1719 (void) printf(gettext("would create '%s' with the "
1720 "following layout:\n\n"), poolname);
1721
1722 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
1723 print_vdev_tree(NULL, "dedup", nvroot, 0,
1724 VDEV_ALLOC_BIAS_DEDUP, 0);
1725 print_vdev_tree(NULL, "special", nvroot, 0,
1726 VDEV_ALLOC_BIAS_SPECIAL, 0);
1727 print_vdev_tree(NULL, "logs", nvroot, 0,
1728 VDEV_ALLOC_BIAS_LOG, 0);
1729 print_cache_list(nvroot, 0);
1730 print_spare_list(nvroot, 0);
1731
1732 ret = 0;
1733 } else {
1734 /*
1735 * Load in feature set.
1736 * Note: if compatibility property not given, we'll have
1737 * NULL, which means 'all features'.
1738 */
1739 boolean_t requested_features[SPA_FEATURES];
1740 if (zpool_do_load_compat(compat, requested_features) !=
1741 ZPOOL_COMPATIBILITY_OK)
1742 goto errout;
1743
1744 /*
1745 * props contains list of features to enable.
1746 * For each feature:
1747 * - remove it if feature@name=disabled
1748 * - leave it there if feature@name=enabled
1749 * - add it if:
1750 * - enable_pool_features (ie: no '-d' or '-o version')
1751 * - it's supported by the kernel module
1752 * - it's in the requested feature set
1753 * - warn if it's enabled but not in compat
1754 */
1755 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
1756 char propname[MAXPATHLEN];
1757 char *propval;
1758 zfeature_info_t *feat = &spa_feature_table[i];
1759
1760 (void) snprintf(propname, sizeof (propname),
1761 "feature@%s", feat->fi_uname);
1762
1763 if (!nvlist_lookup_string(props, propname, &propval)) {
1764 if (strcmp(propval,
1765 ZFS_FEATURE_DISABLED) == 0) {
1766 (void) nvlist_remove_all(props,
1767 propname);
1768 } else if (strcmp(propval,
1769 ZFS_FEATURE_ENABLED) == 0 &&
1770 !requested_features[i]) {
1771 (void) fprintf(stderr, gettext(
1772 "Warning: feature \"%s\" enabled "
1773 "but is not in specified "
1774 "'compatibility' feature set.\n"),
1775 feat->fi_uname);
1776 }
1777 } else if (
1778 enable_pool_features &&
1779 feat->fi_zfs_mod_supported &&
1780 requested_features[i]) {
1781 ret = add_prop_list(propname,
1782 ZFS_FEATURE_ENABLED, &props, B_TRUE);
1783 if (ret != 0)
1784 goto errout;
1785 }
1786 }
1787
1788 ret = 1;
1789 if (zpool_create(g_zfs, poolname,
1790 nvroot, props, fsprops) == 0) {
1791 zfs_handle_t *pool = zfs_open(g_zfs,
1792 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
1793 if (pool != NULL) {
1794 if (zfs_mount(pool, NULL, 0) == 0) {
1795 ret = zfs_share(pool, NULL);
1796 zfs_commit_shares(NULL);
1797 }
1798 zfs_close(pool);
1799 }
1800 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
1801 (void) fprintf(stderr, gettext("pool name may have "
1802 "been omitted\n"));
1803 }
1804 }
1805
1806 errout:
1807 nvlist_free(nvroot);
1808 nvlist_free(fsprops);
1809 nvlist_free(props);
1810 return (ret);
1811 badusage:
1812 nvlist_free(fsprops);
1813 nvlist_free(props);
1814 usage(B_FALSE);
1815 return (2);
1816 }
1817
1818 /*
1819 * zpool destroy <pool>
1820 *
1821 * -f Forcefully unmount any datasets
1822 *
1823 * Destroy the given pool. Automatically unmounts any datasets in the pool.
1824 */
1825 int
1826 zpool_do_destroy(int argc, char **argv)
1827 {
1828 boolean_t force = B_FALSE;
1829 int c;
1830 char *pool;
1831 zpool_handle_t *zhp;
1832 int ret;
1833
1834 /* check options */
1835 while ((c = getopt(argc, argv, "f")) != -1) {
1836 switch (c) {
1837 case 'f':
1838 force = B_TRUE;
1839 break;
1840 case '?':
1841 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1842 optopt);
1843 usage(B_FALSE);
1844 }
1845 }
1846
1847 argc -= optind;
1848 argv += optind;
1849
1850 /* check arguments */
1851 if (argc < 1) {
1852 (void) fprintf(stderr, gettext("missing pool argument\n"));
1853 usage(B_FALSE);
1854 }
1855 if (argc > 1) {
1856 (void) fprintf(stderr, gettext("too many arguments\n"));
1857 usage(B_FALSE);
1858 }
1859
1860 pool = argv[0];
1861
1862 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
1863 /*
1864 * As a special case, check for use of '/' in the name, and
1865 * direct the user to use 'zfs destroy' instead.
1866 */
1867 if (strchr(pool, '/') != NULL)
1868 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
1869 "destroy a dataset\n"));
1870 return (1);
1871 }
1872
1873 if (zpool_disable_datasets(zhp, force) != 0) {
1874 (void) fprintf(stderr, gettext("could not destroy '%s': "
1875 "could not unmount datasets\n"), zpool_get_name(zhp));
1876 zpool_close(zhp);
1877 return (1);
1878 }
1879
1880 /* The history must be logged as part of the export */
1881 log_history = B_FALSE;
1882
1883 ret = (zpool_destroy(zhp, history_str) != 0);
1884
1885 zpool_close(zhp);
1886
1887 return (ret);
1888 }
1889
1890 typedef struct export_cbdata {
1891 boolean_t force;
1892 boolean_t hardforce;
1893 } export_cbdata_t;
1894
1895 /*
1896 * Export one pool
1897 */
1898 static int
1899 zpool_export_one(zpool_handle_t *zhp, void *data)
1900 {
1901 export_cbdata_t *cb = data;
1902
1903 if (zpool_disable_datasets(zhp, cb->force) != 0)
1904 return (1);
1905
1906 /* The history must be logged as part of the export */
1907 log_history = B_FALSE;
1908
1909 if (cb->hardforce) {
1910 if (zpool_export_force(zhp, history_str) != 0)
1911 return (1);
1912 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
1913 return (1);
1914 }
1915
1916 return (0);
1917 }
1918
1919 /*
1920 * zpool export [-f] <pool> ...
1921 *
1922 * -a Export all pools
1923 * -f Forcefully unmount datasets
1924 *
1925 * Export the given pools. By default, the command will attempt to cleanly
1926 * unmount any active datasets within the pool. If the '-f' flag is specified,
1927 * then the datasets will be forcefully unmounted.
1928 */
1929 int
1930 zpool_do_export(int argc, char **argv)
1931 {
1932 export_cbdata_t cb;
1933 boolean_t do_all = B_FALSE;
1934 boolean_t force = B_FALSE;
1935 boolean_t hardforce = B_FALSE;
1936 int c, ret;
1937
1938 /* check options */
1939 while ((c = getopt(argc, argv, "afF")) != -1) {
1940 switch (c) {
1941 case 'a':
1942 do_all = B_TRUE;
1943 break;
1944 case 'f':
1945 force = B_TRUE;
1946 break;
1947 case 'F':
1948 hardforce = B_TRUE;
1949 break;
1950 case '?':
1951 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1952 optopt);
1953 usage(B_FALSE);
1954 }
1955 }
1956
1957 cb.force = force;
1958 cb.hardforce = hardforce;
1959 argc -= optind;
1960 argv += optind;
1961
1962 if (do_all) {
1963 if (argc != 0) {
1964 (void) fprintf(stderr, gettext("too many arguments\n"));
1965 usage(B_FALSE);
1966 }
1967
1968 return (for_each_pool(argc, argv, B_TRUE, NULL,
1969 ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
1970 }
1971
1972 /* check arguments */
1973 if (argc < 1) {
1974 (void) fprintf(stderr, gettext("missing pool argument\n"));
1975 usage(B_FALSE);
1976 }
1977
1978 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
1979 B_FALSE, zpool_export_one, &cb);
1980
1981 return (ret);
1982 }
1983
1984 /*
1985 * Given a vdev configuration, determine the maximum width needed for the device
1986 * name column.
1987 */
1988 static int
1989 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
1990 int name_flags)
1991 {
1992 static const char *const subtypes[] =
1993 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
1994
1995 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
1996 max = MAX(strlen(name) + depth, max);
1997 free(name);
1998
1999 nvlist_t **child;
2000 uint_t children;
2001 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2002 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2003 &child, &children) == 0)
2004 for (uint_t c = 0; c < children; ++c)
2005 max = MAX(max_width(zhp, child[c], depth + 2,
2006 max, name_flags), max);
2007
2008 return (max);
2009 }
2010
2011 typedef struct spare_cbdata {
2012 uint64_t cb_guid;
2013 zpool_handle_t *cb_zhp;
2014 } spare_cbdata_t;
2015
2016 static boolean_t
2017 find_vdev(nvlist_t *nv, uint64_t search)
2018 {
2019 uint64_t guid;
2020 nvlist_t **child;
2021 uint_t c, children;
2022
2023 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
2024 search == guid)
2025 return (B_TRUE);
2026
2027 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2028 &child, &children) == 0) {
2029 for (c = 0; c < children; c++)
2030 if (find_vdev(child[c], search))
2031 return (B_TRUE);
2032 }
2033
2034 return (B_FALSE);
2035 }
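/*
 * zpool_iter() callback used to locate the pool currently using a hot
 * spare, identified by guid.  Returning 1 stops the iteration and leaves
 * the matching handle open in cb_zhp for the caller to close; otherwise
 * the handle is closed here and returning 0 lets the iteration continue.
 */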
2036
2037 static int
2038 find_spare(zpool_handle_t *zhp, void *data)
2039 {
2040 spare_cbdata_t *cbp = data;
2041 nvlist_t *config, *nvroot;
2042
2043 config = zpool_get_config(zhp, NULL);
2044 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2045 &nvroot) == 0);
2046
2047 if (find_vdev(nvroot, cbp->cb_guid)) {
2048 cbp->cb_zhp = zhp;
2049 return (1);
2050 }
2051
2052 zpool_close(zhp);
2053 return (0);
2054 }
2055
2056 typedef struct status_cbdata {
2057 int cb_count;
2058 int cb_name_flags;
2059 int cb_namewidth;
2060 boolean_t cb_allpools;
2061 boolean_t cb_verbose;
2062 boolean_t cb_literal;
2063 boolean_t cb_explain;
2064 boolean_t cb_first;
2065 boolean_t cb_dedup_stats;
2066 boolean_t cb_print_status;
2067 boolean_t cb_print_slow_ios;
2068 boolean_t cb_print_vdev_init;
2069 boolean_t cb_print_vdev_trim;
2070 vdev_cmd_data_list_t *vcdl;
2071 } status_cbdata_t;
2072
2073 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2074 static boolean_t
2075 is_blank_str(const char *str)
2076 {
2077 for (; str != NULL && *str != '\0'; ++str)
2078 if (!isblank(*str))
2079 return (B_FALSE);
2080 return (B_TRUE);
2081 }
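/*
 * Illustrative behaviour: is_blank_str(NULL), is_blank_str("") and
 * is_blank_str(" \t ") are B_TRUE; is_blank_str(" x ") is B_FALSE.  Note
 * that isblank() only treats spaces and tabs as blank.
 */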
2082
2083 /* Print command output lines for a specific vdev in a specific pool */
2084 static void
2085 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
2086 {
2087 vdev_cmd_data_t *data;
2088 int i, j;
2089 const char *val;
2090
2091 for (i = 0; i < vcdl->count; i++) {
2092 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2093 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2094 /* Not the vdev we're looking for */
2095 continue;
2096 }
2097
2098 data = &vcdl->data[i];
2099 /* Print out all the output values for this vdev */
2100 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2101 val = NULL;
2102 /* Does this vdev have values for this column? */
2103 for (int k = 0; k < data->cols_cnt; k++) {
2104 if (strcmp(data->cols[k],
2105 vcdl->uniq_cols[j]) == 0) {
2106 /* yes it does, record the value */
2107 val = data->lines[k];
2108 break;
2109 }
2110 }
2111 /*
2112 * Mark empty values with dashes to make output
2113 * awk-able.
2114 */
2115 if (val == NULL || is_blank_str(val))
2116 val = "-";
2117
2118 printf("%*s", vcdl->uniq_cols_width[j], val);
2119 if (j < vcdl->uniq_cols_cnt - 1)
2120 fputs(" ", stdout);
2121 }
2122
2123 /* Print out any values that aren't in a column at the end */
2124 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2125 /* Did we have any columns? If so print a spacer. */
2126 if (vcdl->uniq_cols_cnt > 0)
2127 fputs(" ", stdout);
2128
2129 val = data->lines[j];
2130 fputs(val ?: "", stdout);
2131 }
2132 break;
2133 }
2134 }
2135
2136 /*
2137 * Print vdev initialization status for leaves
2138 */
2139 static void
2140 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2141 {
2142 if (verbose) {
2143 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2144 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2145 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2146 !vs->vs_scan_removing) {
2147 char zbuf[1024];
2148 char tbuf[256];
2149 struct tm zaction_ts;
2150
2151 time_t t = vs->vs_initialize_action_time;
2152 int initialize_pct = 100;
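/*
 * The "+ 1" in the divisor below guards against a division by zero
 * when the byte estimate has not been populated yet.
 */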
2153 if (vs->vs_initialize_state !=
2154 VDEV_INITIALIZE_COMPLETE) {
2155 initialize_pct = (vs->vs_initialize_bytes_done *
2156 100 / (vs->vs_initialize_bytes_est + 1));
2157 }
2158
2159 (void) localtime_r(&t, &zaction_ts);
2160 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2161
2162 switch (vs->vs_initialize_state) {
2163 case VDEV_INITIALIZE_SUSPENDED:
2164 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2165 gettext("suspended, started at"), tbuf);
2166 break;
2167 case VDEV_INITIALIZE_ACTIVE:
2168 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2169 gettext("started at"), tbuf);
2170 break;
2171 case VDEV_INITIALIZE_COMPLETE:
2172 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2173 gettext("completed at"), tbuf);
2174 break;
2175 }
2176
2177 (void) printf(gettext(" (%d%% initialized%s)"),
2178 initialize_pct, zbuf);
2179 } else {
2180 (void) printf(gettext(" (uninitialized)"));
2181 }
2182 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2183 (void) printf(gettext(" (initializing)"));
2184 }
2185 }
2186
2187 /*
2188 * Print vdev TRIM status for leaves
2189 */
2190 static void
2191 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2192 {
2193 if (verbose) {
2194 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2195 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2196 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2197 !vs->vs_scan_removing) {
2198 char zbuf[1024];
2199 char tbuf[256];
2200 struct tm zaction_ts;
2201
2202 time_t t = vs->vs_trim_action_time;
2203 int trim_pct = 100;
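/* As in print_status_initialize(), the "+ 1" below avoids a zero divisor. */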
2204 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2205 trim_pct = (vs->vs_trim_bytes_done *
2206 100 / (vs->vs_trim_bytes_est + 1));
2207 }
2208
2209 (void) localtime_r(&t, &zaction_ts);
2210 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2211
2212 switch (vs->vs_trim_state) {
2213 case VDEV_TRIM_SUSPENDED:
2214 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2215 gettext("suspended, started at"), tbuf);
2216 break;
2217 case VDEV_TRIM_ACTIVE:
2218 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2219 gettext("started at"), tbuf);
2220 break;
2221 case VDEV_TRIM_COMPLETE:
2222 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2223 gettext("completed at"), tbuf);
2224 break;
2225 }
2226
2227 (void) printf(gettext(" (%d%% trimmed%s)"),
2228 trim_pct, zbuf);
2229 } else if (vs->vs_trim_notsup) {
2230 (void) printf(gettext(" (trim unsupported)"));
2231 } else {
2232 (void) printf(gettext(" (untrimmed)"));
2233 }
2234 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2235 (void) printf(gettext(" (trimming)"));
2236 }
2237 }
2238
2239 /*
2240 * Return the color associated with a health string. This includes returning
2241 * NULL for no color change.
2242 */
2243 static const char *
2244 health_str_to_color(const char *health)
2245 {
2246 if (strcmp(health, gettext("FAULTED")) == 0 ||
2247 strcmp(health, gettext("SUSPENDED")) == 0 ||
2248 strcmp(health, gettext("UNAVAIL")) == 0) {
2249 return (ANSI_RED);
2250 }
2251
2252 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2253 strcmp(health, gettext("DEGRADED")) == 0 ||
2254 strcmp(health, gettext("REMOVED")) == 0) {
2255 return (ANSI_YELLOW);
2256 }
2257
2258 return (NULL);
2259 }
2260
2261 /*
2262 * Print out configuration state as requested by status_callback.
2263 */
2264 static void
2265 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2266 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2267 {
2268 nvlist_t **child, *root;
2269 uint_t c, i, vsc, children;
2270 pool_scan_stat_t *ps = NULL;
2271 vdev_stat_t *vs;
2272 char rbuf[6], wbuf[6], cbuf[6];
2273 char *vname;
2274 uint64_t notpresent;
2275 spare_cbdata_t spare_cb;
2276 const char *state;
2277 char *type;
2278 char *path = NULL;
2279 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
2280
2281 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2282 &child, &children) != 0)
2283 children = 0;
2284
2285 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2286 (uint64_t **)&vs, &vsc) == 0);
2287
2288 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2289
2290 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2291 return;
2292
2293 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2294
2295 if (isspare) {
2296 /*
2297 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
2298 * online drives.
2299 */
2300 if (vs->vs_aux == VDEV_AUX_SPARED)
2301 state = gettext("INUSE");
2302 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2303 state = gettext("AVAIL");
2304 }
2305
2306 printf_color(health_str_to_color(state),
2307 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2308 name, state);
2309
2310 if (!isspare) {
2311 if (vs->vs_read_errors)
2312 rcolor = ANSI_RED;
2313
2314 if (vs->vs_write_errors)
2315 wcolor = ANSI_RED;
2316
2317 if (vs->vs_checksum_errors)
2318 ccolor = ANSI_RED;
2319
2320 if (cb->cb_literal) {
2321 fputc(' ', stdout);
2322 printf_color(rcolor, "%5llu",
2323 (u_longlong_t)vs->vs_read_errors);
2324 fputc(' ', stdout);
2325 printf_color(wcolor, "%5llu",
2326 (u_longlong_t)vs->vs_write_errors);
2327 fputc(' ', stdout);
2328 printf_color(ccolor, "%5llu",
2329 (u_longlong_t)vs->vs_checksum_errors);
2330 } else {
2331 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2332 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2333 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2334 sizeof (cbuf));
2335 fputc(' ', stdout);
2336 printf_color(rcolor, "%5s", rbuf);
2337 fputc(' ', stdout);
2338 printf_color(wcolor, "%5s", wbuf);
2339 fputc(' ', stdout);
2340 printf_color(ccolor, "%5s", cbuf);
2341 }
2342 if (cb->cb_print_slow_ios) {
2343 if (children == 0) {
2344 /* Only leaf vdevs have slow I/Os */
2345 zfs_nicenum(vs->vs_slow_ios, rbuf,
2346 sizeof (rbuf));
2347 } else {
2348 snprintf(rbuf, sizeof (rbuf), "-");
2349 }
2350
2351 if (cb->cb_literal)
2352 printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
2353 else
2354 printf(" %5s", rbuf);
2355 }
2356 }
2357
2358 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2359 &notpresent) == 0) {
2360 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
2361 (void) printf(" %s %s", gettext("was"), path);
2362 } else if (vs->vs_aux != 0) {
2363 (void) printf(" ");
2364 color_start(ANSI_RED);
2365 switch (vs->vs_aux) {
2366 case VDEV_AUX_OPEN_FAILED:
2367 (void) printf(gettext("cannot open"));
2368 break;
2369
2370 case VDEV_AUX_BAD_GUID_SUM:
2371 (void) printf(gettext("missing device"));
2372 break;
2373
2374 case VDEV_AUX_NO_REPLICAS:
2375 (void) printf(gettext("insufficient replicas"));
2376 break;
2377
2378 case VDEV_AUX_VERSION_NEWER:
2379 (void) printf(gettext("newer version"));
2380 break;
2381
2382 case VDEV_AUX_UNSUP_FEAT:
2383 (void) printf(gettext("unsupported feature(s)"));
2384 break;
2385
2386 case VDEV_AUX_ASHIFT_TOO_BIG:
2387 (void) printf(gettext("unsupported minimum blocksize"));
2388 break;
2389
2390 case VDEV_AUX_SPARED:
2391 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2392 &spare_cb.cb_guid) == 0);
2393 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
2394 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
2395 zpool_get_name(zhp)) == 0)
2396 (void) printf(gettext("currently in "
2397 "use"));
2398 else
2399 (void) printf(gettext("in use by "
2400 "pool '%s'"),
2401 zpool_get_name(spare_cb.cb_zhp));
2402 zpool_close(spare_cb.cb_zhp);
2403 } else {
2404 (void) printf(gettext("currently in use"));
2405 }
2406 break;
2407
2408 case VDEV_AUX_ERR_EXCEEDED:
2409 (void) printf(gettext("too many errors"));
2410 break;
2411
2412 case VDEV_AUX_IO_FAILURE:
2413 (void) printf(gettext("experienced I/O failures"));
2414 break;
2415
2416 case VDEV_AUX_BAD_LOG:
2417 (void) printf(gettext("bad intent log"));
2418 break;
2419
2420 case VDEV_AUX_EXTERNAL:
2421 (void) printf(gettext("external device fault"));
2422 break;
2423
2424 case VDEV_AUX_SPLIT_POOL:
2425 (void) printf(gettext("split into new pool"));
2426 break;
2427
2428 case VDEV_AUX_ACTIVE:
2429 (void) printf(gettext("currently in use"));
2430 break;
2431
2432 case VDEV_AUX_CHILDREN_OFFLINE:
2433 (void) printf(gettext("all children offline"));
2434 break;
2435
2436 case VDEV_AUX_BAD_LABEL:
2437 (void) printf(gettext("invalid label"));
2438 break;
2439
2440 default:
2441 (void) printf(gettext("corrupted data"));
2442 break;
2443 }
2444 color_end();
2445 } else if (children == 0 && !isspare &&
2446 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
2447 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
2448 vs->vs_configured_ashift < vs->vs_physical_ashift) {
2449 (void) printf(
2450 gettext(" block size: %dB configured, %dB native"),
2451 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
2452 }
2453
2454 if (vs->vs_scan_removing != 0) {
2455 (void) printf(gettext(" (removing)"));
2456 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
2457 (void) printf(gettext(" (non-allocating)"));
2458 }
2459
2460 /* The root vdev has the scrub/resilver stats */
2461 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2462 ZPOOL_CONFIG_VDEV_TREE);
2463 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
2464 (uint64_t **)&ps, &c);
2465
2466 /*
2467 * If you force fault a drive that's resilvering, its scan stats can
2468 * get frozen in time, giving the false impression that it's
2469 * being resilvered. That's why we check the state to see if the vdev
2470 * is healthy before reporting "resilvering" or "repairing".
2471 */
2472 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
2473 vs->vs_state == VDEV_STATE_HEALTHY) {
2474 if (vs->vs_scan_processed != 0) {
2475 (void) printf(gettext(" (%s)"),
2476 (ps->pss_func == POOL_SCAN_RESILVER) ?
2477 "resilvering" : "repairing");
2478 } else if (vs->vs_resilver_deferred) {
2479 (void) printf(gettext(" (awaiting resilver)"));
2480 }
2481 }
2482
2483 /* The top-level vdevs have the rebuild stats */
2484 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
2485 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
2486 if (vs->vs_rebuild_processed != 0) {
2487 (void) printf(gettext(" (resilvering)"));
2488 }
2489 }
2490
2491 if (cb->vcdl != NULL) {
2492 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2493 printf(" ");
2494 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
2495 }
2496 }
2497
2498 /* Display vdev initialization and trim status for leaves. */
2499 if (children == 0) {
2500 print_status_initialize(vs, cb->cb_print_vdev_init);
2501 print_status_trim(vs, cb->cb_print_vdev_trim);
2502 }
2503
2504 (void) printf("\n");
2505
2506 for (c = 0; c < children; c++) {
2507 uint64_t islog = B_FALSE, ishole = B_FALSE;
2508
2509 /* Don't print logs or holes here */
2510 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2511 &islog);
2512 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2513 &ishole);
2514 if (islog || ishole)
2515 continue;
2516 /* Only print normal classes here */
2517 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2518 continue;
2519
2520 /* Provide vdev_rebuild_stats to children if available */
2521 if (vrs == NULL) {
2522 (void) nvlist_lookup_uint64_array(nv,
2523 ZPOOL_CONFIG_REBUILD_STATS,
2524 (uint64_t **)&vrs, &i);
2525 }
2526
2527 vname = zpool_vdev_name(g_zfs, zhp, child[c],
2528 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2529 print_status_config(zhp, cb, vname, child[c], depth + 2,
2530 isspare, vrs);
2531 free(vname);
2532 }
2533 }
2534
2535 /*
2536 * Print the configuration of an exported pool. Iterate over all vdevs in the
2537 * pool, printing out the name and status for each one.
2538 */
2539 static void
2540 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
2541 int depth)
2542 {
2543 nvlist_t **child;
2544 uint_t c, children;
2545 vdev_stat_t *vs;
2546 char *type, *vname;
2547
2548 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2549 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
2550 strcmp(type, VDEV_TYPE_HOLE) == 0)
2551 return;
2552
2553 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2554 (uint64_t **)&vs, &c) == 0);
2555
2556 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
2557 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
2558
2559 if (vs->vs_aux != 0) {
2560 (void) printf(" ");
2561
2562 switch (vs->vs_aux) {
2563 case VDEV_AUX_OPEN_FAILED:
2564 (void) printf(gettext("cannot open"));
2565 break;
2566
2567 case VDEV_AUX_BAD_GUID_SUM:
2568 (void) printf(gettext("missing device"));
2569 break;
2570
2571 case VDEV_AUX_NO_REPLICAS:
2572 (void) printf(gettext("insufficient replicas"));
2573 break;
2574
2575 case VDEV_AUX_VERSION_NEWER:
2576 (void) printf(gettext("newer version"));
2577 break;
2578
2579 case VDEV_AUX_UNSUP_FEAT:
2580 (void) printf(gettext("unsupported feature(s)"));
2581 break;
2582
2583 case VDEV_AUX_ERR_EXCEEDED:
2584 (void) printf(gettext("too many errors"));
2585 break;
2586
2587 case VDEV_AUX_ACTIVE:
2588 (void) printf(gettext("currently in use"));
2589 break;
2590
2591 case VDEV_AUX_CHILDREN_OFFLINE:
2592 (void) printf(gettext("all children offline"));
2593 break;
2594
2595 case VDEV_AUX_BAD_LABEL:
2596 (void) printf(gettext("invalid label"));
2597 break;
2598
2599 default:
2600 (void) printf(gettext("corrupted data"));
2601 break;
2602 }
2603 }
2604 (void) printf("\n");
2605
2606 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2607 &child, &children) != 0)
2608 return;
2609
2610 for (c = 0; c < children; c++) {
2611 uint64_t is_log = B_FALSE;
2612
2613 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2614 &is_log);
2615 if (is_log)
2616 continue;
2617 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2618 continue;
2619
2620 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2621 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2622 print_import_config(cb, vname, child[c], depth + 2);
2623 free(vname);
2624 }
2625
2626 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2627 &child, &children) == 0) {
2628 (void) printf(gettext("\tcache\n"));
2629 for (c = 0; c < children; c++) {
2630 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2631 cb->cb_name_flags);
2632 (void) printf("\t %s\n", vname);
2633 free(vname);
2634 }
2635 }
2636
2637 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2638 &child, &children) == 0) {
2639 (void) printf(gettext("\tspares\n"));
2640 for (c = 0; c < children; c++) {
2641 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2642 cb->cb_name_flags);
2643 (void) printf("\t %s\n", vname);
2644 free(vname);
2645 }
2646 }
2647 }
2648
2649 /*
2650 * Print specialized class vdevs.
2651 *
2652 * These are recorded as top level vdevs in the main pool child array
2653 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
2654 * print_status_config() or print_import_config() to print the top level
2655 * class vdevs, then any of their children (e.g. mirrored slogs) are printed
2656 * recursively, which works because only the top-level vdev is marked.
2657 */
2658 static void
2659 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
2660 const char *class)
2661 {
2662 uint_t c, children;
2663 nvlist_t **child;
2664 boolean_t printed = B_FALSE;
2665
2666 assert(zhp != NULL || !cb->cb_verbose);
2667
2668 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
2669 &children) != 0)
2670 return;
2671
2672 for (c = 0; c < children; c++) {
2673 uint64_t is_log = B_FALSE;
2674 char *bias = NULL;
2675 char *type = NULL;
2676
2677 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2678 &is_log);
2679
2680 if (is_log) {
2681 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
2682 } else {
2683 (void) nvlist_lookup_string(child[c],
2684 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
2685 (void) nvlist_lookup_string(child[c],
2686 ZPOOL_CONFIG_TYPE, &type);
2687 }
2688
2689 if (bias == NULL || strcmp(bias, class) != 0)
2690 continue;
2691 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2692 continue;
2693
2694 if (!printed) {
2695 (void) printf("\t%s\t\n", gettext(class));
2696 printed = B_TRUE;
2697 }
2698
2699 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
2700 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2701 if (cb->cb_print_status)
2702 print_status_config(zhp, cb, name, child[c], 2,
2703 B_FALSE, NULL);
2704 else
2705 print_import_config(cb, name, child[c], 2);
2706 free(name);
2707 }
2708 }
2709
2710 /*
2711 * Display the import status for the given pool configuration.
2712 */
2713 static int
2714 show_import(nvlist_t *config, boolean_t report_error)
2715 {
2716 uint64_t pool_state;
2717 vdev_stat_t *vs;
2718 char *name;
2719 uint64_t guid;
2720 uint64_t hostid = 0;
2721 const char *msgid;
2722 const char *hostname = "unknown";
2723 nvlist_t *nvroot, *nvinfo;
2724 zpool_status_t reason;
2725 zpool_errata_t errata;
2726 const char *health;
2727 uint_t vsc;
2728 char *comment;
2729 status_cbdata_t cb = { 0 };
2730
2731 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2732 &name) == 0);
2733 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2734 &guid) == 0);
2735 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2736 &pool_state) == 0);
2737 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2738 &nvroot) == 0);
2739
2740 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
2741 (uint64_t **)&vs, &vsc) == 0);
2742 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2743
2744 reason = zpool_import_status(config, &msgid, &errata);
2745
2746 /*
2747 * If we're importing using a cachefile, then we won't report any
2748 * errors unless we are in the scan phase of the import.
2749 */
2750 if (reason != ZPOOL_STATUS_OK && !report_error)
2751 return (reason);
2752
2753 (void) printf(gettext(" pool: %s\n"), name);
2754 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
2755 (void) printf(gettext(" state: %s"), health);
2756 if (pool_state == POOL_STATE_DESTROYED)
2757 (void) printf(gettext(" (DESTROYED)"));
2758 (void) printf("\n");
2759
2760 switch (reason) {
2761 case ZPOOL_STATUS_MISSING_DEV_R:
2762 case ZPOOL_STATUS_MISSING_DEV_NR:
2763 case ZPOOL_STATUS_BAD_GUID_SUM:
2764 printf_color(ANSI_BOLD, gettext("status: "));
2765 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2766 "missing from the system.\n"));
2767 break;
2768
2769 case ZPOOL_STATUS_CORRUPT_LABEL_R:
2770 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
2771 printf_color(ANSI_BOLD, gettext("status: "));
2772 printf_color(ANSI_YELLOW, gettext("One or more devices contains"
2773 " corrupted data.\n"));
2774 break;
2775
2776 case ZPOOL_STATUS_CORRUPT_DATA:
2777 (void) printf(
2778 gettext(" status: The pool data is corrupted.\n"));
2779 break;
2780
2781 case ZPOOL_STATUS_OFFLINE_DEV:
2782 printf_color(ANSI_BOLD, gettext("status: "));
2783 printf_color(ANSI_YELLOW, gettext("One or more devices "
2784 "are offlined.\n"));
2785 break;
2786
2787 case ZPOOL_STATUS_CORRUPT_POOL:
2788 printf_color(ANSI_BOLD, gettext("status: "));
2789 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
2790 "corrupted.\n"));
2791 break;
2792
2793 case ZPOOL_STATUS_VERSION_OLDER:
2794 printf_color(ANSI_BOLD, gettext("status: "));
2795 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2796 "a legacy on-disk version.\n"));
2797 break;
2798
2799 case ZPOOL_STATUS_VERSION_NEWER:
2800 printf_color(ANSI_BOLD, gettext("status: "));
2801 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2802 "an incompatible version.\n"));
2803 break;
2804
2805 case ZPOOL_STATUS_FEAT_DISABLED:
2806 printf_color(ANSI_BOLD, gettext("status: "));
2807 printf_color(ANSI_YELLOW, gettext("Some supported "
2808 "features are not enabled on the pool.\n\t"
2809 "(Note that they may be intentionally disabled "
2810 "if the\n\t'compatibility' property is set.)\n"));
2811 break;
2812
2813 case ZPOOL_STATUS_COMPATIBILITY_ERR:
2814 printf_color(ANSI_BOLD, gettext("status: "));
2815 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
2816 "the file(s) indicated by the 'compatibility'\n"
2817 "property.\n"));
2818 break;
2819
2820 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
2821 printf_color(ANSI_BOLD, gettext("status: "));
2822 printf_color(ANSI_YELLOW, gettext("One or more features "
2823 "are enabled on the pool despite not being\n"
2824 "requested by the 'compatibility' property.\n"));
2825 break;
2826
2827 case ZPOOL_STATUS_UNSUP_FEAT_READ:
2828 printf_color(ANSI_BOLD, gettext("status: "));
2829 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
2830 "feature(s) not supported on this system:\n"));
2831 color_start(ANSI_YELLOW);
2832 zpool_print_unsup_feat(config);
2833 color_end();
2834 break;
2835
2836 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
2837 printf_color(ANSI_BOLD, gettext("status: "));
2838 printf_color(ANSI_YELLOW, gettext("The pool can only be "
2839 "accessed in read-only mode on this system. It\n\tcannot be"
2840 " accessed in read-write mode because it uses the "
2841 "following\n\tfeature(s) not supported on this system:\n"));
2842 color_start(ANSI_YELLOW);
2843 zpool_print_unsup_feat(config);
2844 color_end();
2845 break;
2846
2847 case ZPOOL_STATUS_HOSTID_ACTIVE:
2848 printf_color(ANSI_BOLD, gettext("status: "));
2849 printf_color(ANSI_YELLOW, gettext("The pool is currently "
2850 "imported by another system.\n"));
2851 break;
2852
2853 case ZPOOL_STATUS_HOSTID_REQUIRED:
2854 printf_color(ANSI_BOLD, gettext("status: "));
2855 printf_color(ANSI_YELLOW, gettext("The pool has the "
2856 "multihost property on. It cannot\n\tbe safely imported "
2857 "when the system hostid is not set.\n"));
2858 break;
2859
2860 case ZPOOL_STATUS_HOSTID_MISMATCH:
2861 printf_color(ANSI_BOLD, gettext("status: "));
2862 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
2863 "by another system.\n"));
2864 break;
2865
2866 case ZPOOL_STATUS_FAULTED_DEV_R:
2867 case ZPOOL_STATUS_FAULTED_DEV_NR:
2868 printf_color(ANSI_BOLD, gettext("status: "));
2869 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2870 "faulted.\n"));
2871 break;
2872
2873 case ZPOOL_STATUS_BAD_LOG:
2874 printf_color(ANSI_BOLD, gettext("status: "));
2875 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
2876 "be read.\n"));
2877 break;
2878
2879 case ZPOOL_STATUS_RESILVERING:
2880 case ZPOOL_STATUS_REBUILDING:
2881 printf_color(ANSI_BOLD, gettext("status: "));
2882 printf_color(ANSI_YELLOW, gettext("One or more devices were "
2883 "being resilvered.\n"));
2884 break;
2885
2886 case ZPOOL_STATUS_ERRATA:
2887 printf_color(ANSI_BOLD, gettext("status: "));
2888 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
2889 errata);
2890 break;
2891
2892 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
2893 printf_color(ANSI_BOLD, gettext("status: "));
2894 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2895 "configured to use a non-native block size.\n"
2896 "\tExpect reduced performance.\n"));
2897 break;
2898
2899 default:
2900 /*
2901 * No other status can be seen when importing pools.
2902 */
2903 assert(reason == ZPOOL_STATUS_OK);
2904 }
2905
2906 /*
2907 * Print out an action according to the overall state of the pool.
2908 */
2909 if (vs->vs_state == VDEV_STATE_HEALTHY) {
2910 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
2911 reason == ZPOOL_STATUS_FEAT_DISABLED) {
2912 (void) printf(gettext(" action: The pool can be "
2913 "imported using its name or numeric identifier, "
2914 "though\n\tsome features will not be available "
2915 "without an explicit 'zpool upgrade'.\n"));
2916 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
2917 (void) printf(gettext(" action: The pool can be "
2918 "imported using its name or numeric\n\tidentifier, "
2919 "though the file(s) indicated by its "
2920 "'compatibility'\n\tproperty cannot be parsed at "
2921 "this time.\n"));
2922 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
2923 (void) printf(gettext(" action: The pool can be "
2924 "imported using its name or numeric "
2925 "identifier and\n\tthe '-f' flag.\n"));
2926 } else if (reason == ZPOOL_STATUS_ERRATA) {
2927 switch (errata) {
2928 case ZPOOL_ERRATA_NONE:
2929 break;
2930
2931 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
2932 (void) printf(gettext(" action: The pool can "
2933 "be imported using its name or numeric "
2934 "identifier,\n\thowever there is a compat"
2935 "ibility issue which should be corrected"
2936 "\n\tby running 'zpool scrub'\n"));
2937 break;
2938
2939 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
2940 (void) printf(gettext(" action: The pool can"
2941 "not be imported with this version of ZFS "
2942 "due to\n\tan active asynchronous destroy. "
2943 "Revert to an earlier version\n\tand "
2944 "allow the destroy to complete before "
2945 "updating.\n"));
2946 break;
2947
2948 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
2949 (void) printf(gettext(" action: Existing "
2950 "encrypted datasets contain an on-disk "
2951 "incompatibility, which\n\tneeds to be "
2952 "corrected. Backup these datasets to new "
2953 "encrypted datasets\n\tand destroy the "
2954 "old ones.\n"));
2955 break;
2956
2957 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
2958 (void) printf(gettext(" action: Existing "
2959 "encrypted snapshots and bookmarks contain "
2960 "an on-disk\n\tincompatibility. This may "
2961 "cause on-disk corruption if they are used"
2962 "\n\twith 'zfs recv'. To correct the "
2963 "issue, enable the bookmark_v2 feature.\n\t"
2964 "No additional action is needed if there "
2965 "are no encrypted snapshots or\n\t"
2966 "bookmarks. If preserving the encrypted "
2967 "snapshots and bookmarks is\n\trequired, "
2968 "use a non-raw send to backup and restore "
2969 "them. Alternately,\n\tthey may be removed"
2970 " to resolve the incompatibility.\n"));
2971 break;
2972 default:
2973 /*
2974 * All errata must contain an action message.
2975 */
2976 assert(0);
2977 }
2978 } else {
2979 (void) printf(gettext(" action: The pool can be "
2980 "imported using its name or numeric "
2981 "identifier.\n"));
2982 }
2983 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
2984 (void) printf(gettext(" action: The pool can be imported "
2985 "despite missing or damaged devices. The\n\tfault "
2986 "tolerance of the pool may be compromised if imported.\n"));
2987 } else {
2988 switch (reason) {
2989 case ZPOOL_STATUS_VERSION_NEWER:
2990 (void) printf(gettext(" action: The pool cannot be "
2991 "imported. Access the pool on a system running "
2992 "newer\n\tsoftware, or recreate the pool from "
2993 "backup.\n"));
2994 break;
2995 case ZPOOL_STATUS_UNSUP_FEAT_READ:
2996 printf_color(ANSI_BOLD, gettext("action: "));
2997 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
2998 "imported. Access the pool on a system that "
2999 "supports\n\tthe required feature(s), or recreate "
3000 "the pool from backup.\n"));
3001 break;
3002 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3003 printf_color(ANSI_BOLD, gettext("action: "));
3004 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3005 "imported in read-write mode. Import the pool "
3006 "with\n"
3007 "\t\"-o readonly=on\", access the pool on a system "
3008 "that supports the\n\trequired feature(s), or "
3009 "recreate the pool from backup.\n"));
3010 break;
3011 case ZPOOL_STATUS_MISSING_DEV_R:
3012 case ZPOOL_STATUS_MISSING_DEV_NR:
3013 case ZPOOL_STATUS_BAD_GUID_SUM:
3014 (void) printf(gettext(" action: The pool cannot be "
3015 "imported. Attach the missing\n\tdevices and try "
3016 "again.\n"));
3017 break;
3018 case ZPOOL_STATUS_HOSTID_ACTIVE:
3019 VERIFY0(nvlist_lookup_nvlist(config,
3020 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3021
3022 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3023 hostname = fnvlist_lookup_string(nvinfo,
3024 ZPOOL_CONFIG_MMP_HOSTNAME);
3025
3026 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3027 hostid = fnvlist_lookup_uint64(nvinfo,
3028 ZPOOL_CONFIG_MMP_HOSTID);
3029
3030 (void) printf(gettext(" action: The pool must be "
3031 "exported from %s (hostid=%"PRIx64")\n\tbefore it "
3032 "can be safely imported.\n"), hostname, hostid);
3033 break;
3034 case ZPOOL_STATUS_HOSTID_REQUIRED:
3035 (void) printf(gettext(" action: Set a unique system "
3036 "hostid with the zgenhostid(8) command.\n"));
3037 break;
3038 default:
3039 (void) printf(gettext(" action: The pool cannot be "
3040 "imported due to damaged devices or data.\n"));
3041 }
3042 }
3043
3044 /* Print the comment attached to the pool. */
3045 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3046 (void) printf(gettext("comment: %s\n"), comment);
3047
3048 /*
3049 * If the state is "closed" or "can't open", and the aux state
3050 * is "corrupt data":
3051 */
3052 if (((vs->vs_state == VDEV_STATE_CLOSED) ||
3053 (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
3054 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
3055 if (pool_state == POOL_STATE_DESTROYED)
3056 (void) printf(gettext("\tThe pool was destroyed, "
3057 "but can be imported using the '-Df' flags.\n"));
3058 else if (pool_state != POOL_STATE_EXPORTED)
3059 (void) printf(gettext("\tThe pool may be active on "
3060 "another system, but can be imported using\n\t"
3061 "the '-f' flag.\n"));
3062 }
3063
3064 if (msgid != NULL) {
3065 (void) printf(gettext(
3066 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3067 msgid);
3068 }
3069
3070 (void) printf(gettext(" config:\n\n"));
3071
3072 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3073 VDEV_NAME_TYPE_ID);
3074 if (cb.cb_namewidth < 10)
3075 cb.cb_namewidth = 10;
3076
3077 print_import_config(&cb, name, nvroot, 0);
3078
3079 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3080 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3081 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3082
3083 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3084 (void) printf(gettext("\n\tAdditional devices are known to "
3085 "be part of this pool, though their\n\texact "
3086 "configuration cannot be determined.\n"));
3087 }
3088 return (0);
3089 }
3090
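/*
 * Decide whether importing this config should require -f: a pool that was
 * not cleanly exported and was last written by a different hostid, or one
 * whose MMP state is anything other than inactive, must be force-imported.
 */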
3091 static boolean_t
3092 zfs_force_import_required(nvlist_t *config)
3093 {
3094 uint64_t state;
3095 uint64_t hostid = 0;
3096 nvlist_t *nvinfo;
3097
3098 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3099 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
3100
3101 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3102 return (B_TRUE);
3103
3104 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3105 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3106 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3107 ZPOOL_CONFIG_MMP_STATE);
3108
3109 if (mmp_state != MMP_STATE_INACTIVE)
3110 return (B_TRUE);
3111 }
3112
3113 return (B_FALSE);
3114 }
3115
3116 /*
3117 * Perform the import for the given configuration. This passes the heavy
3118 * lifting off to zpool_import_props(), and then mounts the datasets contained
3119 * within the pool.
3120 */
3121 static int
3122 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3123 nvlist_t *props, int flags)
3124 {
3125 int ret = 0;
3126 zpool_handle_t *zhp;
3127 const char *name;
3128 uint64_t version;
3129
3130 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3131 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3132
3133 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3134 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3135 "is formatted using an unsupported ZFS version\n"), name);
3136 return (1);
3137 } else if (zfs_force_import_required(config) &&
3138 !(flags & ZFS_IMPORT_ANY_HOST)) {
3139 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3140 nvlist_t *nvinfo;
3141
3142 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3143 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3144 mmp_state = fnvlist_lookup_uint64(nvinfo,
3145 ZPOOL_CONFIG_MMP_STATE);
3146
3147 if (mmp_state == MMP_STATE_ACTIVE) {
3148 const char *hostname = "<unknown>";
3149 uint64_t hostid = 0;
3150
3151 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3152 hostname = fnvlist_lookup_string(nvinfo,
3153 ZPOOL_CONFIG_MMP_HOSTNAME);
3154
3155 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3156 hostid = fnvlist_lookup_uint64(nvinfo,
3157 ZPOOL_CONFIG_MMP_HOSTID);
3158
3159 (void) fprintf(stderr, gettext("cannot import '%s': "
3160 "pool is imported on %s (hostid: "
3161 "0x%"PRIx64")\nExport the pool on the other "
3162 "system, then run 'zpool import'.\n"),
3163 name, hostname, hostid);
3164 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3165 (void) fprintf(stderr, gettext("Cannot import '%s': "
3166 "pool has the multihost property on and the\n"
3167 "system's hostid is not set. Set a unique hostid "
3168 "with the zgenhostid(8) command.\n"), name);
3169 } else {
3170 const char *hostname = "<unknown>";
3171 time_t timestamp = 0;
3172 uint64_t hostid = 0;
3173
3174 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3175 hostname = fnvlist_lookup_string(config,
3176 ZPOOL_CONFIG_HOSTNAME);
3177
3178 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3179 timestamp = fnvlist_lookup_uint64(config,
3180 ZPOOL_CONFIG_TIMESTAMP);
3181
3182 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3183 hostid = fnvlist_lookup_uint64(config,
3184 ZPOOL_CONFIG_HOSTID);
3185
3186 (void) fprintf(stderr, gettext("cannot import '%s': "
3187 "pool was previously in use from another system.\n"
3188 "Last accessed by %s (hostid=%"PRIx64") at %s"
3189 "The pool can be imported, use 'zpool import -f' "
3190 "to import the pool.\n"), name, hostname,
3191 hostid, ctime(&timestamp));
3192 }
3193
3194 return (1);
3195 }
3196
3197 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3198 return (1);
3199
3200 if (newname != NULL)
3201 name = newname;
3202
3203 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3204 return (1);
3205
3206 /*
3207 * Loading keys is best effort. We don't want to return immediately
3208 * if it fails but we do want to give the error to the caller.
3209 */
3210 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3211 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3212 ret = 1;
3213
3214 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3215 !(flags & ZFS_IMPORT_ONLY) &&
3216 zpool_enable_datasets(zhp, mntopts, 0) != 0) {
3217 zpool_close(zhp);
3218 return (1);
3219 }
3220
3221 zpool_close(zhp);
3222 return (ret);
3223 }
3224
3225 static int
3226 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3227 char *orig_name, char *new_name,
3228 boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
3229 importargs_t *import)
3230 {
3231 nvlist_t *config = NULL;
3232 nvlist_t *found_config = NULL;
3233 uint64_t pool_state;
3234
3235 /*
3236 * At this point we have a list of import candidate configs. Even if
3237 * we were searching by pool name or guid, we still need to
3238 * post-process the list to deal with pool state and possible
3239 * duplicate names.
3240 */
3241 int err = 0;
3242 nvpair_t *elem = NULL;
3243 boolean_t first = B_TRUE;
3244 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3245
3246 verify(nvpair_value_nvlist(elem, &config) == 0);
3247
3248 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3249 &pool_state) == 0);
3250 if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
3251 continue;
3252 if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
3253 continue;
3254
3255 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3256 import->policy) == 0);
3257
3258 if (!pool_specified) {
3259 if (first)
3260 first = B_FALSE;
3261 else if (!do_all)
3262 (void) fputc('\n', stdout);
3263
3264 if (do_all) {
3265 err |= do_import(config, NULL, mntopts,
3266 props, flags);
3267 } else {
3268 /*
3269 * If we're importing from cachefile, then
3270 * we don't want to report errors until we
3271 * are in the scan phase of the import. If
3272 * we get an error, then we return that error
3273 * to invoke the scan phase.
3274 */
3275 if (import->cachefile && !import->scan)
3276 err = show_import(config, B_FALSE);
3277 else
3278 (void) show_import(config, B_TRUE);
3279 }
3280 } else if (import->poolname != NULL) {
3281 char *name;
3282
3283 /*
3284 * We are searching for a pool based on name.
3285 */
3286 verify(nvlist_lookup_string(config,
3287 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
3288
3289 if (strcmp(name, import->poolname) == 0) {
3290 if (found_config != NULL) {
3291 (void) fprintf(stderr, gettext(
3292 "cannot import '%s': more than "
3293 "one matching pool\n"),
3294 import->poolname);
3295 (void) fprintf(stderr, gettext(
3296 "import by numeric ID instead\n"));
3297 err = B_TRUE;
3298 }
3299 found_config = config;
3300 }
3301 } else {
3302 uint64_t guid;
3303
3304 /*
3305 * Search for a pool by guid.
3306 */
3307 verify(nvlist_lookup_uint64(config,
3308 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
3309
3310 if (guid == import->guid)
3311 found_config = config;
3312 }
3313 }
3314
3315 /*
3316 * If we were searching for a specific pool, verify that we found a
3317 * pool, and then do the import.
3318 */
3319 if (pool_specified && err == 0) {
3320 if (found_config == NULL) {
3321 (void) fprintf(stderr, gettext("cannot import '%s': "
3322 "no such pool available\n"), orig_name);
3323 err = B_TRUE;
3324 } else {
3325 err |= do_import(found_config, new_name,
3326 mntopts, props, flags);
3327 }
3328 }
3329
3330 /*
3331 * If we were just looking for pools, report an error if none were
3332 * found.
3333 */
3334 if (!pool_specified && first)
3335 (void) fprintf(stderr,
3336 gettext("no pools available to import\n"));
3337 return (err);
3338 }
3339
3340 typedef struct target_exists_args {
3341 const char *poolname;
3342 uint64_t poolguid;
3343 } target_exists_args_t;
3344
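/*
 * zpool_iter() callback: reports (by returning 1) whether a pool matching
 * the requested name or guid is already imported on this system.
 */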
3345 static int
3346 name_or_guid_exists(zpool_handle_t *zhp, void *data)
3347 {
3348 target_exists_args_t *args = data;
3349 nvlist_t *config = zpool_get_config(zhp, NULL);
3350 int found = 0;
3351
3352 if (config == NULL)
3353 return (0);
3354
3355 if (args->poolname != NULL) {
3356 char *pool_name;
3357
3358 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3359 &pool_name) == 0);
3360 if (strcmp(pool_name, args->poolname) == 0)
3361 found = 1;
3362 } else {
3363 uint64_t pool_guid;
3364
3365 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3366 &pool_guid) == 0);
3367 if (pool_guid == args->poolguid)
3368 found = 1;
3369 }
3370 zpool_close(zhp);
3371
3372 return (found);
3373 }
3374 /*
3375 * zpool checkpoint <pool>
3376 * checkpoint --discard <pool>
3377 *
3378 * -d Discard the checkpoint from a checkpointed
3379 * --discard pool.
3380 *
3381 * -w Wait for discarding a checkpoint to complete.
3382 * --wait
3383 *
3384 * Checkpoints the specified pool by taking a "snapshot" of its
3385 * current state. A pool can only have one checkpoint at a time.
3386 */
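/*
 * Illustrative examples:
 *
 *   # zpool checkpoint tank           Take a checkpoint of 'tank'
 *   # zpool checkpoint -d tank        Discard the existing checkpoint
 *   # zpool checkpoint -d -w tank     Discard and wait for the discard
 *                                     to complete
 */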
3387 int
3388 zpool_do_checkpoint(int argc, char **argv)
3389 {
3390 boolean_t discard, wait;
3391 char *pool;
3392 zpool_handle_t *zhp;
3393 int c, err;
3394
3395 struct option long_options[] = {
3396 {"discard", no_argument, NULL, 'd'},
3397 {"wait", no_argument, NULL, 'w'},
3398 {0, 0, 0, 0}
3399 };
3400
3401 discard = B_FALSE;
3402 wait = B_FALSE;
3403 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
3404 switch (c) {
3405 case 'd':
3406 discard = B_TRUE;
3407 break;
3408 case 'w':
3409 wait = B_TRUE;
3410 break;
3411 case '?':
3412 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3413 optopt);
3414 usage(B_FALSE);
3415 }
3416 }
3417
3418 if (wait && !discard) {
3419 (void) fprintf(stderr, gettext("--wait only valid when "
3420 "--discard also specified\n"));
3421 usage(B_FALSE);
3422 }
3423
3424 argc -= optind;
3425 argv += optind;
3426
3427 if (argc < 1) {
3428 (void) fprintf(stderr, gettext("missing pool argument\n"));
3429 usage(B_FALSE);
3430 }
3431
3432 if (argc > 1) {
3433 (void) fprintf(stderr, gettext("too many arguments\n"));
3434 usage(B_FALSE);
3435 }
3436
3437 pool = argv[0];
3438
3439 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
3440 /* As a special case, check for use of '/' in the name */
3441 if (strchr(pool, '/') != NULL)
3442 (void) fprintf(stderr, gettext("'zpool checkpoint' "
3443 "doesn't work on datasets. To save the state "
3444 "of a dataset from a specific point in time "
3445 "please use 'zfs snapshot'\n"));
3446 return (1);
3447 }
3448
3449 if (discard) {
3450 err = (zpool_discard_checkpoint(zhp) != 0);
3451 if (err == 0 && wait)
3452 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
3453 } else {
3454 err = (zpool_checkpoint(zhp) != 0);
3455 }
3456
3457 zpool_close(zhp);
3458
3459 return (err);
3460 }
3461
3462 #define CHECKPOINT_OPT 1024
3463
3464 /*
3465 * zpool import [-d dir] [-D]
3466 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3467 * [-d dir | -c cachefile | -s] [-f] -a
3468 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3469 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
3470 * [newpool]
3471 *
3472 * -c Read pool information from a cachefile instead of searching
3473 * devices. If importing from a cachefile config fails, then
3474 * fallback to searching for devices only in the directories that
3475 * exist in the cachefile.
3476 *
3477 * -d Scan in a specific directory, other than /dev/. More than
3478 * one directory can be specified using multiple '-d' options.
3479 *
3480 * -D Scan for previously destroyed pools, or import all or only the
3481 * specified destroyed pools.
3482 *
3483 * -R Temporarily import the pool, with all mountpoints relative to
3484 * the given root. The pool will remain exported when the machine
3485 * is rebooted.
3486 *
3487 * -V Import even in the presence of faulted vdevs. This is an
3488 * intentionally undocumented option for testing purposes, and
3489 * treats the pool configuration as complete, leaving any bad
3490 * vdevs in the FAULTED state. In other words, it performs a
3491 * verbatim import.
3492 *
3493 * -f Force import, even if it appears that the pool is active.
3494 *
3495 * -F Attempt rewind if necessary.
3496 *
3497 * -n See if rewind would work, but don't actually rewind.
3498 *
3499 * -N Import the pool but don't mount datasets.
3500 *
3501 * -T Specify a starting txg to use for import. This is an
3502 * intentionally undocumented option for testing purposes.
3503 *
3504 * -a Import all pools found.
3505 *
3506 * -l Load encryption keys while importing.
3507 *
3508 * -o Set property=value and/or temporary mount options (without '=').
3509 *
3510 * -s Scan using the default search path; the libblkid cache will
3511 * not be consulted.
3512 *
3513 * --rewind-to-checkpoint
3514 * Import the pool and revert to the checkpoint.
3515 *
3516 * The import command scans for pools to import, and imports pools based on pool
3517 * name and GUID. The pool can also be renamed as part of the import process.
3518 */
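/*
 * Illustrative examples (pool names, ids and paths are placeholders):
 *
 *   # zpool import                         List importable pools
 *   # zpool import tank                    Import 'tank' by name
 *   # zpool import 1234567890123456 data   Import by numeric id, renaming
 *                                          the pool to 'data'
 *   # zpool import -d /dev/disk/by-id -a   Import every pool found in the
 *                                          given directory
 *   # zpool import -Df tank                Import a previously destroyed
 *                                          pool
 */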
3519 int
3520 zpool_do_import(int argc, char **argv)
3521 {
3522 char **searchdirs = NULL;
3523 char *env, *envdup = NULL;
3524 int nsearch = 0;
3525 int c;
3526 int err = 0;
3527 nvlist_t *pools = NULL;
3528 boolean_t do_all = B_FALSE;
3529 boolean_t do_destroyed = B_FALSE;
3530 char *mntopts = NULL;
3531 uint64_t searchguid = 0;
3532 char *searchname = NULL;
3533 char *propval;
3534 nvlist_t *policy = NULL;
3535 nvlist_t *props = NULL;
3536 int flags = ZFS_IMPORT_NORMAL;
3537 uint32_t rewind_policy = ZPOOL_NO_REWIND;
3538 boolean_t dryrun = B_FALSE;
3539 boolean_t do_rewind = B_FALSE;
3540 boolean_t xtreme_rewind = B_FALSE;
3541 boolean_t do_scan = B_FALSE;
3542 boolean_t pool_exists = B_FALSE;
3543 boolean_t pool_specified = B_FALSE;
3544 uint64_t txg = -1ULL;
3545 char *cachefile = NULL;
3546 importargs_t idata = { 0 };
3547 char *endptr;
3548
3549 struct option long_options[] = {
3550 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
3551 {0, 0, 0, 0}
3552 };
3553
3554 /* check options */
3555 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
3556 long_options, NULL)) != -1) {
3557 switch (c) {
3558 case 'a':
3559 do_all = B_TRUE;
3560 break;
3561 case 'c':
3562 cachefile = optarg;
3563 break;
3564 case 'd':
3565 searchdirs = safe_realloc(searchdirs,
3566 (nsearch + 1) * sizeof (char *));
3567 searchdirs[nsearch++] = optarg;
3568 break;
3569 case 'D':
3570 do_destroyed = B_TRUE;
3571 break;
3572 case 'f':
3573 flags |= ZFS_IMPORT_ANY_HOST;
3574 break;
3575 case 'F':
3576 do_rewind = B_TRUE;
3577 break;
3578 case 'l':
3579 flags |= ZFS_IMPORT_LOAD_KEYS;
3580 break;
3581 case 'm':
3582 flags |= ZFS_IMPORT_MISSING_LOG;
3583 break;
3584 case 'n':
3585 dryrun = B_TRUE;
3586 break;
3587 case 'N':
3588 flags |= ZFS_IMPORT_ONLY;
3589 break;
3590 case 'o':
3591 if ((propval = strchr(optarg, '=')) != NULL) {
3592 *propval = '\0';
3593 propval++;
3594 if (add_prop_list(optarg, propval,
3595 &props, B_TRUE))
3596 goto error;
3597 } else {
3598 mntopts = optarg;
3599 }
3600 break;
3601 case 'R':
3602 if (add_prop_list(zpool_prop_to_name(
3603 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
3604 goto error;
3605 if (add_prop_list_default(zpool_prop_to_name(
3606 ZPOOL_PROP_CACHEFILE), "none", &props))
3607 goto error;
3608 break;
3609 case 's':
3610 do_scan = B_TRUE;
3611 break;
3612 case 't':
3613 flags |= ZFS_IMPORT_TEMP_NAME;
3614 if (add_prop_list_default(zpool_prop_to_name(
3615 ZPOOL_PROP_CACHEFILE), "none", &props))
3616 goto error;
3617 break;
3618
3619 case 'T':
3620 errno = 0;
3621 txg = strtoull(optarg, &endptr, 0);
3622 if (errno != 0 || *endptr != '\0') {
3623 (void) fprintf(stderr,
3624 gettext("invalid txg value\n"));
3625 usage(B_FALSE);
3626 }
3627 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
3628 break;
3629 case 'V':
3630 flags |= ZFS_IMPORT_VERBATIM;
3631 break;
3632 case 'X':
3633 xtreme_rewind = B_TRUE;
3634 break;
3635 case CHECKPOINT_OPT:
3636 flags |= ZFS_IMPORT_CHECKPOINT;
3637 break;
3638 case ':':
3639 (void) fprintf(stderr, gettext("missing argument for "
3640 "'%c' option\n"), optopt);
3641 usage(B_FALSE);
3642 break;
3643 case '?':
3644 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3645 optopt);
3646 usage(B_FALSE);
3647 }
3648 }
3649
3650 argc -= optind;
3651 argv += optind;
3652
3653 if (cachefile && nsearch != 0) {
3654 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
3655 usage(B_FALSE);
3656 }
3657
3658 if (cachefile && do_scan) {
3659 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
3660 usage(B_FALSE);
3661 }
3662
3663 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
3664 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
3665 usage(B_FALSE);
3666 }
3667
3668 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
3669 (void) fprintf(stderr, gettext("-l is only meaningful during "
3670 "an import\n"));
3671 usage(B_FALSE);
3672 }
3673
3674 if ((dryrun || xtreme_rewind) && !do_rewind) {
3675 (void) fprintf(stderr,
3676 gettext("-n or -X only meaningful with -F\n"));
3677 usage(B_FALSE);
3678 }
3679 if (dryrun)
3680 rewind_policy = ZPOOL_TRY_REWIND;
3681 else if (do_rewind)
3682 rewind_policy = ZPOOL_DO_REWIND;
3683 if (xtreme_rewind)
3684 rewind_policy |= ZPOOL_EXTREME_REWIND;
3685
3686 /* In the future, we can capture further policy and include it here */
3687 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
3688 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
3689 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
3690 rewind_policy) != 0)
3691 goto error;
3692
3693 /* check argument count */
3694 if (do_all) {
3695 if (argc != 0) {
3696 (void) fprintf(stderr, gettext("too many arguments\n"));
3697 usage(B_FALSE);
3698 }
3699 } else {
3700 if (argc > 2) {
3701 (void) fprintf(stderr, gettext("too many arguments\n"));
3702 usage(B_FALSE);
3703 }
3704 }
3705
3706 /*
3707 * Check for the effective uid. We do this explicitly here because
3708 * otherwise any attempt to discover pools will silently fail.
3709 */
3710 if (argc == 0 && geteuid() != 0) {
3711 (void) fprintf(stderr, gettext("cannot "
3712 "discover pools: permission denied\n"));
3713
3714 free(searchdirs);
3715 nvlist_free(props);
3716 nvlist_free(policy);
3717 return (1);
3718 }
3719
3720 /*
3721 * Depending on the arguments given, we do one of the following:
3722 *
3723 * <none> Iterate through all pools and display information about
3724 * each one.
3725 *
3726 * -a Iterate through all pools and try to import each one.
3727 *
3728 * <id> Find the pool that corresponds to the given GUID/pool
3729 * name and import that one.
3730 *
3731 * -D The above options apply only to destroyed pools.
3732 */
3733 if (argc != 0) {
3734 char *endptr;
3735
3736 errno = 0;
3737 searchguid = strtoull(argv[0], &endptr, 10);
3738 if (errno != 0 || *endptr != '\0') {
3739 searchname = argv[0];
3740 searchguid = 0;
3741 }
3742 pool_specified = B_TRUE;
3743
3744 /*
3745 * User specified a name or guid. Ensure it's unique.
3746 */
3747 target_exists_args_t search = {searchname, searchguid};
3748 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
3749 }
3750
3751 /*
3752 * Check the environment for the preferred search path.
3753 */
3754 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
3755 char *dir, *tmp = NULL;
3756
3757 envdup = strdup(env);
3758
3759 for (dir = strtok_r(envdup, ":", &tmp);
3760 dir != NULL;
3761 dir = strtok_r(NULL, ":", &tmp)) {
3762 searchdirs = safe_realloc(searchdirs,
3763 (nsearch + 1) * sizeof (char *));
3764 searchdirs[nsearch++] = dir;
3765 }
3766 }
3767
3768 idata.path = searchdirs;
3769 idata.paths = nsearch;
3770 idata.poolname = searchname;
3771 idata.guid = searchguid;
3772 idata.cachefile = cachefile;
3773 idata.scan = do_scan;
3774 idata.policy = policy;
3775
3776 libpc_handle_t lpch = {
3777 .lpc_lib_handle = g_zfs,
3778 .lpc_ops = &libzfs_config_ops,
3779 .lpc_printerr = B_TRUE
3780 };
3781 pools = zpool_search_import(&lpch, &idata);
3782
3783 if (pools != NULL && pool_exists &&
3784 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
3785 (void) fprintf(stderr, gettext("cannot import '%s': "
3786 "a pool with that name already exists\n"),
3787 argv[0]);
3788 (void) fprintf(stderr, gettext("use the form '%s "
3789 "<pool | id> <newpool>' to give it a new name\n"),
3790 "zpool import");
3791 err = 1;
3792 } else if (pools == NULL && pool_exists) {
3793 (void) fprintf(stderr, gettext("cannot import '%s': "
3794 "a pool with that name is already created/imported,\n"),
3795 argv[0]);
3796 (void) fprintf(stderr, gettext("and no additional pools "
3797 "with that name were found\n"));
3798 err = 1;
3799 } else if (pools == NULL) {
3800 if (argc != 0) {
3801 (void) fprintf(stderr, gettext("cannot import '%s': "
3802 "no such pool available\n"), argv[0]);
3803 }
3804 err = 1;
3805 }
3806
3807 if (err == 1) {
3808 free(searchdirs);
3809 free(envdup);
3810 nvlist_free(policy);
3811 nvlist_free(pools);
3812 nvlist_free(props);
3813 return (1);
3814 }
3815
3816 err = import_pools(pools, props, mntopts, flags,
3817 argc >= 1 ? argv[0] : NULL,
3818 argc >= 2 ? argv[1] : NULL,
3819 do_destroyed, pool_specified, do_all, &idata);
3820
3821 /*
3822 * If we're using the cachefile and we failed to import, then
3823 * fall back to scanning the directories for pools that match
3824 * those in the cachefile.
3825 */
3826 if (err != 0 && cachefile != NULL) {
3827 (void) printf(gettext("cachefile import failed, retrying\n"));
3828
3829 /*
3830 * We use the scan flag to gather the directories that exist
3831 * in the cachefile. If we need to fall back to searching for
3832 * the pool config, we will only search devices in these
3833 * directories.
3834 */
3835 idata.scan = B_TRUE;
3836 nvlist_free(pools);
3837 pools = zpool_search_import(&lpch, &idata);
3838
3839 err = import_pools(pools, props, mntopts, flags,
3840 argc >= 1 ? argv[0] : NULL,
3841 argc >= 2 ? argv[1] : NULL,
3842 do_destroyed, pool_specified, do_all, &idata);
3843 }
3844
3845 error:
3846 nvlist_free(props);
3847 nvlist_free(pools);
3848 nvlist_free(policy);
3849 free(searchdirs);
3850 free(envdup);
3851
3852 return (err ? 1 : 0);
3853 }
3854
3855 /*
3856 * zpool sync [-f] [pool] ...
3857 *
3858 * -f (undocumented) Force an uberblock (and config, including the zpool
3859 * cache file) update.
3860 *
3861 * Sync the specified pool(s).
3862 * Without arguments "zpool sync" will sync all pools.
3863 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
3864 *
3865 */
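/*
 * Illustrative examples:
 *
 *   # zpool sync               Sync every imported pool
 *   # zpool sync tank data     Sync only the pools 'tank' and 'data'
 */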
3866 static int
3867 zpool_do_sync(int argc, char **argv)
3868 {
3869 int ret;
3870 boolean_t force = B_FALSE;
3871
3872 /* check options */
3873 while ((ret = getopt(argc, argv, "f")) != -1) {
3874 switch (ret) {
3875 case 'f':
3876 force = B_TRUE;
3877 break;
3878 case '?':
3879 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3880 optopt);
3881 usage(B_FALSE);
3882 }
3883 }
3884
3885 argc -= optind;
3886 argv += optind;
3887
3888 /* if argc == 0 we will execute zpool_sync_one on all pools */
3889 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
3890 B_FALSE, zpool_sync_one, &force);
3891
3892 return (ret);
3893 }
3894
3895 typedef struct iostat_cbdata {
3896 uint64_t cb_flags;
3897 int cb_namewidth;
3898 int cb_iteration;
3899 boolean_t cb_verbose;
3900 boolean_t cb_literal;
3901 boolean_t cb_scripted;
3902 zpool_list_t *cb_list;
3903 vdev_cmd_data_list_t *vcdl;
3904 vdev_cbdata_t cb_vdevs;
3905 } iostat_cbdata_t;
3906
3907 /* iostat labels */
3908 typedef struct name_and_columns {
3909 const char *name; /* Column name */
3910 unsigned int columns; /* Center name to this number of columns */
3911 } name_and_columns_t;
3912
3913 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
3914
3915 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
3916 {
3917 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
3918 {NULL}},
3919 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
3920 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
3921 {NULL}},
3922 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
3923 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
3924 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
3925 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
3926 {"asyncq_wait", 2}, {NULL}},
3927 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
3928 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
3929 {"trim", 2}, {"rebuild", 2}, {NULL}},
3930 };
3931
3932 /* Shorthand - if "columns" field not set, default to 1 column */
3933 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
3934 {
3935 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
3936 {"write"}, {NULL}},
3937 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
3938 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
3939 {NULL}},
3940 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
3941 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
3942 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
3943 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
3944 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
3945 {NULL}},
3946 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
3947 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
3948 {"ind"}, {"agg"}, {NULL}},
3949 };
3950
3951 static const char *histo_to_title[] = {
3952 [IOS_L_HISTO] = "latency",
3953 [IOS_RQ_HISTO] = "req_size",
3954 };
3955
3956 /*
3957 * Return the number of labels in a null-terminated name_and_columns_t
3958 * array.
3959 *
3960 */
3961 static unsigned int
3962 label_array_len(const name_and_columns_t *labels)
3963 {
3964 int i = 0;
3965
3966 while (labels[i].name)
3967 i++;
3968
3969 return (i);
3970 }
3971
3972 /*
3973 * Return the number of strings in a null-terminated string array.
3974 * For example:
3975 *
3976 * const char *foo[] = {"bar", "baz", NULL};
3977 *
3978 * returns 2
3979 */
3980 static uint64_t
3981 str_array_len(const char *array[])
3982 {
3983 uint64_t i = 0;
3984 while (array[i])
3985 i++;
3986
3987 return (i);
3988 }
3989
3990
3991 /*
3992 * Return a default column width for default/latency/queue columns. This does
3993 * not include histograms, which have their columns autosized.
3994 */
3995 static unsigned int
3996 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
3997 {
3998 unsigned long column_width = 5; /* Normal niceprint */
3999 static unsigned long widths[] = {
4000 /*
4001 * Choose some sane default column sizes for printing the
4002 * raw numbers.
4003 */
4004 [IOS_DEFAULT] = 15, /* 1PB capacity */
4005 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4006 [IOS_QUEUES] = 6, /* 1M queue entries */
4007 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4008 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4009 };
4010
4011 if (cb->cb_literal)
4012 column_width = widths[type];
4013
4014 return (column_width);
4015 }
4016
4017 /*
4018 * Print the column labels, i.e.:
4019 *
4020 * capacity operations bandwidth
4021 * alloc free read write read write ...
4022 *
4023 * If force_column_width is set, use it for the column width. If not set, use
4024 * the default column width.
4025 */
4026 static void
4027 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4028 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4029 {
4030 int i, idx, s;
4031 int text_start, rw_column_width, spaces_to_end;
4032 uint64_t flags = cb->cb_flags;
4033 uint64_t f;
4034 unsigned int column_width = force_column_width;
4035
4036 /* For each bit set in flags */
4037 for (f = flags; f; f &= ~(1ULL << idx)) {
4038 idx = lowbit64(f) - 1;
4039 if (!force_column_width)
4040 column_width = default_column_width(cb, idx);
4041 /* Print our top labels centered over "read write" label. */
4042 for (i = 0; i < label_array_len(labels[idx]); i++) {
4043 const char *name = labels[idx][i].name;
4044 /*
4045 * We treat labels[][].columns == 0 as shorthand
4046 * for one column. It makes writing out the label
4047 * tables more concise.
4048 */
4049 unsigned int columns = MAX(1, labels[idx][i].columns);
4050 unsigned int slen = strlen(name);
4051
4052 rw_column_width = (column_width * columns) +
4053 (2 * (columns - 1));
4054
4055 text_start = (int)((rw_column_width) / columns -
4056 slen / columns);
4057 if (text_start < 0)
4058 text_start = 0;
4059
4060 printf("  "); /* Two spaces between columns */
4061
4062 /* Space from beginning of column to label */
4063 for (s = 0; s < text_start; s++)
4064 printf(" ");
4065
4066 printf("%s", name);
4067
4068 /* Print space after label to end of column */
4069 spaces_to_end = rw_column_width - text_start - slen;
4070 if (spaces_to_end < 0)
4071 spaces_to_end = 0;
4072
4073 for (s = 0; s < spaces_to_end; s++)
4074 printf(" ");
4075 }
4076 }
4077 }
4078
4079
4080 /*
4081 * print_cmd_columns - Print custom column titles from -c
4082 *
4083 * If the user specified the "zpool status|iostat -c" then print their custom
4084 * column titles in the header. For example, print_cmd_columns() would print
4085 * the " col1 col2" part of this:
4086 *
4087 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4088 * ...
4089 * capacity operations bandwidth
4090 * pool alloc free read write read write col1 col2
4091 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4092 * mypool 269K 1008M 0 0 107 946
4093 * mirror 269K 1008M 0 0 107 946
4094 * sdb - - 0 0 102 473 val1 val2
4095 * sdc - - 0 0 5 473 val1 val2
4096 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4097 */
4098 static void
4099 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4100 {
4101 int i, j;
4102 vdev_cmd_data_t *data = &vcdl->data[0];
4103
4104 if (vcdl->count == 0 || data == NULL)
4105 return;
4106
4107 /*
4108 * Each vdev cmd should have the same column names unless the user did
4109 * something weird with their cmd. Just take the column names from the
4110 * first vdev and assume it works for all of them.
4111 */
4112 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4113 printf(" ");
4114 if (use_dashes) {
4115 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4116 printf("-");
4117 } else {
4118 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4119 vcdl->uniq_cols[i]);
4120 }
4121 }
4122 }
4123
4124
4125 /*
4126 * Utility function to print out a line of dashes like:
4127 *
4128 * -------------------------------- ----- ----- ----- ----- -----
4129 *
4130 * ...or a dashed named-row line like:
4131 *
4132 * logs - - - - -
4133 *
4134 * @cb: iostat data
4135 *
4136 * @force_column_width If non-zero, use the value as the column width.
4137 * Otherwise use the default column widths.
4138 *
4139 * @name: Print a dashed named-row line starting
4140 * with @name. Otherwise, print a regular
4141 * dashed line.
4142 */
4143 static void
4144 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4145 const char *name)
4146 {
4147 int i;
4148 unsigned int namewidth;
4149 uint64_t flags = cb->cb_flags;
4150 uint64_t f;
4151 int idx;
4152 const name_and_columns_t *labels;
4153 const char *title;
4154
4155
4156 if (cb->cb_flags & IOS_ANYHISTO_M) {
4157 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4158 } else if (cb->cb_vdevs.cb_names_count) {
4159 title = "vdev";
4160 } else {
4161 title = "pool";
4162 }
4163
4164 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4165 name ? strlen(name) : 0);
4166
4167
4168 if (name) {
4169 printf("%-*s", namewidth, name);
4170 } else {
4171 for (i = 0; i < namewidth; i++)
4172 (void) printf("-");
4173 }
4174
4175 /* For each bit in flags */
4176 for (f = flags; f; f &= ~(1ULL << idx)) {
4177 unsigned int column_width;
4178 idx = lowbit64(f) - 1;
4179 if (force_column_width)
4180 column_width = force_column_width;
4181 else
4182 column_width = default_column_width(cb, idx);
4183
4184 labels = iostat_bottom_labels[idx];
4185 for (i = 0; i < label_array_len(labels); i++) {
4186 if (name)
4187 printf(" %*s-", column_width - 1, " ");
4188 else
4189 printf(" %.*s", column_width,
4190 "--------------------");
4191 }
4192 }
4193 }
4194
4195
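/*
 * Print a full-width dashed separator line, optionally using a forced
 * column width instead of the defaults.
 */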
4196 static void
4197 print_iostat_separator_impl(iostat_cbdata_t *cb,
4198 unsigned int force_column_width)
4199 {
4200 print_iostat_dashes(cb, force_column_width, NULL);
4201 }
4202
4203 static void
4204 print_iostat_separator(iostat_cbdata_t *cb)
4205 {
4206 print_iostat_separator_impl(cb, 0);
4207 }
4208
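/*
 * Print the iostat header: the pool/vdev title column, the top and bottom
 * stat labels, any custom -c column titles, and a trailing separator line.
 */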
4209 static void
4210 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
4211 const char *histo_vdev_name)
4212 {
4213 unsigned int namewidth;
4214 const char *title;
4215
4216 if (cb->cb_flags & IOS_ANYHISTO_M) {
4217 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4218 } else if (cb->cb_vdevs.cb_names_count) {
4219 title = "vdev";
4220 } else {
4221 title = "pool";
4222 }
4223
4224 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4225 histo_vdev_name ? strlen(histo_vdev_name) : 0);
4226
4227 if (histo_vdev_name)
4228 printf("%-*s", namewidth, histo_vdev_name);
4229 else
4230 printf("%*s", namewidth, "");
4231
4232
4233 print_iostat_labels(cb, force_column_width, iostat_top_labels);
4234 printf("\n");
4235
4236 printf("%-*s", namewidth, title);
4237
4238 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
4239 if (cb->vcdl != NULL)
4240 print_cmd_columns(cb->vcdl, 0);
4241
4242 printf("\n");
4243
4244 print_iostat_separator_impl(cb, force_column_width);
4245
4246 if (cb->vcdl != NULL)
4247 print_cmd_columns(cb->vcdl, 1);
4248
4249 printf("\n");
4250 }
4251
4252 static void
4253 print_iostat_header(iostat_cbdata_t *cb)
4254 {
4255 print_iostat_header_impl(cb, 0, NULL);
4256 }
4257
4258
4259 /*
4260 * Display a single statistic.
4261 */
4262 static void
4263 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
4264 unsigned int column_size, boolean_t scripted)
4265 {
4266 char buf[64];
4267
4268 zfs_nicenum_format(value, buf, sizeof (buf), format);
4269
4270 if (scripted)
4271 printf("\t%s", buf);
4272 else
4273 printf(" %*s", column_size, buf);
4274 }
4275
4276 /*
4277 * Calculate the default vdev stats
4278 *
4279 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
4280 * stats into calcvs.
4281 */
4282 static void
4283 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
4284 vdev_stat_t *calcvs)
4285 {
4286 int i;
4287
4288 memcpy(calcvs, newvs, sizeof (*calcvs));
4289 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
4290 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
4291
4292 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
4293 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
4294 }
4295
4296 /*
4297 * Internal representation of the extended iostats data.
4298 *
4299 * The extended iostat stats are exported in nvlists as either uint64_t arrays
4300 * or single uint64_t's. We make both look like arrays to make them easier
4301 * to process. In order to make single uint64_t's look like arrays, we set
4302 * __data to the stat data, and then set data = &__data with count = 1. Then,
4303 * we can just use *data and count.
4304 */
4305 struct stat_array {
4306 uint64_t *data;
4307 uint_t count; /* Number of entries in data[] */
4308 uint64_t __data; /* Only used when data is a single uint64_t */
4309 };
4310
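/*
 * Return the largest value found across an array of stat_arrays. Used to
 * size histogram columns when printing literal (-p) values.
 */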
4311 static uint64_t
4312 stat_histo_max(struct stat_array *nva, unsigned int len)
4313 {
4314 uint64_t max = 0;
4315 int i;
4316 for (i = 0; i < len; i++)
4317 max = MAX(max, array64_max(nva[i].data, nva[i].count));
4318
4319 return (max);
4320 }
4321
4322 /*
4323 * Helper function to lookup a uint64_t array or uint64_t value and store its
4324 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
4325 * it look like a one element array to make it easier to process.
4326 */
4327 static int
4328 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
4329 struct stat_array *nva)
4330 {
4331 nvpair_t *tmp;
4332 int ret;
4333
4334 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
4335 switch (nvpair_type(tmp)) {
4336 case DATA_TYPE_UINT64_ARRAY:
4337 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
4338 break;
4339 case DATA_TYPE_UINT64:
4340 ret = nvpair_value_uint64(tmp, &nva->__data);
4341 nva->data = &nva->__data;
4342 nva->count = 1;
4343 break;
4344 default:
4345 /* Not a uint64_t */
4346 ret = EINVAL;
4347 break;
4348 }
4349
4350 return (ret);
4351 }
4352
4353 /*
4354 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
4355 * subtract them, and return the results in a newly allocated stat_array.
4356 * You must free the returned array after you are done with it with
4357 * free_calc_stats().
4358 *
4359 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
4360 * values.
4361 */
4362 static struct stat_array *
4363 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
4364 nvlist_t *newnv)
4365 {
4366 nvlist_t *oldnvx = NULL, *newnvx;
4367 struct stat_array *oldnva, *newnva, *calcnva;
4368 int i, j;
4369 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
4370
4371 /* Extract our extended stats nvlist from the main list */
4372 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4373 &newnvx) == 0);
4374 if (oldnv) {
4375 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4376 &oldnvx) == 0);
4377 }
4378
4379 newnva = safe_malloc(alloc_size);
4380 oldnva = safe_malloc(alloc_size);
4381 calcnva = safe_malloc(alloc_size);
4382
4383 for (j = 0; j < len; j++) {
4384 verify(nvpair64_to_stat_array(newnvx, names[j],
4385 &newnva[j]) == 0);
4386 calcnva[j].count = newnva[j].count;
4387 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
4388 calcnva[j].data = safe_malloc(alloc_size);
4389 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
4390
4391 if (oldnvx) {
4392 verify(nvpair64_to_stat_array(oldnvx, names[j],
4393 &oldnva[j]) == 0);
4394 for (i = 0; i < oldnva[j].count; i++)
4395 calcnva[j].data[i] -= oldnva[j].data[i];
4396 }
4397 }
4398 free(newnva);
4399 free(oldnva);
4400 return (calcnva);
4401 }
4402
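/* Free a stat_array list allocated by calc_and_alloc_stats_ex(). */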
4403 static void
4404 free_calc_stats(struct stat_array *nva, unsigned int len)
4405 {
4406 int i;
4407 for (i = 0; i < len; i++)
4408 free(nva[i].data);
4409
4410 free(nva);
4411 }
4412
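/*
 * Print the body of one histogram table: one row per bucket, labeled with
 * the bucket's upper latency bound (latency histograms) or starting request
 * size (request size histograms), followed by the scaled per-column values.
 */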
4413 static void
4414 print_iostat_histo(struct stat_array *nva, unsigned int len,
4415 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
4416 double scale)
4417 {
4418 int i, j;
4419 char buf[6];
4420 uint64_t val;
4421 enum zfs_nicenum_format format;
4422 unsigned int buckets;
4423 unsigned int start_bucket;
4424
4425 if (cb->cb_literal)
4426 format = ZFS_NICENUM_RAW;
4427 else
4428 format = ZFS_NICENUM_1024;
4429
4430 /* All these histos are the same size, so just use nva[0].count */
4431 buckets = nva[0].count;
4432
4433 if (cb->cb_flags & IOS_RQ_HISTO_M) {
4434 /* Start at 512 - req size should never be lower than this */
4435 start_bucket = 9;
4436 } else {
4437 start_bucket = 0;
4438 }
4439
4440 for (j = start_bucket; j < buckets; j++) {
4441 /* Print histogram bucket label */
4442 if (cb->cb_flags & IOS_L_HISTO_M) {
4443 /* Ending range of this bucket */
4444 val = (1UL << (j + 1)) - 1;
4445 zfs_nicetime(val, buf, sizeof (buf));
4446 } else {
4447 /* Request size (starting range of bucket) */
4448 val = (1UL << j);
4449 zfs_nicenum(val, buf, sizeof (buf));
4450 }
4451
4452 if (cb->cb_scripted)
4453 printf("%llu", (u_longlong_t)val);
4454 else
4455 printf("%-*s", namewidth, buf);
4456
4457 /* Print the values on the line */
4458 for (i = 0; i < len; i++) {
4459 print_one_stat(nva[i].data[j] * scale, format,
4460 column_width, cb->cb_scripted);
4461 }
4462 printf("\n");
4463 }
4464 }
4465
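/* Print a solid separator line of 'length' dashes. */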
4466 static void
4467 print_solid_separator(unsigned int length)
4468 {
4469 while (length--)
4470 printf("-");
4471 printf("\n");
4472 }
4473
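/*
 * Print a complete histogram block for one vdev: a header containing the
 * vdev name, the bucket rows, and a trailing solid separator.
 */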
4474 static void
4475 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
4476 nvlist_t *newnv, double scale, const char *name)
4477 {
4478 unsigned int column_width;
4479 unsigned int namewidth;
4480 unsigned int entire_width;
4481 enum iostat_type type;
4482 struct stat_array *nva;
4483 const char **names;
4484 unsigned int names_len;
4485
4486 /* What type of histo are we? */
4487 type = IOS_HISTO_IDX(cb->cb_flags);
4488
4489 /* Get NULL-terminated array of nvlist names for our histo */
4490 names = vsx_type_to_nvlist[type];
4491 names_len = str_array_len(names); /* num of names */
4492
4493 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
4494
4495 if (cb->cb_literal) {
4496 column_width = MAX(5,
4497 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
4498 } else {
4499 column_width = 5;
4500 }
4501
4502 namewidth = MAX(cb->cb_namewidth,
4503 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
4504
4505 /*
4506 * Calculate the entire line width of what we're printing. The
4507 * +2 is for the two spaces between columns:
4508 */
4509 /* read write */
4510 /* ----- ----- */
4511 /* |___| <---------- column_width */
4512 /* */
4513 /* |__________| <--- entire_width */
4514 /* */
4515 entire_width = namewidth + (column_width + 2) *
4516 label_array_len(iostat_bottom_labels[type]);
4517
4518 if (cb->cb_scripted)
4519 printf("%s\n", name);
4520 else
4521 print_iostat_header_impl(cb, column_width, name);
4522
4523 print_iostat_histo(nva, names_len, cb, column_width,
4524 namewidth, scale);
4525
4526 free_calc_stats(nva, names_len);
4527 if (!cb->cb_scripted)
4528 print_solid_separator(entire_width);
4529 }
4530
4531 /*
4532 * Calculate the average latency of a power-of-two latency histogram
4533 */
4534 static uint64_t
4535 single_histo_average(uint64_t *histo, unsigned int buckets)
4536 {
4537 int i;
4538 uint64_t count = 0, total = 0;
4539
4540 for (i = 0; i < buckets; i++) {
4541 /*
4542 * Our buckets are power-of-two latency ranges. Use the
4543 * midpoint latency of each bucket to calculate the average.
4544 * For example:
4545 *
4546 * Bucket Midpoint
4547 * 8ns-15ns: 12ns
4548 * 16ns-31ns: 24ns
4549 * ...
4550 */
4551 if (histo[i] != 0) {
4552 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
4553 count += histo[i];
4554 }
4555 }
4556
4557 /* Prevent divide by zero */
4558 return (count == 0 ? 0 : total / count);
4559 }
4560
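/*
 * Print the pending and active queue depths for each I/O class on the
 * current line (-q output). Queue depths are point-in-time values, so only
 * the newest stats are used.
 */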
4561 static void
4562 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
4563 {
4564 const char *names[] = {
4565 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
4566 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
4567 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
4568 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
4569 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
4570 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
4571 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
4572 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
4573 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
4574 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
4575 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
4576 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
4577 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
4578 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
4579 };
4580
4581 struct stat_array *nva;
4582
4583 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
4584 enum zfs_nicenum_format format;
4585
4586 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
4587
4588 if (cb->cb_literal)
4589 format = ZFS_NICENUM_RAW;
4590 else
4591 format = ZFS_NICENUM_1024;
4592
4593 for (int i = 0; i < ARRAY_SIZE(names); i++) {
4594 uint64_t val = nva[i].data[0];
4595 print_one_stat(val, format, column_width, cb->cb_scripted);
4596 }
4597
4598 free_calc_stats(nva, ARRAY_SIZE(names));
4599 }
4600
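/*
 * Print the average latency of each I/O class on the current line
 * (-l output), computed from the per-class latency histograms.
 */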
4601 static void
4602 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
4603 nvlist_t *newnv)
4604 {
4605 int i;
4606 uint64_t val;
4607 const char *names[] = {
4608 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
4609 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
4610 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
4611 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
4612 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
4613 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
4614 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
4615 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
4616 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
4617 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
4618 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
4619 };
4620 struct stat_array *nva;
4621
4622 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
4623 enum zfs_nicenum_format format;
4624
4625 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
4626
4627 if (cb->cb_literal)
4628 format = ZFS_NICENUM_RAWTIME;
4629 else
4630 format = ZFS_NICENUM_TIME;
4631
4632 /* Print our avg latencies on the line */
4633 for (i = 0; i < ARRAY_SIZE(names); i++) {
4634 /* Compute average latency for a latency histo */
4635 val = single_histo_average(nva[i].data, nva[i].count);
4636 print_one_stat(val, format, column_width, cb->cb_scripted);
4637 }
4638 free_calc_stats(nva, ARRAY_SIZE(names));
4639 }
4640
4641 /*
4642 * Print default statistics (capacity/operations/bandwidth)
4643 */
4644 static void
4645 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
4646 {
4647 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
4648 enum zfs_nicenum_format format;
4649 char na; /* char to print for "not applicable" values */
4650
4651 if (cb->cb_literal) {
4652 format = ZFS_NICENUM_RAW;
4653 na = '0';
4654 } else {
4655 format = ZFS_NICENUM_1024;
4656 na = '-';
4657 }
4658
4659 /* only toplevel vdevs have capacity stats */
4660 if (vs->vs_space == 0) {
4661 if (cb->cb_scripted)
4662 printf("\t%c\t%c", na, na);
4663 else
4664 printf(" %*c %*c", column_width, na, column_width,
4665 na);
4666 } else {
4667 print_one_stat(vs->vs_alloc, format, column_width,
4668 cb->cb_scripted);
4669 print_one_stat(vs->vs_space - vs->vs_alloc, format,
4670 column_width, cb->cb_scripted);
4671 }
4672
4673 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
4674 format, column_width, cb->cb_scripted);
4675 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
4676 format, column_width, cb->cb_scripted);
4677 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
4678 format, column_width, cb->cb_scripted);
4679 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
4680 format, column_width, cb->cb_scripted);
4681 }
4682
4683 static const char *const class_name[] = {
4684 VDEV_ALLOC_BIAS_DEDUP,
4685 VDEV_ALLOC_BIAS_SPECIAL,
4686 VDEV_ALLOC_CLASS_LOGS
4687 };
4688
4689 /*
4690 * Print out all the statistics for the given vdev. This can either be the
4691 * toplevel configuration, or called recursively. If 'name' is NULL, then this
4692 * is a verbose output, and we don't want to display the toplevel pool stats.
4693 *
4694 * Returns the number of stat lines printed.
4695 */
4696 static unsigned int
4697 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
4698 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
4699 {
4700 nvlist_t **oldchild, **newchild;
4701 uint_t c, children, oldchildren;
4702 vdev_stat_t *oldvs, *newvs, *calcvs;
4703 vdev_stat_t zerovs = { 0 };
4704 char *vname;
4705 int i;
4706 int ret = 0;
4707 uint64_t tdelta;
4708 double scale;
4709
4710 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
4711 return (ret);
4712
4713 calcvs = safe_malloc(sizeof (*calcvs));
4714
4715 if (oldnv != NULL) {
4716 verify(nvlist_lookup_uint64_array(oldnv,
4717 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
4718 } else {
4719 oldvs = &zerovs;
4720 }
4721
4722 /* Do we only want to see a specific vdev? */
4723 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
4724 /* Yes we do. Is this the vdev? */
4725 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
4726 /*
4727 * This is our vdev. Since it is the only vdev we
4728 * will be displaying, make depth = 0 so that it
4729 * doesn't get indented.
4730 */
4731 depth = 0;
4732 break;
4733 }
4734 }
4735
4736 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
4737 /* Couldn't match the name */
4738 goto children;
4739 }
4740
4741
4742 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
4743 (uint64_t **)&newvs, &c) == 0);
4744
4745 /*
4746 * Print the vdev name unless it is a histogram. Histograms
4747 * display the vdev name in the header itself.
4748 */
4749 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
4750 if (cb->cb_scripted) {
4751 printf("%s", name);
4752 } else {
4753 if (strlen(name) + depth > cb->cb_namewidth)
4754 (void) printf("%*s%s", depth, "", name);
4755 else
4756 (void) printf("%*s%s%*s", depth, "", name,
4757 (int)(cb->cb_namewidth - strlen(name) -
4758 depth), "");
4759 }
4760 }
4761
4762 /* Calculate our scaling factor */
4763 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
4764 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
4765 /*
4766 * If we specify printing histograms with no time interval, then
4767 * print the histogram numbers over the entire lifetime of the
4768 * vdev.
4769 */
4770 scale = 1;
4771 } else {
4772 if (tdelta == 0)
4773 scale = 1.0;
4774 else
4775 scale = (double)NANOSEC / tdelta;
4776 }
4777
4778 if (cb->cb_flags & IOS_DEFAULT_M) {
4779 calc_default_iostats(oldvs, newvs, calcvs);
4780 print_iostat_default(calcvs, cb, scale);
4781 }
4782 if (cb->cb_flags & IOS_LATENCY_M)
4783 print_iostat_latency(cb, oldnv, newnv);
4784 if (cb->cb_flags & IOS_QUEUES_M)
4785 print_iostat_queues(cb, newnv);
4786 if (cb->cb_flags & IOS_ANYHISTO_M) {
4787 printf("\n");
4788 print_iostat_histos(cb, oldnv, newnv, scale, name);
4789 }
4790
4791 if (cb->vcdl != NULL) {
4792 char *path;
4793 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
4794 &path) == 0) {
4795 printf(" ");
4796 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
4797 }
4798 }
4799
4800 if (!(cb->cb_flags & IOS_ANYHISTO_M))
4801 printf("\n");
4802
4803 ret++;
4804
4805 children:
4806
4807 free(calcvs);
4808
4809 if (!cb->cb_verbose)
4810 return (ret);
4811
4812 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
4813 &newchild, &children) != 0)
4814 return (ret);
4815
4816 if (oldnv) {
4817 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
4818 &oldchild, &oldchildren) != 0)
4819 return (ret);
4820
4821 children = MIN(oldchildren, children);
4822 }
4823
4824 /*
4825 * print normal top-level devices
4826 */
4827 for (c = 0; c < children; c++) {
4828 uint64_t ishole = B_FALSE, islog = B_FALSE;
4829
4830 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
4831 &ishole);
4832
4833 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
4834 &islog);
4835
4836 if (ishole || islog)
4837 continue;
4838
4839 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
4840 continue;
4841
4842 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4843 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
4844 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
4845 newchild[c], cb, depth + 2);
4846 free(vname);
4847 }
4848
4849 /*
4850 * print all other top-level devices
4851 */
4852 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
4853 boolean_t printed = B_FALSE;
4854
4855 for (c = 0; c < children; c++) {
4856 uint64_t islog = B_FALSE;
4857 char *bias = NULL;
4858 char *type = NULL;
4859
4860 (void) nvlist_lookup_uint64(newchild[c],
4861 ZPOOL_CONFIG_IS_LOG, &islog);
4862 if (islog) {
4863 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
4864 } else {
4865 (void) nvlist_lookup_string(newchild[c],
4866 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
4867 (void) nvlist_lookup_string(newchild[c],
4868 ZPOOL_CONFIG_TYPE, &type);
4869 }
4870 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
4871 continue;
4872 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
4873 continue;
4874
4875 if (!printed) {
4876 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
4877 !cb->cb_scripted &&
4878 !cb->cb_vdevs.cb_names) {
4879 print_iostat_dashes(cb, 0,
4880 class_name[n]);
4881 }
4882 printf("\n");
4883 printed = B_TRUE;
4884 }
4885
4886 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4887 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
4888 ret += print_vdev_stats(zhp, vname, oldnv ?
4889 oldchild[c] : NULL, newchild[c], cb, depth + 2);
4890 free(vname);
4891 }
4892 }
4893
4894 /*
4895 * Include level 2 ARC devices in iostat output
4896 */
4897 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
4898 &newchild, &children) != 0)
4899 return (ret);
4900
4901 if (oldnv) {
4902 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
4903 &oldchild, &oldchildren) != 0)
4904 return (ret);
4905
4906 children = MIN(oldchildren, children);
4907 }
4908
4909 if (children > 0) {
4910 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
4911 !cb->cb_vdevs.cb_names) {
4912 print_iostat_dashes(cb, 0, "cache");
4913 }
4914 printf("\n");
4915
4916 for (c = 0; c < children; c++) {
4917 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4918 cb->cb_vdevs.cb_name_flags);
4919 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
4920 : NULL, newchild[c], cb, depth + 2);
4921 free(vname);
4922 }
4923 }
4924
4925 return (ret);
4926 }
4927
4928 static int
4929 refresh_iostat(zpool_handle_t *zhp, void *data)
4930 {
4931 iostat_cbdata_t *cb = data;
4932 boolean_t missing;
4933
4934 /*
4935 * If the pool has disappeared, remove it from the list and continue.
4936 */
4937 if (zpool_refresh_stats(zhp, &missing) != 0)
4938 return (-1);
4939
4940 if (missing)
4941 pool_list_remove(cb->cb_list, zhp);
4942
4943 return (0);
4944 }
4945
4946 /*
4947 * Callback to print out the iostats for the given pool.
4948 */
4949 static int
4950 print_iostat(zpool_handle_t *zhp, void *data)
4951 {
4952 iostat_cbdata_t *cb = data;
4953 nvlist_t *oldconfig, *newconfig;
4954 nvlist_t *oldnvroot, *newnvroot;
4955 int ret;
4956
4957 newconfig = zpool_get_config(zhp, &oldconfig);
4958
4959 if (cb->cb_iteration == 1)
4960 oldconfig = NULL;
4961
4962 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
4963 &newnvroot) == 0);
4964
4965 if (oldconfig == NULL)
4966 oldnvroot = NULL;
4967 else
4968 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
4969 &oldnvroot) == 0);
4970
4971 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
4972 cb, 0);
4973 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
4974 !cb->cb_scripted && cb->cb_verbose &&
4975 !cb->cb_vdevs.cb_names_count) {
4976 print_iostat_separator(cb);
4977 if (cb->vcdl != NULL) {
4978 print_cmd_columns(cb->vcdl, 1);
4979 }
4980 printf("\n");
4981 }
4982
4983 return (ret);
4984 }
4985
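/*
 * Return the terminal width in columns: the ioctl result for a TTY
 * (falling back to 80 on error), or 999 when stdout is not a TTY.
 */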
4986 static int
4987 get_columns(void)
4988 {
4989 struct winsize ws;
4990 int columns = 80;
4991 int error;
4992
4993 if (isatty(STDOUT_FILENO)) {
4994 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
4995 if (error == 0)
4996 columns = ws.ws_col;
4997 } else {
4998 columns = 999;
4999 }
5000
5001 return (columns);
5002 }
5003
5004 /*
5005 * Return the required length of the pool/vdev name column. The minimum
5006 * allowed width and output formatting flags must be provided.
5007 */
5008 static int
5009 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5010 {
5011 nvlist_t *config, *nvroot;
5012 int width = min_width;
5013
5014 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5015 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5016 &nvroot) == 0);
5017 size_t poolname_len = strlen(zpool_get_name(zhp));
5018 if (verbose == B_FALSE) {
5019 width = MAX(poolname_len, min_width);
5020 } else {
5021 width = MAX(poolname_len,
5022 max_width(zhp, nvroot, 0, min_width, flags));
5023 }
5024 }
5025
5026 return (width);
5027 }
5028
5029 /*
5030 * Parse the trailing arguments and extract the 'interval' and 'count' values, if any.
5031 */
5032 static void
5033 get_interval_count(int *argcp, char **argv, float *iv,
5034 unsigned long *cnt)
5035 {
5036 float interval = 0;
5037 unsigned long count = 0;
5038 int argc = *argcp;
5039
5040 /*
5041 * Determine if the last argument is an integer or a pool name
5042 */
5043 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5044 char *end;
5045
5046 errno = 0;
5047 interval = strtof(argv[argc - 1], &end);
5048
5049 if (*end == '\0' && errno == 0) {
5050 if (interval == 0) {
5051 (void) fprintf(stderr, gettext(
5052 "interval cannot be zero\n"));
5053 usage(B_FALSE);
5054 }
5055 /*
5056 * Ignore the last parameter
5057 */
5058 argc--;
5059 } else {
5060 /*
5061 * If this is not a valid number, just plow on. The
5062 * user will get a more informative error message later
5063 * on.
5064 */
5065 interval = 0;
5066 }
5067 }
5068
5069 /*
5070 * If the last argument is also an integer, then we have both a count
5071 * and an interval.
5072 */
5073 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5074 char *end;
5075
5076 errno = 0;
5077 count = interval;
5078 interval = strtof(argv[argc - 1], &end);
5079
5080 if (*end == '\0' && errno == 0) {
5081 if (interval == 0) {
5082 (void) fprintf(stderr, gettext(
5083 "interval cannot be zero\n"));
5084 usage(B_FALSE);
5085 }
5086
5087 /*
5088 * Ignore the last parameter
5089 */
5090 argc--;
5091 } else {
5092 interval = 0;
5093 }
5094 }
5095
5096 *iv = interval;
5097 *cnt = count;
5098 *argcp = argc;
5099 }
5100
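/*
 * Translate the -T option argument into a timestamp format: 'u' for Unix
 * time, 'd' for date(1) style; anything else prints usage.
 */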
5101 static void
5102 get_timestamp_arg(char c)
5103 {
5104 if (c == 'u')
5105 timestamp_fmt = UDATE;
5106 else if (c == 'd')
5107 timestamp_fmt = DDATE;
5108 else
5109 usage(B_FALSE);
5110 }
5111
5112 /*
5113 * Return stat flags that are supported by all pools by both the module and
5114 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5115 * It will get ANDed down until only the flags that are supported on all pools
5116 * remain.
5117 */
5118 static int
5119 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5120 {
5121 uint64_t *mask = data;
5122 nvlist_t *config, *nvroot, *nvx;
5123 uint64_t flags = 0;
5124 int i, j;
5125
5126 config = zpool_get_config(zhp, NULL);
5127 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5128 &nvroot) == 0);
5129
5130 /* Default stats are always supported, but for completeness... */
5131 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5132 flags |= IOS_DEFAULT_M;
5133
5134 /* Get our extended stats nvlist from the main list */
5135 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5136 &nvx) != 0) {
5137 /*
5138 * No extended stats; they're probably running an older
5139 * module. No big deal, we support that too.
5140 */
5141 goto end;
5142 }
5143
5144 /* For each extended stat, make sure all its nvpairs are supported */
5145 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5146 if (!vsx_type_to_nvlist[j][0])
5147 continue;
5148
5149 /* Start off by assuming the flag is supported, then check */
5150 flags |= (1ULL << j);
5151 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5152 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5153 /* flag isn't supported */
5154 flags = flags & ~(1ULL << j);
5155 break;
5156 }
5157 }
5158 }
5159 end:
5160 *mask = *mask & flags;
5161 return (0);
5162 }
5163
5164 /*
5165 * Return a bitmask of stats that are supported on all pools by both the module
5166 * and zpool iostat.
5167 */
5168 static uint64_t
5169 get_stat_flags(zpool_list_t *list)
5170 {
5171 uint64_t mask = -1;
5172
5173 /*
5174 * get_stat_flags_cb() will lop off bits from "mask" until only the
5175 * flags that are supported on all pools remain.
5176 */
5177 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
5178 return (mask);
5179 }
5180
5181 /*
5182 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
5183 */
5184 static int
5185 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
5186 {
5187 vdev_cbdata_t *cb = cb_data;
5188 char *name = NULL;
5189 int ret = 1; /* assume match */
5190 zpool_handle_t *zhp = zhp_data;
5191
5192 name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
5193
5194 if (strcmp(name, cb->cb_names[0])) {
5195 free(name);
5196 name = zpool_vdev_name(g_zfs, zhp, nv, VDEV_NAME_GUID);
5197 ret = (strcmp(name, cb->cb_names[0]) == 0);
5198 }
5199 free(name);
5200
5201 return (ret);
5202 }
5203
5204 /*
5205 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
5206 */
5207 static int
5208 is_vdev(zpool_handle_t *zhp, void *cb_data)
5209 {
5210 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
5211 }
5212
5213 /*
5214 * Check if vdevs are in a pool
5215 *
5216 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
5217 * return 0. If pool_name is NULL, then search all pools.
5218 */
5219 static int
5220 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
5221 vdev_cbdata_t *cb)
5222 {
5223 char **tmp_name;
5224 int ret = 0;
5225 int i;
5226 int pool_count = 0;
5227
5228 if ((argc == 0) || !*argv)
5229 return (0);
5230
5231 if (pool_name)
5232 pool_count = 1;
5233
5234 /* Temporarily hijack cb_names for a second... */
5235 tmp_name = cb->cb_names;
5236
5237 /* Go through our list of prospective vdev names */
5238 for (i = 0; i < argc; i++) {
5239 cb->cb_names = argv + i;
5240
5241 /* Is this name a vdev in our pools? */
5242 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
5243 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
5244 if (!ret) {
5245 /* No match */
5246 break;
5247 }
5248 }
5249
5250 cb->cb_names = tmp_name;
5251
5252 return (ret);
5253 }
5254
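/* is_pool() callback: return 1 if this pool's name matches *data. */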
5255 static int
5256 is_pool_cb(zpool_handle_t *zhp, void *data)
5257 {
5258 char *name = data;
5259 if (strcmp(name, zpool_get_name(zhp)) == 0)
5260 return (1);
5261
5262 return (0);
5263 }
5264
5265 /*
5266 * Do we have a pool named *name? If so, return 1, otherwise 0.
5267 */
5268 static int
5269 is_pool(char *name)
5270 {
5271 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
5272 is_pool_cb, name));
5273 }
5274
5275 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
5276 static int
5277 are_all_pools(int argc, char **argv)
5278 {
5279 if ((argc == 0) || !*argv)
5280 return (0);
5281
5282 while (--argc >= 0)
5283 if (!is_pool(argv[argc]))
5284 return (0);
5285
5286 return (1);
5287 }
5288
5289 /*
5290 * Helper function to print out vdev/pool names we can't resolve. Used for an
5291 * error message.
5292 */
5293 static void
5294 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
5295 vdev_cbdata_t *cb)
5296 {
5297 int i;
5298 char *name;
5299 char *str;
5300 for (i = 0; i < argc; i++) {
5301 name = argv[i];
5302
5303 if (is_pool(name))
5304 str = gettext("pool");
5305 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
5306 str = gettext("vdev in this pool");
5307 else if (are_vdevs_in_pool(1, &name, NULL, cb))
5308 str = gettext("vdev in another pool");
5309 else
5310 str = gettext("unknown");
5311
5312 fprintf(stderr, "\t%s (%s)\n", name, str);
5313 }
5314 }
5315
5316 /*
5317 * Same as get_interval_count(), but with additional checks to not misinterpret
5318 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
5319 * cb.cb_vdevs.cb_name_flags.
5320 */
5321 static void
5322 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
5323 unsigned long *count, iostat_cbdata_t *cb)
5324 {
5325 char **tmpargv = argv;
5326 int argc_for_interval = 0;
5327
5328 /* Is the last arg an interval value? Or a guid? */
5329 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
5330 &cb->cb_vdevs)) {
5331 /*
5332 * The last arg is not a guid, so it's probably an
5333 * interval value.
5334 */
5335 argc_for_interval++;
5336
5337 if (*argc >= 2 &&
5338 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
5339 &cb->cb_vdevs)) {
5340 /*
5341 * The 2nd to last arg is not a guid, so it's probably
5342 * an interval value.
5343 */
5344 argc_for_interval++;
5345 }
5346 }
5347
5348 /* Point to our list of possible intervals */
5349 tmpargv = &argv[*argc - argc_for_interval];
5350
5351 *argc = *argc - argc_for_interval;
5352 get_interval_count(&argc_for_interval, tmpargv,
5353 interval, count);
5354 }
5355
5356 /*
5357 * Floating point sleep(). Allows you to pass in a floating point value for
5358 * seconds.
5359 */
5360 static void
5361 fsleep(float sec)
5362 {
5363 struct timespec req;
5364 req.tv_sec = floor(sec);
5365 req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
5366 nanosleep(&req, NULL);
5367 }
5368
5369 /*
5370 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
5371 * if we were unable to determine its size.
5372 */
5373 static int
5374 terminal_height(void)
5375 {
5376 struct winsize win;
5377
5378 if (isatty(STDOUT_FILENO) == 0)
5379 return (-1);
5380
5381 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
5382 return (win.ws_row);
5383
5384 return (-1);
5385 }
5386
5387 /*
5388 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
5389 * print the result.
5390 *
5391 * name: Short name of the script ('iostat').
5392 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
5393 */
5394 static void
5395 print_zpool_script_help(char *name, char *path)
5396 {
5397 char *argv[] = {path, (char *)"-h", NULL};
5398 char **lines = NULL;
5399 int lines_cnt = 0;
5400 int rc;
5401
5402 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
5403 &lines_cnt);
5404 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
5405 if (lines != NULL)
5406 libzfs_free_str_array(lines, lines_cnt);
5407 return;
5408 }
5409
5410 for (int i = 0; i < lines_cnt; i++)
5411 if (!is_blank_str(lines[i]))
5412 printf(" %-14s %s\n", name, lines[i]);
5413
5414 libzfs_free_str_array(lines, lines_cnt);
5415 }
5416
5417 /*
5418 * Go through the zpool status/iostat -c scripts in the user's path, run their
5419 * help option (-h), and print out the results.
5420 */
5421 static void
5422 print_zpool_dir_scripts(char *dirpath)
5423 {
5424 DIR *dir;
5425 struct dirent *ent;
5426 char fullpath[MAXPATHLEN];
5427 struct stat dir_stat;
5428
5429 if ((dir = opendir(dirpath)) != NULL) {
5430 /* print all the files and directories within directory */
5431 while ((ent = readdir(dir)) != NULL) {
5432 snprintf(fullpath, sizeof (fullpath), "%s/%s", dirpath, ent->d_name);
5433
5434 /* Print the scripts */
5435 if (stat(fullpath, &dir_stat) == 0)
5436 if (dir_stat.st_mode & S_IXUSR &&
5437 S_ISREG(dir_stat.st_mode))
5438 print_zpool_script_help(ent->d_name,
5439 fullpath);
5440 }
5441 closedir(dir);
5442 }
5443 }
5444
5445 /*
5446 * Print out help text for all zpool status/iostat -c scripts.
5447 */
5448 static void
5449 print_zpool_script_list(const char *subcommand)
5450 {
5451 char *dir, *sp, *tmp;
5452
5453 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
5454
5455 sp = zpool_get_cmd_search_path();
5456 if (sp == NULL)
5457 return;
5458
5459 for (dir = strtok_r(sp, ":", &tmp);
5460 dir != NULL;
5461 dir = strtok_r(NULL, ":", &tmp))
5462 print_zpool_dir_scripts(dir);
5463
5464 free(sp);
5465 }
5466
5467 /*
5468 * Set the minimum pool/vdev name column width. The width must be at least 10,
5469 * but may be as large as the terminal width - 42 so the stats still fit on one line.
5470 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
5471 */
5472 static int
5473 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
5474 {
5475 iostat_cbdata_t *cb = data;
5476 int width, available_width;
5477
5478 /*
5479 * get_namewidth() returns the maximum width of any name in that column
5480 * for any pool/vdev/device line that will be output.
5481 */
5482 width = get_namewidth(zhp, cb->cb_namewidth,
5483 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
5484
5485 /*
5486 * The width we are calculating is the width of the header and also the
5487 * padding width for names that are less than maximum width. The stats
5488 * take up 42 characters, so the width available for names is:
5489 */
5490 available_width = get_columns() - 42;
5491
5492 /*
5493 * If the maximum width fits on a screen, then great! Make everything
5494 * line up by justifying all lines to the same width. If that max
5495 * width is larger than what's available, the name plus stats won't fit
5496 * on one line, and justifying to that width would cause every line to
5497 * wrap on the screen. We only want lines with long names to wrap.
5498 * Limit the padding to what won't wrap.
5499 */
5500 if (width > available_width)
5501 width = available_width;
5502
5503 /*
5504 * And regardless of whatever the screen width is (get_columns can
5505 * return 0 if the width is not known or less than 42 for a narrow
5506 * terminal) have the width be a minimum of 10.
5507 */
5508 if (width < 10)
5509 width = 10;
5510
5511 /* Save the calculated width */
5512 cb->cb_namewidth = width;
5513
5514 return (0);
5515 }
5516
5517 /*
5518 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
5519 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
5520 * [interval [count]]
5521 *
5522 * -c CMD For each vdev, run command CMD
5523 * -g Display guid for individual vdev name.
5524 * -L Follow links when resolving vdev path name.
5525 * -P Display full path for vdev name.
5526 * -v Display statistics for individual vdevs
5527 * -h Display help
5528 * -p Display values in parsable (exact) format.
5529 * -H Scripted mode. Don't display headers, and separate properties
5530 * by a single tab.
5531 * -l Display average latency
5532 * -q Display queue depths
5533 * -w Display latency histograms
5534 * -r Display request size histogram
5535 * -T Display a timestamp in date(1) or Unix format
5536 * -n Only print headers once
5537 *
5538 * This command can be tricky because we want to be able to deal with pool
5539 * creation/destruction as well as vdev configuration changes. The bulk of this
5540 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
5541 * on pool_list_update() to detect the addition of new pools. Configuration
5542 * changes are all handled within libzfs.
5543 */
5544 int
5545 zpool_do_iostat(int argc, char **argv)
5546 {
5547 int c;
5548 int ret;
5549 int npools;
5550 float interval = 0;
5551 unsigned long count = 0;
5552 int winheight = 24;
5553 zpool_list_t *list;
5554 boolean_t verbose = B_FALSE;
5555 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
5556 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
5557 boolean_t omit_since_boot = B_FALSE;
5558 boolean_t guid = B_FALSE;
5559 boolean_t follow_links = B_FALSE;
5560 boolean_t full_name = B_FALSE;
5561 boolean_t headers_once = B_FALSE;
5562 iostat_cbdata_t cb = { 0 };
5563 char *cmd = NULL;
5564
5565 /* Used for printing error message */
5566 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
5567 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
5568
5569 uint64_t unsupported_flags;
5570
5571 /* check options */
5572 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
5573 switch (c) {
5574 case 'c':
5575 if (cmd != NULL) {
5576 fprintf(stderr,
5577 gettext("Can't set -c flag twice\n"));
5578 exit(1);
5579 }
5580
5581 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
5582 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
5583 fprintf(stderr, gettext(
5584 "Can't run -c, disabled by "
5585 "ZPOOL_SCRIPTS_ENABLED.\n"));
5586 exit(1);
5587 }
5588
5589 if ((getuid() <= 0 || geteuid() <= 0) &&
5590 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
5591 fprintf(stderr, gettext(
5592 "Can't run -c with root privileges "
5593 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
5594 exit(1);
5595 }
5596 cmd = optarg;
5597 verbose = B_TRUE;
5598 break;
5599 case 'g':
5600 guid = B_TRUE;
5601 break;
5602 case 'L':
5603 follow_links = B_TRUE;
5604 break;
5605 case 'P':
5606 full_name = B_TRUE;
5607 break;
5608 case 'T':
5609 get_timestamp_arg(*optarg);
5610 break;
5611 case 'v':
5612 verbose = B_TRUE;
5613 break;
5614 case 'p':
5615 parsable = B_TRUE;
5616 break;
5617 case 'l':
5618 latency = B_TRUE;
5619 break;
5620 case 'q':
5621 queues = B_TRUE;
5622 break;
5623 case 'H':
5624 scripted = B_TRUE;
5625 break;
5626 case 'w':
5627 l_histo = B_TRUE;
5628 break;
5629 case 'r':
5630 rq_histo = B_TRUE;
5631 break;
5632 case 'y':
5633 omit_since_boot = B_TRUE;
5634 break;
5635 case 'n':
5636 headers_once = B_TRUE;
5637 break;
5638 case 'h':
5639 usage(B_FALSE);
5640 break;
5641 case '?':
5642 if (optopt == 'c') {
5643 print_zpool_script_list("iostat");
5644 exit(0);
5645 } else {
5646 fprintf(stderr,
5647 gettext("invalid option '%c'\n"), optopt);
5648 }
5649 usage(B_FALSE);
5650 }
5651 }
5652
5653 argc -= optind;
5654 argv += optind;
5655
5656 cb.cb_literal = parsable;
5657 cb.cb_scripted = scripted;
5658
5659 if (guid)
5660 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
5661 if (follow_links)
5662 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
5663 if (full_name)
5664 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
5665 cb.cb_iteration = 0;
5666 cb.cb_namewidth = 0;
5667 cb.cb_verbose = verbose;
5668
5669 /* Get our interval and count values (if any) */
5670 if (guid) {
5671 get_interval_count_filter_guids(&argc, argv, &interval,
5672 &count, &cb);
5673 } else {
5674 get_interval_count(&argc, argv, &interval, &count);
5675 }
5676
5677 if (argc == 0) {
5678 /* No args, so just print the defaults. */
5679 } else if (are_all_pools(argc, argv)) {
5680 /* All the args are pool names */
5681 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
5682 /* All the args are vdevs */
5683 cb.cb_vdevs.cb_names = argv;
5684 cb.cb_vdevs.cb_names_count = argc;
5685 argc = 0; /* No pools to process */
5686 } else if (are_all_pools(1, argv)) {
5687 /* The first arg is a pool name */
5688 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
5689 &cb.cb_vdevs)) {
5690 /* ...and the rest are vdev names */
5691 cb.cb_vdevs.cb_names = argv + 1;
5692 cb.cb_vdevs.cb_names_count = argc - 1;
5693 argc = 1; /* One pool to process */
5694 } else {
5695 fprintf(stderr, gettext("Expected either a list of "));
5696 fprintf(stderr, gettext("pools, or list of vdevs in"));
5697 fprintf(stderr, " \"%s\", ", argv[0]);
5698 fprintf(stderr, gettext("but got:\n"));
5699 error_list_unresolved_vdevs(argc - 1, argv + 1,
5700 argv[0], &cb.cb_vdevs);
5701 fprintf(stderr, "\n");
5702 usage(B_FALSE);
5703 return (1);
5704 }
5705 } else {
5706 /*
5707 * The args don't make sense. The first arg isn't a pool name,
5708 * nor are all the args vdevs.
5709 */
5710 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
5711 fprintf(stderr, "\n");
5712 return (1);
5713 }
5714
5715 if (cb.cb_vdevs.cb_names_count != 0) {
5716 /*
5717 * If user specified vdevs, it implies verbose.
5718 */
5719 cb.cb_verbose = B_TRUE;
5720 }
5721
5722 /*
5723 * Construct the list of all interesting pools.
5724 */
5725 ret = 0;
5726 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
5727 &ret)) == NULL)
5728 return (1);
5729
5730 if (pool_list_count(list) == 0 && argc != 0) {
5731 pool_list_free(list);
5732 return (1);
5733 }
5734
5735 if (pool_list_count(list) == 0 && interval == 0) {
5736 pool_list_free(list);
5737 (void) fprintf(stderr, gettext("no pools available\n"));
5738 return (1);
5739 }
5740
5741 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
5742 pool_list_free(list);
5743 (void) fprintf(stderr,
5744 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
5745 usage(B_FALSE);
5746 return (1);
5747 }
5748
5749 if (l_histo && rq_histo) {
5750 pool_list_free(list);
5751 (void) fprintf(stderr,
5752 gettext("Only one of [-r|-w] can be passed at a time\n"));
5753 usage(B_FALSE);
5754 return (1);
5755 }
5756
5757 /*
5758 * Enter the main iostat loop.
5759 */
5760 cb.cb_list = list;
5761
5762 if (l_histo) {
5763 /*
5764 * Histograms tables look out of place when you try to display
5765 * them with the other stats, so make a rule that you can only
5766 * print histograms by themselves.
5767 */
5768 cb.cb_flags = IOS_L_HISTO_M;
5769 } else if (rq_histo) {
5770 cb.cb_flags = IOS_RQ_HISTO_M;
5771 } else {
5772 cb.cb_flags = IOS_DEFAULT_M;
5773 if (latency)
5774 cb.cb_flags |= IOS_LATENCY_M;
5775 if (queues)
5776 cb.cb_flags |= IOS_QUEUES_M;
5777 }
5778
5779 /*
5780 * See if the module supports all the stats we want to display.
5781 */
5782 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
5783 if (unsupported_flags) {
5784 uint64_t f;
5785 int idx;
5786 fprintf(stderr,
5787 gettext("The loaded zfs module doesn't support:"));
5788
5789 /* for each bit set in unsupported_flags */
5790 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
5791 idx = lowbit64(f) - 1;
5792 fprintf(stderr, " -%c", flag_to_arg[idx]);
5793 }
5794
5795 fprintf(stderr, ". Try running a newer module.\n");
5796 pool_list_free(list);
5797
5798 return (1);
5799 }
5800
5801 for (;;) {
5802 if ((npools = pool_list_count(list)) == 0)
5803 (void) fprintf(stderr, gettext("no pools available\n"));
5804 else {
5805 /*
5806 * If this is the first iteration and -y was supplied
5807 * we skip any printing.
5808 */
5809 boolean_t skip = (omit_since_boot &&
5810 cb.cb_iteration == 0);
5811
5812 /*
5813 * Refresh all statistics. This is done as an
5814 * explicit step before calculating the maximum name
5815 * width, so that any configuration changes are
5816 * properly accounted for.
5817 */
5818 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
5819 &cb);
5820
5821 /*
5822 * Iterate over all pools to determine the maximum width
5823 * for the pool / device name column across all pools.
5824 */
5825 cb.cb_namewidth = 0;
5826 (void) pool_list_iter(list, B_FALSE,
5827 get_namewidth_iostat, &cb);
5828
5829 if (timestamp_fmt != NODATE)
5830 print_timestamp(timestamp_fmt);
5831
5832 if (cmd != NULL && cb.cb_verbose &&
5833 !(cb.cb_flags & IOS_ANYHISTO_M)) {
5834 cb.vcdl = all_pools_for_each_vdev_run(argc,
5835 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
5836 cb.cb_vdevs.cb_names_count,
5837 cb.cb_vdevs.cb_name_flags);
5838 } else {
5839 cb.vcdl = NULL;
5840 }
5841
5842
5843 /*
5844 * Re-check the terminal size on each iteration so headers are
5845 * still repeated at the right interval if the terminal window
5846 * height changes.
5847 */
5848 winheight = terminal_height();
5849 /*
5850 * Are we connected to a TTY? If not, headers_once
5851 * should be true, to avoid breaking scripts.
5852 */
5853 if (winheight < 0)
5854 headers_once = B_TRUE;
5855
5856 /*
5857 * If it's the first time and we're not skipping it,
5858 * or either skip or verbose mode, print the header.
5859 *
5860 * The histogram code explicitly prints its header on
5861 * every vdev, so skip this for histograms.
5862 */
5863 if (((++cb.cb_iteration == 1 && !skip) ||
5864 (skip != verbose) ||
5865 (!headers_once &&
5866 (cb.cb_iteration % winheight) == 0)) &&
5867 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
5868 !cb.cb_scripted)
5869 print_iostat_header(&cb);
5870
5871 if (skip) {
5872 (void) fsleep(interval);
5873 continue;
5874 }
5875
5876 pool_list_iter(list, B_FALSE, print_iostat, &cb);
5877
5878 /*
5879 * If there's more than one pool, and we're not in
5880 * verbose mode (which prints a separator for us),
5881 * then print a separator.
5882 *
5883 * In addition, if we're printing specific vdevs then
5884 * we also want an ending separator.
5885 */
5886 if (((npools > 1 && !verbose &&
5887 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
5888 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
5889 cb.cb_vdevs.cb_names_count)) &&
5890 !cb.cb_scripted) {
5891 print_iostat_separator(&cb);
5892 if (cb.vcdl != NULL)
5893 print_cmd_columns(cb.vcdl, 1);
5894 printf("\n");
5895 }
5896
5897 if (cb.vcdl != NULL)
5898 free_vdev_cmd_data_list(cb.vcdl);
5899
5900 }
5901
5902 /*
5903 * Flush the output so that redirection to a file isn't buffered
5904 * indefinitely.
5905 */
5906 (void) fflush(stdout);
5907
5908 if (interval == 0)
5909 break;
5910
5911 if (count != 0 && --count == 0)
5912 break;
5913
5914 (void) fsleep(interval);
5915 }
5916
5917 pool_list_free(list);
5918
5919 return (ret);
5920 }
5921
5922 typedef struct list_cbdata {
5923 boolean_t cb_verbose;
5924 int cb_name_flags;
5925 int cb_namewidth;
5926 boolean_t cb_scripted;
5927 zprop_list_t *cb_proplist;
5928 boolean_t cb_literal;
5929 } list_cbdata_t;
5930
5931
5932 /*
5933 * Given a list of columns to display, output appropriate headers for each one.
5934 */
5935 static void
5936 print_header(list_cbdata_t *cb)
5937 {
5938 zprop_list_t *pl = cb->cb_proplist;
5939 char headerbuf[ZPOOL_MAXPROPLEN];
5940 const char *header;
5941 boolean_t first = B_TRUE;
5942 boolean_t right_justify;
5943 size_t width = 0;
5944
5945 for (; pl != NULL; pl = pl->pl_next) {
5946 width = pl->pl_width;
5947 if (first && cb->cb_verbose) {
5948 /*
5949 * Reset the width to accommodate the verbose listing
5950 * of devices.
5951 */
5952 width = cb->cb_namewidth;
5953 }
5954
5955 if (!first)
5956 (void) fputs(" ", stdout);
5957 else
5958 first = B_FALSE;
5959
5960 right_justify = B_FALSE;
5961 if (pl->pl_prop != ZPROP_USERPROP) {
5962 header = zpool_prop_column_name(pl->pl_prop);
5963 right_justify = zpool_prop_align_right(pl->pl_prop);
5964 } else {
5965 int i;
5966
5967 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
5968 headerbuf[i] = toupper(pl->pl_user_prop[i]);
5969 headerbuf[i] = '\0';
5970 header = headerbuf;
5971 }
5972
5973 if (pl->pl_next == NULL && !right_justify)
5974 (void) fputs(header, stdout);
5975 else if (right_justify)
5976 (void) printf("%*s", (int)width, header);
5977 else
5978 (void) printf("%-*s", (int)width, header);
5979 }
5980
5981 (void) fputc('\n', stdout);
5982 }
5983
5984 /*
5985 * Given a pool and a list of properties, print out all the properties according
5986 * to the described layout. Used by zpool_do_list().
5987 */
5988 static void
5989 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
5990 {
5991 zprop_list_t *pl = cb->cb_proplist;
5992 boolean_t first = B_TRUE;
5993 char property[ZPOOL_MAXPROPLEN];
5994 const char *propstr;
5995 boolean_t right_justify;
5996 size_t width;
5997
5998 for (; pl != NULL; pl = pl->pl_next) {
5999
6000 width = pl->pl_width;
6001 if (first && cb->cb_verbose) {
6002 /*
6003 * Reset the width to accommodate the verbose listing
6004 * of devices.
6005 */
6006 width = cb->cb_namewidth;
6007 }
6008
6009 if (!first) {
6010 if (cb->cb_scripted)
6011 (void) fputc('\t', stdout);
6012 else
6013 (void) fputs(" ", stdout);
6014 } else {
6015 first = B_FALSE;
6016 }
6017
6018 right_justify = B_FALSE;
6019 if (pl->pl_prop != ZPROP_USERPROP) {
6020 if (zpool_get_prop(zhp, pl->pl_prop, property,
6021 sizeof (property), NULL, cb->cb_literal) != 0)
6022 propstr = "-";
6023 else
6024 propstr = property;
6025
6026 right_justify = zpool_prop_align_right(pl->pl_prop);
6027 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6028 zpool_prop_unsupported(pl->pl_user_prop)) &&
6029 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6030 sizeof (property)) == 0) {
6031 propstr = property;
6032 } else {
6033 propstr = "-";
6034 }
6035
6036
6037 /*
6038 * If this is being called in scripted mode, or if this is the
6039 * last column and it is left-justified, don't include a width
6040 * format specifier.
6041 */
6042 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
6043 (void) fputs(propstr, stdout);
6044 else if (right_justify)
6045 (void) printf("%*s", (int)width, propstr);
6046 else
6047 (void) printf("%-*s", (int)width, propstr);
6048 }
6049
6050 (void) fputc('\n', stdout);
6051 }
6052
6053 static void
6054 print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
6055 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
6056 {
6057 char propval[64];
6058 boolean_t fixed;
6059 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6060
6061 switch (prop) {
6062 case ZPOOL_PROP_SIZE:
6063 case ZPOOL_PROP_EXPANDSZ:
6064 case ZPOOL_PROP_CHECKPOINT:
6065 case ZPOOL_PROP_DEDUPRATIO:
6066 if (value == 0)
6067 (void) strlcpy(propval, "-", sizeof (propval));
6068 else
6069 zfs_nicenum_format(value, propval, sizeof (propval),
6070 format);
6071 break;
6072 case ZPOOL_PROP_FRAGMENTATION:
6073 if (value == ZFS_FRAG_INVALID) {
6074 (void) strlcpy(propval, "-", sizeof (propval));
6075 } else if (format == ZFS_NICENUM_RAW) {
6076 (void) snprintf(propval, sizeof (propval), "%llu",
6077 (unsigned long long)value);
6078 } else {
6079 (void) snprintf(propval, sizeof (propval), "%llu%%",
6080 (unsigned long long)value);
6081 }
6082 break;
6083 case ZPOOL_PROP_CAPACITY:
6084 /* capacity value is in parts-per-10,000 (aka permyriad) */
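		/*
		 * Illustrative example: a raw capacity of 2550 permyriad prints
		 * as "25" with -p (2550 / 100, integer division) and as "25.5%"
		 * otherwise (2550 / 100.0 under the "%2.1f%%" format).
		 */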
6085 if (format == ZFS_NICENUM_RAW)
6086 (void) snprintf(propval, sizeof (propval), "%llu",
6087 (unsigned long long)value / 100);
6088 else
6089 (void) snprintf(propval, sizeof (propval),
6090 value < 1000 ? "%1.2f%%" : value < 10000 ?
6091 "%2.1f%%" : "%3.0f%%", value / 100.0);
6092 break;
6093 case ZPOOL_PROP_HEALTH:
6094 width = 8;
6095 (void) strlcpy(propval, str, sizeof (propval));
6096 break;
6097 default:
6098 zfs_nicenum_format(value, propval, sizeof (propval), format);
6099 }
6100
6101 if (!valid)
6102 (void) strlcpy(propval, "-", sizeof (propval));
6103
6104 if (scripted)
6105 (void) printf("\t%s", propval);
6106 else
6107 (void) printf(" %*s", (int)width, propval);
6108 }
6109
6110 /*
6111 * Print the static default stats line for each vdev.
6112 * Not compatible with the '-o' <proplist> option.
6113 */
6114 static void
6115 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6116 list_cbdata_t *cb, int depth, boolean_t isspare)
6117 {
6118 nvlist_t **child;
6119 vdev_stat_t *vs;
6120 uint_t c, children;
6121 char *vname;
6122 boolean_t scripted = cb->cb_scripted;
6123 uint64_t islog = B_FALSE;
6124 const char *dashes = "%-*s - - - - "
6125 "- - - - -\n";
6126
6127 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6128 (uint64_t **)&vs, &c) == 0);
6129
6130 if (name != NULL) {
6131 boolean_t toplevel = (vs->vs_space != 0);
6132 uint64_t cap;
6133 enum zfs_nicenum_format format;
6134 const char *state;
6135
6136 if (cb->cb_literal)
6137 format = ZFS_NICENUM_RAW;
6138 else
6139 format = ZFS_NICENUM_1024;
6140
6141 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
6142 return;
6143
6144 if (scripted)
6145 (void) printf("\t%s", name);
6146 else if (strlen(name) + depth > cb->cb_namewidth)
6147 (void) printf("%*s%s", depth, "", name);
6148 else
6149 (void) printf("%*s%s%*s", depth, "", name,
6150 (int)(cb->cb_namewidth - strlen(name) - depth), "");
6151
6152 /*
6153 * Print the properties for the individual vdevs. Some
6154 * properties are only applicable to toplevel vdevs. The
6155 * 'toplevel' boolean value is passed to print_one_column()
6156 * to indicate that the value is valid.
6157 */
6158 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
6159 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
6160 scripted, B_TRUE, format);
6161 else
6162 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
6163 scripted, toplevel, format);
6164 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
6165 scripted, toplevel, format);
6166 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
6167 NULL, scripted, toplevel, format);
6168 print_one_column(ZPOOL_PROP_CHECKPOINT,
6169 vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
6170 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
6171 scripted, B_TRUE, format);
6172 print_one_column(ZPOOL_PROP_FRAGMENTATION,
6173 vs->vs_fragmentation, NULL, scripted,
6174 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
6175 format);
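		/*
		 * Express allocated capacity in parts-per-10,000 (permyriad),
		 * which is what the ZPOOL_PROP_CAPACITY case in
		 * print_one_column() expects.
		 */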
6176 cap = (vs->vs_space == 0) ? 0 :
6177 (vs->vs_alloc * 10000 / vs->vs_space);
6178 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
6179 scripted, toplevel, format);
6180 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
6181 scripted, toplevel, format);
6182 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
6183 if (isspare) {
6184 if (vs->vs_aux == VDEV_AUX_SPARED)
6185 state = "INUSE";
6186 else if (vs->vs_state == VDEV_STATE_HEALTHY)
6187 state = "AVAIL";
6188 }
6189 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
6190 B_TRUE, format);
6191 (void) fputc('\n', stdout);
6192 }
6193
6194 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
6195 &child, &children) != 0)
6196 return;
6197
6198 /* list the normal vdevs first */
6199 for (c = 0; c < children; c++) {
6200 uint64_t ishole = B_FALSE;
6201
6202 if (nvlist_lookup_uint64(child[c],
6203 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
6204 continue;
6205
6206 if (nvlist_lookup_uint64(child[c],
6207 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
6208 continue;
6209
6210 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
6211 continue;
6212
6213 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6214 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6215 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
6216 free(vname);
6217 }
6218
6219 /* list the classes: 'logs', 'dedup', and 'special' */
6220 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
6221 boolean_t printed = B_FALSE;
6222
6223 for (c = 0; c < children; c++) {
6224 char *bias = NULL;
6225 char *type = NULL;
6226
6227 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
6228 &islog) == 0 && islog) {
6229 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
6230 } else {
6231 (void) nvlist_lookup_string(child[c],
6232 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
6233 (void) nvlist_lookup_string(child[c],
6234 ZPOOL_CONFIG_TYPE, &type);
6235 }
6236 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
6237 continue;
6238 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
6239 continue;
6240
6241 if (!printed) {
6242 /* LINTED E_SEC_PRINTF_VAR_FMT */
6243 (void) printf(dashes, cb->cb_namewidth,
6244 class_name[n]);
6245 printed = B_TRUE;
6246 }
6247 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6248 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6249 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6250 B_FALSE);
6251 free(vname);
6252 }
6253 }
6254
6255 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
6256 &child, &children) == 0 && children > 0) {
6257 /* LINTED E_SEC_PRINTF_VAR_FMT */
6258 (void) printf(dashes, cb->cb_namewidth, "cache");
6259 for (c = 0; c < children; c++) {
6260 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6261 cb->cb_name_flags);
6262 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6263 B_FALSE);
6264 free(vname);
6265 }
6266 }
6267
6268 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
6269 &children) == 0 && children > 0) {
6270 /* LINTED E_SEC_PRINTF_VAR_FMT */
6271 (void) printf(dashes, cb->cb_namewidth, "spare");
6272 for (c = 0; c < children; c++) {
6273 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6274 cb->cb_name_flags);
6275 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6276 B_TRUE);
6277 free(vname);
6278 }
6279 }
6280 }
6281
6282 /*
6283 * Generic callback function to list a pool.
6284 */
6285 static int
6286 list_callback(zpool_handle_t *zhp, void *data)
6287 {
6288 list_cbdata_t *cbp = data;
6289
6290 print_pool(zhp, cbp);
6291
6292 if (cbp->cb_verbose) {
6293 nvlist_t *config, *nvroot;
6294
6295 config = zpool_get_config(zhp, NULL);
6296 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6297 &nvroot) == 0);
6298 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
6299 }
6300
6301 return (0);
6302 }
6303
6304 /*
6305 * Set the minimum pool/vdev name column width. The width must be at least 9,
6306 * but may be as large as needed.
6307 */
6308 static int
6309 get_namewidth_list(zpool_handle_t *zhp, void *data)
6310 {
6311 list_cbdata_t *cb = data;
6312 int width;
6313
6314 width = get_namewidth(zhp, cb->cb_namewidth,
6315 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6316
6317 if (width < 9)
6318 width = 9;
6319
6320 cb->cb_namewidth = width;
6321
6322 return (0);
6323 }
6324
6325 /*
6326 * zpool list [-gHLpPv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
6327 *
6328 * -g Display guid for individual vdev name.
6329 * -H Scripted mode. Don't display headers, and separate properties
6330 * by a single tab.
6331 * -L Follow links when resolving vdev path name.
6332 * -o List of properties to display. Defaults to
6333 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
6334 * "dedupratio,health,altroot"
6335 * -p Display values in parsable (exact) format.
6336 * -P Display full path for vdev name.
6337 * -T Display a timestamp in date(1) or Unix format
 * -v Display usage statistics for individual vdevs within the pool.
6338 *
6339 * List all pools in the system, whether or not they're healthy. Output space
6340 * statistics for each one, as well as a health status summary.
6341 */
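/*
 * Illustrative example ("tank" is a placeholder pool name):
 *
 *	# zpool list -p -o name,size,capacity,health tank
 *
 * prints the selected properties for "tank", with numeric values in exact
 * (parsable) form.
 */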
6342 int
6343 zpool_do_list(int argc, char **argv)
6344 {
6345 int c;
6346 int ret = 0;
6347 list_cbdata_t cb = { 0 };
6348 static char default_props[] =
6349 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6350 "capacity,dedupratio,health,altroot";
6351 char *props = default_props;
6352 float interval = 0;
6353 unsigned long count = 0;
6354 zpool_list_t *list;
6355 boolean_t first = B_TRUE;
6356 current_prop_type = ZFS_TYPE_POOL;
6357
6358 /* check options */
6359 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
6360 switch (c) {
6361 case 'g':
6362 cb.cb_name_flags |= VDEV_NAME_GUID;
6363 break;
6364 case 'H':
6365 cb.cb_scripted = B_TRUE;
6366 break;
6367 case 'L':
6368 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6369 break;
6370 case 'o':
6371 props = optarg;
6372 break;
6373 case 'P':
6374 cb.cb_name_flags |= VDEV_NAME_PATH;
6375 break;
6376 case 'p':
6377 cb.cb_literal = B_TRUE;
6378 break;
6379 case 'T':
6380 get_timestamp_arg(*optarg);
6381 break;
6382 case 'v':
6383 cb.cb_verbose = B_TRUE;
6384 cb.cb_namewidth = 8; /* 8 until precalc is avail */
6385 break;
6386 case ':':
6387 (void) fprintf(stderr, gettext("missing argument for "
6388 "'%c' option\n"), optopt);
6389 usage(B_FALSE);
6390 break;
6391 case '?':
6392 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6393 optopt);
6394 usage(B_FALSE);
6395 }
6396 }
6397
6398 argc -= optind;
6399 argv += optind;
6400
6401 get_interval_count(&argc, argv, &interval, &count);
6402
6403 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
6404 usage(B_FALSE);
6405
6406 for (;;) {
6407 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
6408 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
6409 return (1);
6410
6411 if (pool_list_count(list) == 0)
6412 break;
6413
6414 cb.cb_namewidth = 0;
6415 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
6416
6417 if (timestamp_fmt != NODATE)
6418 print_timestamp(timestamp_fmt);
6419
6420 if (!cb.cb_scripted && (first || cb.cb_verbose)) {
6421 print_header(&cb);
6422 first = B_FALSE;
6423 }
6424 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
6425
6426 if (interval == 0)
6427 break;
6428
6429 if (count != 0 && --count == 0)
6430 break;
6431
6432 pool_list_free(list);
6433 (void) fsleep(interval);
6434 }
6435
6436 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
6437 (void) printf(gettext("no pools available\n"));
6438 ret = 0;
6439 }
6440
6441 pool_list_free(list);
6442 zprop_free_list(cb.cb_proplist);
6443 return (ret);
6444 }
6445
6446 static int
6447 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
6448 {
6449 boolean_t force = B_FALSE;
6450 boolean_t rebuild = B_FALSE;
6451 boolean_t wait = B_FALSE;
6452 int c;
6453 nvlist_t *nvroot;
6454 char *poolname, *old_disk, *new_disk;
6455 zpool_handle_t *zhp;
6456 nvlist_t *props = NULL;
6457 char *propval;
6458 int ret;
6459
6460 /* check options */
6461 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
6462 switch (c) {
6463 case 'f':
6464 force = B_TRUE;
6465 break;
6466 case 'o':
6467 if ((propval = strchr(optarg, '=')) == NULL) {
6468 (void) fprintf(stderr, gettext("missing "
6469 "'=' for -o option\n"));
6470 usage(B_FALSE);
6471 }
6472 *propval = '\0';
6473 propval++;
6474
6475 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
6476 (add_prop_list(optarg, propval, &props, B_TRUE)))
6477 usage(B_FALSE);
6478 break;
6479 case 's':
6480 rebuild = B_TRUE;
6481 break;
6482 case 'w':
6483 wait = B_TRUE;
6484 break;
6485 case '?':
6486 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6487 optopt);
6488 usage(B_FALSE);
6489 }
6490 }
6491
6492 argc -= optind;
6493 argv += optind;
6494
6495 /* get pool name and check number of arguments */
6496 if (argc < 1) {
6497 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6498 usage(B_FALSE);
6499 }
6500
6501 poolname = argv[0];
6502
6503 if (argc < 2) {
6504 (void) fprintf(stderr,
6505 gettext("missing <device> specification\n"));
6506 usage(B_FALSE);
6507 }
6508
6509 old_disk = argv[1];
6510
6511 if (argc < 3) {
6512 if (!replacing) {
6513 (void) fprintf(stderr,
6514 gettext("missing <new_device> specification\n"));
6515 usage(B_FALSE);
6516 }
6517 new_disk = old_disk;
6518 argc -= 1;
6519 argv += 1;
6520 } else {
6521 new_disk = argv[2];
6522 argc -= 2;
6523 argv += 2;
6524 }
6525
6526 if (argc > 1) {
6527 (void) fprintf(stderr, gettext("too many arguments\n"));
6528 usage(B_FALSE);
6529 }
6530
6531 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
6532 nvlist_free(props);
6533 return (1);
6534 }
6535
6536 if (zpool_get_config(zhp, NULL) == NULL) {
6537 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
6538 poolname);
6539 zpool_close(zhp);
6540 nvlist_free(props);
6541 return (1);
6542 }
6543
6544 /* unless manually specified, use the "ashift" pool property (if set) */
6545 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
6546 int intval;
6547 zprop_source_t src;
6548 char strval[ZPOOL_MAXPROPLEN];
6549
6550 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
6551 if (src != ZPROP_SRC_DEFAULT) {
6552 (void) sprintf(strval, "%" PRId32, intval);
6553 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
6554 &props, B_TRUE) == 0);
6555 }
6556 }
6557
6558 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
6559 argc, argv);
6560 if (nvroot == NULL) {
6561 zpool_close(zhp);
6562 nvlist_free(props);
6563 return (1);
6564 }
6565
6566 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
6567 rebuild);
6568
6569 if (ret == 0 && wait)
6570 ret = zpool_wait(zhp,
6571 replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
6572
6573 nvlist_free(props);
6574 nvlist_free(nvroot);
6575 zpool_close(zhp);
6576
6577 return (ret);
6578 }
6579
6580 /*
6581 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
6582 *
6583 * -f Force attach, even if <new_device> appears to be in use.
6584 * -s Use sequential instead of healing reconstruction for resilver.
6585 * -o Set property=value.
6586 * -w Wait for replacing to complete before returning
6587 *
6588 * Replace <device> with <new_device>.
6589 */
6590 int
6591 zpool_do_replace(int argc, char **argv)
6592 {
6593 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
6594 }
6595
6596 /*
6597 * zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
6598 *
6599 * -f Force attach, even if <new_device> appears to be in use.
6600 * -s Use sequential instead of healing reconstruction for resilver.
6601 * -o Set property=value.
6602 * -w Wait for resilvering to complete before returning
6603 *
6604 * Attach <new_device> to the mirror containing <device>. If <device> is not
6605 * part of a mirror, then <device> will be transformed into a mirror of
6606 * <device> and <new_device>. In either case, <new_device> will begin life
6607 * with a DTL of [0, now], and will immediately begin to resilver itself.
6608 */
6609 int
6610 zpool_do_attach(int argc, char **argv)
6611 {
6612 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
6613 }
6614
6615 /*
6616 * zpool detach [-f] <pool> <device>
6617 *
6618 * -f Force detach of <device>, even if DTLs argue against it
6619 * (not supported yet)
6620 *
6621 * Detach a device from a mirror. The operation will be refused if <device>
6622 * is the last device in the mirror, or if the DTLs indicate that this device
6623 * has the only valid copy of some data.
6624 */
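/*
 * Illustrative example ("tank" and "sdb" are placeholder names):
 *
 *	# zpool detach tank sdb
 *
 * detaches sdb from the mirror it is part of.
 */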
6625 int
6626 zpool_do_detach(int argc, char **argv)
6627 {
6628 int c;
6629 char *poolname, *path;
6630 zpool_handle_t *zhp;
6631 int ret;
6632
6633 /* check options */
6634 while ((c = getopt(argc, argv, "")) != -1) {
6635 switch (c) {
6636 case '?':
6637 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6638 optopt);
6639 usage(B_FALSE);
6640 }
6641 }
6642
6643 argc -= optind;
6644 argv += optind;
6645
6646 /* get pool name and check number of arguments */
6647 if (argc < 1) {
6648 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6649 usage(B_FALSE);
6650 }
6651
6652 if (argc < 2) {
6653 (void) fprintf(stderr,
6654 gettext("missing <device> specification\n"));
6655 usage(B_FALSE);
6656 }
6657
6658 poolname = argv[0];
6659 path = argv[1];
6660
6661 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6662 return (1);
6663
6664 ret = zpool_vdev_detach(zhp, path);
6665
6666 zpool_close(zhp);
6667
6668 return (ret);
6669 }
6670
6671 /*
6672 * zpool split [-gLnP] [-o prop=val] ...
6673 * [-o mntopt] ...
6674 * [-R altroot] <pool> <newpool> [<device> ...]
6675 *
6676 * -g Display guid for individual vdev name.
6677 * -L Follow links when resolving vdev path name.
6678 * -n Do not split the pool, but display the resulting layout if
6679 * it were to be split.
6680 * -o Set property=value, or set mount options.
6681 * -P Display full path for vdev name.
6682 * -R Mount the split-off pool under an alternate root.
6683 * -l Load encryption keys while importing.
6684 *
6685 * Splits the named pool and gives it the new pool name. Devices to be split
6686 * off may be listed, provided that no more than one device is specified
6687 * per top-level vdev mirror. The newly split pool is left in an exported
6688 * state unless -R is specified.
6689 *
6690 * Restrictions: the top level of the pool must be made up only of
6691 * mirrors; all devices in the pool must be healthy; and no device may be
6692 * undergoing a resilvering operation.
6693 */
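/*
 * Illustrative example ("tank", "newtank", and "sdb" are placeholder
 * names):
 *
 *	# zpool split -n tank newtank sdb
 *
 * shows the layout that splitting sdb off into "newtank" would produce,
 * without actually performing the split.
 */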
6694 int
6695 zpool_do_split(int argc, char **argv)
6696 {
6697 char *srcpool, *newpool, *propval;
6698 char *mntopts = NULL;
6699 splitflags_t flags;
6700 int c, ret = 0;
6701 boolean_t loadkeys = B_FALSE;
6702 zpool_handle_t *zhp;
6703 nvlist_t *config, *props = NULL;
6704
6705 flags.dryrun = B_FALSE;
6706 flags.import = B_FALSE;
6707 flags.name_flags = 0;
6708
6709 /* check options */
6710 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
6711 switch (c) {
6712 case 'g':
6713 flags.name_flags |= VDEV_NAME_GUID;
6714 break;
6715 case 'L':
6716 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
6717 break;
6718 case 'R':
6719 flags.import = B_TRUE;
6720 if (add_prop_list(
6721 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
6722 &props, B_TRUE) != 0) {
6723 nvlist_free(props);
6724 usage(B_FALSE);
6725 }
6726 break;
6727 case 'l':
6728 loadkeys = B_TRUE;
6729 break;
6730 case 'n':
6731 flags.dryrun = B_TRUE;
6732 break;
6733 case 'o':
6734 if ((propval = strchr(optarg, '=')) != NULL) {
6735 *propval = '\0';
6736 propval++;
6737 if (add_prop_list(optarg, propval,
6738 &props, B_TRUE) != 0) {
6739 nvlist_free(props);
6740 usage(B_FALSE);
6741 }
6742 } else {
6743 mntopts = optarg;
6744 }
6745 break;
6746 case 'P':
6747 flags.name_flags |= VDEV_NAME_PATH;
6748 break;
6749 case ':':
6750 (void) fprintf(stderr, gettext("missing argument for "
6751 "'%c' option\n"), optopt);
6752 usage(B_FALSE);
6753 break;
6754 case '?':
6755 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6756 optopt);
6757 usage(B_FALSE);
6758 break;
6759 }
6760 }
6761
6762 if (!flags.import && mntopts != NULL) {
6763 (void) fprintf(stderr, gettext("setting mntopts is only "
6764 "valid when importing the pool\n"));
6765 usage(B_FALSE);
6766 }
6767
6768 if (!flags.import && loadkeys) {
6769 (void) fprintf(stderr, gettext("loading keys is only "
6770 "valid when importing the pool\n"));
6771 usage(B_FALSE);
6772 }
6773
6774 argc -= optind;
6775 argv += optind;
6776
6777 if (argc < 1) {
6778 (void) fprintf(stderr, gettext("Missing pool name\n"));
6779 usage(B_FALSE);
6780 }
6781 if (argc < 2) {
6782 (void) fprintf(stderr, gettext("Missing new pool name\n"));
6783 usage(B_FALSE);
6784 }
6785
6786 srcpool = argv[0];
6787 newpool = argv[1];
6788
6789 argc -= 2;
6790 argv += 2;
6791
6792 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
6793 nvlist_free(props);
6794 return (1);
6795 }
6796
6797 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
6798 if (config == NULL) {
6799 ret = 1;
6800 } else {
6801 if (flags.dryrun) {
6802 (void) printf(gettext("would create '%s' with the "
6803 "following layout:\n\n"), newpool);
6804 print_vdev_tree(NULL, newpool, config, 0, "",
6805 flags.name_flags);
6806 print_vdev_tree(NULL, "dedup", config, 0,
6807 VDEV_ALLOC_BIAS_DEDUP, 0);
6808 print_vdev_tree(NULL, "special", config, 0,
6809 VDEV_ALLOC_BIAS_SPECIAL, 0);
6810 }
6811 }
6812
6813 zpool_close(zhp);
6814
6815 if (ret != 0 || flags.dryrun || !flags.import) {
6816 nvlist_free(config);
6817 nvlist_free(props);
6818 return (ret);
6819 }
6820
6821 /*
6822 * The split was successful. Now we need to open the new
6823 * pool and import it.
6824 */
6825 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
6826 nvlist_free(config);
6827 nvlist_free(props);
6828 return (1);
6829 }
6830
6831 if (loadkeys) {
6832 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
6833 if (ret != 0)
6834 ret = 1;
6835 }
6836
6837 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
6838 zpool_enable_datasets(zhp, mntopts, 0) != 0) {
6839 ret = 1;
6840 (void) fprintf(stderr, gettext("Split was successful, but "
6841 "the datasets could not all be mounted\n"));
6842 (void) fprintf(stderr, gettext("Try doing '%s' with a "
6843 "different altroot\n"), "zpool import");
6844 }
6845 zpool_close(zhp);
6846 nvlist_free(config);
6847 nvlist_free(props);
6848
6849 return (ret);
6850 }
6851
6854 /*
6855 * zpool online <pool> <device> ...
6856 */
6857 int
6858 zpool_do_online(int argc, char **argv)
6859 {
6860 int c, i;
6861 char *poolname;
6862 zpool_handle_t *zhp;
6863 int ret = 0;
6864 vdev_state_t newstate;
6865 int flags = 0;
6866
6867 /* check options */
6868 while ((c = getopt(argc, argv, "e")) != -1) {
6869 switch (c) {
6870 case 'e':
6871 flags |= ZFS_ONLINE_EXPAND;
6872 break;
6873 case '?':
6874 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6875 optopt);
6876 usage(B_FALSE);
6877 }
6878 }
6879
6880 argc -= optind;
6881 argv += optind;
6882
6883 /* get pool name and check number of arguments */
6884 if (argc < 1) {
6885 (void) fprintf(stderr, gettext("missing pool name\n"));
6886 usage(B_FALSE);
6887 }
6888 if (argc < 2) {
6889 (void) fprintf(stderr, gettext("missing device name\n"));
6890 usage(B_FALSE);
6891 }
6892
6893 poolname = argv[0];
6894
6895 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6896 return (1);
6897
6898 for (i = 1; i < argc; i++) {
6899 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
6900 if (newstate != VDEV_STATE_HEALTHY) {
6901 (void) printf(gettext("warning: device '%s' "
6902 "onlined, but remains in faulted state\n"),
6903 argv[i]);
6904 if (newstate == VDEV_STATE_FAULTED)
6905 (void) printf(gettext("use 'zpool "
6906 "clear' to restore a faulted "
6907 "device\n"));
6908 else
6909 (void) printf(gettext("use 'zpool "
6910 "replace' to replace devices "
6911 "that are no longer present\n"));
6912 }
6913 } else {
6914 ret = 1;
6915 }
6916 }
6917
6918 zpool_close(zhp);
6919
6920 return (ret);
6921 }
6922
6923 /*
6924 * zpool offline [-ft] <pool> <device> ...
6925 *
6926 * -f Force the device into a faulted state.
6927 *
6928 * -t Only take the device off-line temporarily. The offline/faulted
6929 * state will not be persistent across reboots.
6930 */
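/*
 * Illustrative example ("tank" and "sda" are placeholder names):
 *
 *	# zpool offline -t tank sda
 *
 * takes sda offline only until the next reboot.
 */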
6931 int
6932 zpool_do_offline(int argc, char **argv)
6933 {
6934 int c, i;
6935 char *poolname;
6936 zpool_handle_t *zhp;
6937 int ret = 0;
6938 boolean_t istmp = B_FALSE;
6939 boolean_t fault = B_FALSE;
6940
6941 /* check options */
6942 while ((c = getopt(argc, argv, "ft")) != -1) {
6943 switch (c) {
6944 case 'f':
6945 fault = B_TRUE;
6946 break;
6947 case 't':
6948 istmp = B_TRUE;
6949 break;
6950 case '?':
6951 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6952 optopt);
6953 usage(B_FALSE);
6954 }
6955 }
6956
6957 argc -= optind;
6958 argv += optind;
6959
6960 /* get pool name and check number of arguments */
6961 if (argc < 1) {
6962 (void) fprintf(stderr, gettext("missing pool name\n"));
6963 usage(B_FALSE);
6964 }
6965 if (argc < 2) {
6966 (void) fprintf(stderr, gettext("missing device name\n"));
6967 usage(B_FALSE);
6968 }
6969
6970 poolname = argv[0];
6971
6972 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6973 return (1);
6974
6975 for (i = 1; i < argc; i++) {
6976 if (fault) {
6977 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
6978 vdev_aux_t aux;
6979 if (istmp == B_FALSE) {
6980 /* Force the fault to persist across imports */
6981 aux = VDEV_AUX_EXTERNAL_PERSIST;
6982 } else {
6983 aux = VDEV_AUX_EXTERNAL;
6984 }
6985
6986 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
6987 ret = 1;
6988 } else {
6989 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
6990 ret = 1;
6991 }
6992 }
6993
6994 zpool_close(zhp);
6995
6996 return (ret);
6997 }
6998
6999 /*
7000 * zpool clear <pool> [device]
7001 *
7002 * Clear all errors associated with a pool or a particular device.
7003 */
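/*
 * Illustrative examples ("tank" and "sda" are placeholder names):
 *
 *	# zpool clear tank		clear all errors in the pool
 *	# zpool clear tank sda		clear errors on a single device
 */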
7004 int
7005 zpool_do_clear(int argc, char **argv)
7006 {
7007 int c;
7008 int ret = 0;
7009 boolean_t dryrun = B_FALSE;
7010 boolean_t do_rewind = B_FALSE;
7011 boolean_t xtreme_rewind = B_FALSE;
7012 uint32_t rewind_policy = ZPOOL_NO_REWIND;
7013 nvlist_t *policy = NULL;
7014 zpool_handle_t *zhp;
7015 char *pool, *device;
7016
7017 /* check options */
7018 while ((c = getopt(argc, argv, "FnX")) != -1) {
7019 switch (c) {
7020 case 'F':
7021 do_rewind = B_TRUE;
7022 break;
7023 case 'n':
7024 dryrun = B_TRUE;
7025 break;
7026 case 'X':
7027 xtreme_rewind = B_TRUE;
7028 break;
7029 case '?':
7030 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7031 optopt);
7032 usage(B_FALSE);
7033 }
7034 }
7035
7036 argc -= optind;
7037 argv += optind;
7038
7039 if (argc < 1) {
7040 (void) fprintf(stderr, gettext("missing pool name\n"));
7041 usage(B_FALSE);
7042 }
7043
7044 if (argc > 2) {
7045 (void) fprintf(stderr, gettext("too many arguments\n"));
7046 usage(B_FALSE);
7047 }
7048
7049 if ((dryrun || xtreme_rewind) && !do_rewind) {
7050 (void) fprintf(stderr,
7051 gettext("-n or -X only meaningful with -F\n"));
7052 usage(B_FALSE);
7053 }
7054 if (dryrun)
7055 rewind_policy = ZPOOL_TRY_REWIND;
7056 else if (do_rewind)
7057 rewind_policy = ZPOOL_DO_REWIND;
7058 if (xtreme_rewind)
7059 rewind_policy |= ZPOOL_EXTREME_REWIND;
7060
7061 /* In future, further rewind policy choices can be passed along here */
7062 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
7063 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
7064 rewind_policy) != 0) {
7065 return (1);
7066 }
7067
7068 pool = argv[0];
7069 device = argc == 2 ? argv[1] : NULL;
7070
7071 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
7072 nvlist_free(policy);
7073 return (1);
7074 }
7075
7076 if (zpool_clear(zhp, device, policy) != 0)
7077 ret = 1;
7078
7079 zpool_close(zhp);
7080
7081 nvlist_free(policy);
7082
7083 return (ret);
7084 }
7085
7086 /*
7087 * zpool reguid <pool>
7088 */
7089 int
7090 zpool_do_reguid(int argc, char **argv)
7091 {
7092 int c;
7093 char *poolname;
7094 zpool_handle_t *zhp;
7095 int ret = 0;
7096
7097 /* check options */
7098 while ((c = getopt(argc, argv, "")) != -1) {
7099 switch (c) {
7100 case '?':
7101 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7102 optopt);
7103 usage(B_FALSE);
7104 }
7105 }
7106
7107 argc -= optind;
7108 argv += optind;
7109
7110 /* get pool name and check number of arguments */
7111 if (argc < 1) {
7112 (void) fprintf(stderr, gettext("missing pool name\n"));
7113 usage(B_FALSE);
7114 }
7115
7116 if (argc > 1) {
7117 (void) fprintf(stderr, gettext("too many arguments\n"));
7118 usage(B_FALSE);
7119 }
7120
7121 poolname = argv[0];
7122 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7123 return (1);
7124
7125 ret = zpool_reguid(zhp);
7126
7127 zpool_close(zhp);
7128 return (ret);
7129 }
7130
7131
7132 /*
7133 * zpool reopen <pool>
7134 *
7135 * Reopen the pool so that the kernel can update the sizes of all vdevs.
7136 */
7137 int
7138 zpool_do_reopen(int argc, char **argv)
7139 {
7140 int c;
7141 int ret = 0;
7142 boolean_t scrub_restart = B_TRUE;
7143
7144 /* check options */
7145 while ((c = getopt(argc, argv, "n")) != -1) {
7146 switch (c) {
7147 case 'n':
7148 scrub_restart = B_FALSE;
7149 break;
7150 case '?':
7151 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7152 optopt);
7153 usage(B_FALSE);
7154 }
7155 }
7156
7157 argc -= optind;
7158 argv += optind;
7159
7160 /* if argc == 0 we will execute zpool_reopen_one on all pools */
7161 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7162 B_FALSE, zpool_reopen_one, &scrub_restart);
7163
7164 return (ret);
7165 }
7166
7167 typedef struct scrub_cbdata {
7168 int cb_type;
7169 pool_scrub_cmd_t cb_scrub_cmd;
7170 } scrub_cbdata_t;
7171
7172 static boolean_t
7173 zpool_has_checkpoint(zpool_handle_t *zhp)
7174 {
7175 nvlist_t *config, *nvroot;
7176
7177 config = zpool_get_config(zhp, NULL);
7178
7179 if (config != NULL) {
7180 pool_checkpoint_stat_t *pcs = NULL;
7181 uint_t c;
7182
7183 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7184 (void) nvlist_lookup_uint64_array(nvroot,
7185 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7186
7187 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7188 return (B_FALSE);
7189
7190 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
7191 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7192 return (B_TRUE);
7193 }
7194
7195 return (B_FALSE);
7196 }
7197
7198 static int
7199 scrub_callback(zpool_handle_t *zhp, void *data)
7200 {
7201 scrub_cbdata_t *cb = data;
7202 int err;
7203
7204 /*
7205 * Ignore faulted pools.
7206 */
7207 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
7208 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
7209 "currently unavailable\n"), zpool_get_name(zhp));
7210 return (1);
7211 }
7212
7213 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
7214
7215 if (err == 0 && zpool_has_checkpoint(zhp) &&
7216 cb->cb_type == POOL_SCAN_SCRUB) {
7217 (void) printf(gettext("warning: will not scrub state that "
7218 "belongs to the checkpoint of pool '%s'\n"),
7219 zpool_get_name(zhp));
7220 }
7221
7222 return (err != 0);
7223 }
7224
7225 static int
7226 wait_callback(zpool_handle_t *zhp, void *data)
7227 {
7228 zpool_wait_activity_t *act = data;
7229 return (zpool_wait(zhp, *act));
7230 }
7231
7232 /*
7233 * zpool scrub [-s | -p] [-w] <pool> ...
7234 *
7235 * -s Stop. Stops any in-progress scrub.
7236 * -p Pause. Pauses an in-progress scrub.
7237 * -w Wait. Blocks until scrub has completed.
7238 */
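/*
 * Illustrative examples ("tank" is a placeholder pool name):
 *
 *	# zpool scrub -w tank		scrub and wait for completion
 *	# zpool scrub -p tank		pause an in-progress scrub
 */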
7239 int
7240 zpool_do_scrub(int argc, char **argv)
7241 {
7242 int c;
7243 scrub_cbdata_t cb;
7244 boolean_t wait = B_FALSE;
7245 int error;
7246
7247 cb.cb_type = POOL_SCAN_SCRUB;
7248 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7249
7250 /* check options */
7251 while ((c = getopt(argc, argv, "spw")) != -1) {
7252 switch (c) {
7253 case 's':
7254 cb.cb_type = POOL_SCAN_NONE;
7255 break;
7256 case 'p':
7257 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
7258 break;
7259 case 'w':
7260 wait = B_TRUE;
7261 break;
7262 case '?':
7263 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7264 optopt);
7265 usage(B_FALSE);
7266 }
7267 }
7268
7269 if (cb.cb_type == POOL_SCAN_NONE &&
7270 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
7271 (void) fprintf(stderr, gettext("invalid option combination: "
7272 "-s and -p are mutually exclusive\n"));
7273 usage(B_FALSE);
7274 }
7275
7276 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
7277 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
7278 (void) fprintf(stderr, gettext("invalid option combination: "
7279 "-w cannot be used with -p or -s\n"));
7280 usage(B_FALSE);
7281 }
7282
7283 argc -= optind;
7284 argv += optind;
7285
7286 if (argc < 1) {
7287 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7288 usage(B_FALSE);
7289 }
7290
7291 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7292 B_FALSE, scrub_callback, &cb);
7293
7294 if (wait && !error) {
7295 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
7296 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7297 B_FALSE, wait_callback, &act);
7298 }
7299
7300 return (error);
7301 }
7302
7303 /*
7304 * zpool resilver <pool> ...
7305 *
7306 * Restarts any in-progress resilver
7307 */
7308 int
7309 zpool_do_resilver(int argc, char **argv)
7310 {
7311 int c;
7312 scrub_cbdata_t cb;
7313
7314 cb.cb_type = POOL_SCAN_RESILVER;
7315 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7316
7317 /* check options */
7318 while ((c = getopt(argc, argv, "")) != -1) {
7319 switch (c) {
7320 case '?':
7321 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7322 optopt);
7323 usage(B_FALSE);
7324 }
7325 }
7326
7327 argc -= optind;
7328 argv += optind;
7329
7330 if (argc < 1) {
7331 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7332 usage(B_FALSE);
7333 }
7334
7335 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7336 B_FALSE, scrub_callback, &cb));
7337 }
7338
7339 /*
7340 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
7341 *
7342 * -c Cancel. Ends any in-progress trim.
7343 * -d Secure trim. Requires kernel and device support.
7344 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
7345 * adding a multiplier suffix such as 'k' or 'm'.
7346 * -s Suspend. TRIM can then be restarted with no flags.
7347 * -w Wait. Blocks until trimming has completed.
7348 */
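/*
 * Illustrative example ("tank" is a placeholder pool name):
 *
 *	# zpool trim -r 500m tank
 *
 * starts a TRIM of every leaf vdev in "tank", capped at roughly
 * 500 MiB per second.
 */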
7349 int
7350 zpool_do_trim(int argc, char **argv)
7351 {
7352 struct option long_options[] = {
7353 {"cancel", no_argument, NULL, 'c'},
7354 {"secure", no_argument, NULL, 'd'},
7355 {"rate", required_argument, NULL, 'r'},
7356 {"suspend", no_argument, NULL, 's'},
7357 {"wait", no_argument, NULL, 'w'},
7358 {0, 0, 0, 0}
7359 };
7360
7361 pool_trim_func_t cmd_type = POOL_TRIM_START;
7362 uint64_t rate = 0;
7363 boolean_t secure = B_FALSE;
7364 boolean_t wait = B_FALSE;
7365
7366 int c;
7367 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
7368 != -1) {
7369 switch (c) {
7370 case 'c':
7371 if (cmd_type != POOL_TRIM_START &&
7372 cmd_type != POOL_TRIM_CANCEL) {
7373 (void) fprintf(stderr, gettext("-c cannot be "
7374 "combined with other options\n"));
7375 usage(B_FALSE);
7376 }
7377 cmd_type = POOL_TRIM_CANCEL;
7378 break;
7379 case 'd':
7380 if (cmd_type != POOL_TRIM_START) {
7381 (void) fprintf(stderr, gettext("-d cannot be "
7382 "combined with the -c or -s options\n"));
7383 usage(B_FALSE);
7384 }
7385 secure = B_TRUE;
7386 break;
7387 case 'r':
7388 if (cmd_type != POOL_TRIM_START) {
7389 (void) fprintf(stderr, gettext("-r cannot be "
7390 "combined with the -c or -s options\n"));
7391 usage(B_FALSE);
7392 }
7393 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
7394 (void) fprintf(stderr, "%s: %s\n",
7395 gettext("invalid value for rate"),
7396 libzfs_error_description(g_zfs));
7397 usage(B_FALSE);
7398 }
7399 break;
7400 case 's':
7401 if (cmd_type != POOL_TRIM_START &&
7402 cmd_type != POOL_TRIM_SUSPEND) {
7403 (void) fprintf(stderr, gettext("-s cannot be "
7404 "combined with other options\n"));
7405 usage(B_FALSE);
7406 }
7407 cmd_type = POOL_TRIM_SUSPEND;
7408 break;
7409 case 'w':
7410 wait = B_TRUE;
7411 break;
7412 case '?':
7413 if (optopt != 0) {
7414 (void) fprintf(stderr,
7415 gettext("invalid option '%c'\n"), optopt);
7416 } else {
7417 (void) fprintf(stderr,
7418 gettext("invalid option '%s'\n"),
7419 argv[optind - 1]);
7420 }
7421 usage(B_FALSE);
7422 }
7423 }
7424
7425 argc -= optind;
7426 argv += optind;
7427
7428 if (argc < 1) {
7429 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7430 usage(B_FALSE);
7431 return (-1);
7432 }
7433
7434 if (wait && (cmd_type != POOL_TRIM_START)) {
7435 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
7436 "-s\n"));
7437 usage(B_FALSE);
7438 }
7439
7440 char *poolname = argv[0];
7441 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
7442 if (zhp == NULL)
7443 return (-1);
7444
7445 trimflags_t trim_flags = {
7446 .secure = secure,
7447 .rate = rate,
7448 .wait = wait,
7449 };
7450
7451 nvlist_t *vdevs = fnvlist_alloc();
7452 if (argc == 1) {
7453 /* no individual leaf vdevs specified, so add them all */
7454 nvlist_t *config = zpool_get_config(zhp, NULL);
7455 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
7456 ZPOOL_CONFIG_VDEV_TREE);
7457 zpool_collect_leaves(zhp, nvroot, vdevs);
7458 trim_flags.fullpool = B_TRUE;
7459 } else {
7460 trim_flags.fullpool = B_FALSE;
7461 for (int i = 1; i < argc; i++) {
7462 fnvlist_add_boolean(vdevs, argv[i]);
7463 }
7464 }
7465
7466 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
7467
7468 fnvlist_free(vdevs);
7469 zpool_close(zhp);
7470
7471 return (error);
7472 }
7473
7474 /*
7475 * Converts a total number of seconds to a human-readable string broken
7476 * down into days/hours/minutes/seconds.
7477 */
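/*
 * Illustrative example: 90061 total seconds is rendered as
 * "1 days 01:01:01", while 3661 seconds is rendered as "01:01:01".
 */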
7478 static void
7479 secs_to_dhms(uint64_t total, char *buf)
7480 {
7481 uint64_t days = total / 60 / 60 / 24;
7482 uint64_t hours = (total / 60 / 60) % 24;
7483 uint64_t mins = (total / 60) % 60;
7484 uint64_t secs = (total % 60);
7485
7486 if (days > 0) {
7487 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
7488 (u_longlong_t)days, (u_longlong_t)hours,
7489 (u_longlong_t)mins, (u_longlong_t)secs);
7490 } else {
7491 (void) sprintf(buf, "%02llu:%02llu:%02llu",
7492 (u_longlong_t)hours, (u_longlong_t)mins,
7493 (u_longlong_t)secs);
7494 }
7495 }
7496
7497 /*
7498 * Print out detailed scrub status.
7499 */
7500 static void
7501 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
7502 {
7503 time_t start, end, pause;
7504 uint64_t pass_scanned, scanned, pass_issued, issued, total;
7505 uint64_t elapsed, scan_rate, issue_rate;
7506 double fraction_done;
7507 char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
7508 char srate_buf[7], irate_buf[7], time_buf[32];
7509
7510 printf(" ");
7511 printf_color(ANSI_BOLD, gettext("scan:"));
7512 printf(" ");
7513
7514 /* If there's never been a scan, there's not much to say. */
7515 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
7516 ps->pss_func >= POOL_SCAN_FUNCS) {
7517 (void) printf(gettext("none requested\n"));
7518 return;
7519 }
7520
7521 start = ps->pss_start_time;
7522 end = ps->pss_end_time;
7523 pause = ps->pss_pass_scrub_pause;
7524
7525 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
7526
7527 assert(ps->pss_func == POOL_SCAN_SCRUB ||
7528 ps->pss_func == POOL_SCAN_RESILVER);
7529
7530 /* Scan is finished or canceled. */
7531 if (ps->pss_state == DSS_FINISHED) {
7532 secs_to_dhms(end - start, time_buf);
7533
7534 if (ps->pss_func == POOL_SCAN_SCRUB) {
7535 (void) printf(gettext("scrub repaired %s "
7536 "in %s with %llu errors on %s"), processed_buf,
7537 time_buf, (u_longlong_t)ps->pss_errors,
7538 ctime(&end));
7539 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7540 (void) printf(gettext("resilvered %s "
7541 "in %s with %llu errors on %s"), processed_buf,
7542 time_buf, (u_longlong_t)ps->pss_errors,
7543 ctime(&end));
7544 }
7545 return;
7546 } else if (ps->pss_state == DSS_CANCELED) {
7547 if (ps->pss_func == POOL_SCAN_SCRUB) {
7548 (void) printf(gettext("scrub canceled on %s"),
7549 ctime(&end));
7550 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7551 (void) printf(gettext("resilver canceled on %s"),
7552 ctime(&end));
7553 }
7554 return;
7555 }
7556
7557 assert(ps->pss_state == DSS_SCANNING);
7558
7559 /* Scan is in progress. Resilvers can't be paused. */
7560 if (ps->pss_func == POOL_SCAN_SCRUB) {
7561 if (pause == 0) {
7562 (void) printf(gettext("scrub in progress since %s"),
7563 ctime(&start));
7564 } else {
7565 (void) printf(gettext("scrub paused since %s"),
7566 ctime(&pause));
7567 (void) printf(gettext("\tscrub started on %s"),
7568 ctime(&start));
7569 }
7570 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7571 (void) printf(gettext("resilver in progress since %s"),
7572 ctime(&start));
7573 }
7574
7575 scanned = ps->pss_examined;
7576 pass_scanned = ps->pss_pass_exam;
7577 issued = ps->pss_issued;
7578 pass_issued = ps->pss_pass_issued;
7579 total = ps->pss_to_examine;
7580
7581 /* we are only done with a block once we have issued the IO for it */
7582 fraction_done = (double)issued / total;
7583
7584 /* elapsed time for this pass, rounding up to 1 if it's 0 */
7585 elapsed = time(NULL) - ps->pss_pass_start;
7586 elapsed -= ps->pss_pass_scrub_spent_paused;
7587 elapsed = (elapsed != 0) ? elapsed : 1;
7588
7589 scan_rate = pass_scanned / elapsed;
7590 issue_rate = pass_issued / elapsed;
7591 uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ?
7592 ((total - issued) / issue_rate) : UINT64_MAX;
7593 secs_to_dhms(total_secs_left, time_buf);
7594
7595 /* format all of the numbers we will be reporting */
7596 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
7597 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
7598 zfs_nicebytes(total, total_buf, sizeof (total_buf));
7599 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
7600 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
7601
7602 /* do not print estimated time if we have a paused scrub */
7603 if (pause == 0) {
7604 (void) printf(gettext("\t%s scanned at %s/s, "
7605 "%s issued at %s/s, %s total\n"),
7606 scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
7607 } else {
7608 (void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
7609 scanned_buf, issued_buf, total_buf);
7610 }
7611
7612 if (ps->pss_func == POOL_SCAN_RESILVER) {
7613 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
7614 processed_buf, 100 * fraction_done);
7615 } else if (ps->pss_func == POOL_SCAN_SCRUB) {
7616 (void) printf(gettext("\t%s repaired, %.2f%% done"),
7617 processed_buf, 100 * fraction_done);
7618 }
7619
7620 if (pause == 0) {
7621 if (total_secs_left != UINT64_MAX &&
7622 issue_rate >= 10 * 1024 * 1024) {
7623 (void) printf(gettext(", %s to go\n"), time_buf);
7624 } else {
7625 (void) printf(gettext(", no estimated "
7626 "completion time\n"));
7627 }
7628 } else {
7629 (void) printf(gettext("\n"));
7630 }
7631 }
7632
7633 static void
7634 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name)
7635 {
7636 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
7637 return;
7638
7639 printf(" ");
7640 printf_color(ANSI_BOLD, gettext("scan:"));
7641 printf(" ");
7642
7643 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
7644 uint64_t bytes_issued = vrs->vrs_bytes_issued;
7645 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
7646 uint64_t bytes_est = vrs->vrs_bytes_est;
7647 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
7648 (vrs->vrs_pass_time_ms + 1)) * 1000;
7649 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
7650 (vrs->vrs_pass_time_ms + 1)) * 1000;
7651 double scan_pct = MIN((double)bytes_scanned * 100 /
7652 (bytes_est + 1), 100);
7653
7654 /* Format all of the numbers we will be reporting */
7655 char bytes_scanned_buf[7], bytes_issued_buf[7];
7656 char bytes_rebuilt_buf[7], bytes_est_buf[7];
7657 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
7658 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
7659 sizeof (bytes_scanned_buf));
7660 zfs_nicebytes(bytes_issued, bytes_issued_buf,
7661 sizeof (bytes_issued_buf));
7662 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
7663 sizeof (bytes_rebuilt_buf));
7664 zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf));
7665 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
7666 zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf));
7667
7668 time_t start = vrs->vrs_start_time;
7669 time_t end = vrs->vrs_end_time;
7670
7671 /* Rebuild is finished or canceled. */
7672 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
7673 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
7674 (void) printf(gettext("resilvered (%s) %s in %s "
7675 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
7676 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
7677 return;
7678 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
7679 (void) printf(gettext("resilver (%s) canceled on %s"),
7680 vdev_name, ctime(&end));
7681 return;
7682 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7683 (void) printf(gettext("resilver (%s) in progress since %s"),
7684 vdev_name, ctime(&start));
7685 }
7686
7687 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
7688
7689 secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) /
7690 MAX(scan_rate, 1), time_buf);
7691
7692 (void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, "
7693 "%s total\n"), bytes_scanned_buf, scan_rate_buf,
7694 bytes_issued_buf, issue_rate_buf, bytes_est_buf);
7695 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
7696 bytes_rebuilt_buf, scan_pct);
7697
7698 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7699 if (scan_rate >= 10 * 1024 * 1024) {
7700 (void) printf(gettext(", %s to go\n"), time_buf);
7701 } else {
7702 (void) printf(gettext(", no estimated "
7703 "completion time\n"));
7704 }
7705 } else {
7706 (void) printf(gettext("\n"));
7707 }
7708 }
7709
7710 /*
7711 * Print rebuild status for top-level vdevs.
7712 */
7713 static void
7714 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
7715 {
7716 nvlist_t **child;
7717 uint_t children;
7718
7719 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7720 &child, &children) != 0)
7721 children = 0;
7722
7723 for (uint_t c = 0; c < children; c++) {
7724 vdev_rebuild_stat_t *vrs;
7725 uint_t i;
7726
7727 if (nvlist_lookup_uint64_array(child[c],
7728 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
7729 char *name = zpool_vdev_name(g_zfs, zhp,
7730 child[c], VDEV_NAME_TYPE_ID);
7731 print_rebuild_status_impl(vrs, name);
7732 free(name);
7733 }
7734 }
7735 }
7736
7737 /*
7738 * As we don't scrub checkpointed blocks, we want to warn the user that we
7739 * skipped scanning some blocks if a checkpoint exists or existed at any
7740 * time during the scan. If a sequential instead of healing reconstruction
7741 * was performed then the blocks were reconstructed. However, their checksums
7742 * have not been verified so we still print the warning.
7743 */
7744 static void
7745 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
7746 {
7747 if (ps == NULL || pcs == NULL)
7748 return;
7749
7750 if (pcs->pcs_state == CS_NONE ||
7751 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
7752 return;
7753
7754 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
7755
7756 if (ps->pss_state == DSS_NONE)
7757 return;
7758
7759 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
7760 ps->pss_end_time < pcs->pcs_start_time)
7761 return;
7762
7763 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
7764 (void) printf(gettext(" scan warning: skipped blocks "
7765 "that are only referenced by the checkpoint.\n"));
7766 } else {
7767 assert(ps->pss_state == DSS_SCANNING);
7768 (void) printf(gettext(" scan warning: skipping blocks "
7769 "that are only referenced by the checkpoint.\n"));
7770 }
7771 }
7772
7773 /*
7774 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
7775 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
7776 * the last completed (or cancelled) rebuild.
7777 */
7778 static boolean_t
7779 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
7780 {
7781 nvlist_t **child;
7782 uint_t children;
7783 boolean_t rebuilding = B_FALSE;
7784 uint64_t end_time = 0;
7785
7786 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7787 &child, &children) != 0)
7788 children = 0;
7789
7790 for (uint_t c = 0; c < children; c++) {
7791 vdev_rebuild_stat_t *vrs;
7792 uint_t i;
7793
7794 if (nvlist_lookup_uint64_array(child[c],
7795 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
7796
7797 if (vrs->vrs_end_time > end_time)
7798 end_time = vrs->vrs_end_time;
7799
7800 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7801 rebuilding = B_TRUE;
7802 end_time = 0;
7803 break;
7804 }
7805 }
7806 }
7807
7808 if (rebuild_end_time != NULL)
7809 *rebuild_end_time = end_time;
7810
7811 return (rebuilding);
7812 }
7813
7814 /*
7815 * Print the scan status.
7816 */
7817 static void
7818 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
7819 {
7820 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
7821 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
7822 boolean_t active_resilver = B_FALSE;
7823 pool_checkpoint_stat_t *pcs = NULL;
7824 pool_scan_stat_t *ps = NULL;
7825 uint_t c;
7826
7827 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
7828 (uint64_t **)&ps, &c) == 0) {
7829 if (ps->pss_func == POOL_SCAN_RESILVER) {
7830 resilver_end_time = ps->pss_end_time;
7831 active_resilver = (ps->pss_state == DSS_SCANNING);
7832 }
7833
7834 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
7835 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
7836 }
7837
7838 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
7839 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
7840
7841 /* Always print the scrub status when available. */
7842 if (have_scrub)
7843 print_scan_scrub_resilver_status(ps);
7844
7845 /*
7846 * When there is an active resilver or rebuild print its status.
7847 * Otherwise print the status of the last resilver or rebuild.
7848 */
7849 if (active_resilver || (!active_rebuild && have_resilver &&
7850 resilver_end_time && resilver_end_time > rebuild_end_time)) {
7851 print_scan_scrub_resilver_status(ps);
7852 } else if (active_rebuild || (!active_resilver && have_rebuild &&
7853 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
7854 print_rebuild_status(zhp, nvroot);
7855 }
7856
7857 (void) nvlist_lookup_uint64_array(nvroot,
7858 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7859 print_checkpoint_scan_warning(ps, pcs);
7860 }
7861
7862 /*
7863 * Print out detailed removal status.
7864 */
7865 static void
7866 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
7867 {
7868 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
7869 time_t start, end;
7870 nvlist_t *config, *nvroot;
7871 nvlist_t **child;
7872 uint_t children;
7873 char *vdev_name;
7874
7875 if (prs == NULL || prs->prs_state == DSS_NONE)
7876 return;
7877
7878 /*
7879 * Determine name of vdev.
7880 */
7881 config = zpool_get_config(zhp, NULL);
7882 nvroot = fnvlist_lookup_nvlist(config,
7883 ZPOOL_CONFIG_VDEV_TREE);
7884 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7885 &child, &children) == 0);
7886 assert(prs->prs_removing_vdev < children);
7887 vdev_name = zpool_vdev_name(g_zfs, zhp,
7888 child[prs->prs_removing_vdev], B_TRUE);
7889
7890 printf_color(ANSI_BOLD, gettext("remove: "));
7891
7892 start = prs->prs_start_time;
7893 end = prs->prs_end_time;
7894 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
7895
7896 /*
7897 * Removal is finished or canceled.
7898 */
7899 if (prs->prs_state == DSS_FINISHED) {
7900 uint64_t minutes_taken = (end - start) / 60;
7901
7902 (void) printf(gettext("Removal of vdev %llu copied %s "
7903 "in %lluh%um, completed on %s"),
7904 (longlong_t)prs->prs_removing_vdev,
7905 copied_buf,
7906 (u_longlong_t)(minutes_taken / 60),
7907 (uint_t)(minutes_taken % 60),
7908 ctime((time_t *)&end));
7909 } else if (prs->prs_state == DSS_CANCELED) {
7910 (void) printf(gettext("Removal of %s canceled on %s"),
7911 vdev_name, ctime(&end));
7912 } else {
7913 uint64_t copied, total, elapsed, mins_left, hours_left;
7914 double fraction_done;
7915 uint_t rate;
7916
7917 assert(prs->prs_state == DSS_SCANNING);
7918
7919 /*
7920 * Removal is in progress.
7921 */
7922 (void) printf(gettext(
7923 "Evacuation of %s in progress since %s"),
7924 vdev_name, ctime(&start));
7925
7926 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
7927 total = prs->prs_to_copy;
7928 fraction_done = (double)copied / total;
7929
7930 /* elapsed time for this pass */
7931 elapsed = time(NULL) - prs->prs_start_time;
7932 elapsed = elapsed > 0 ? elapsed : 1;
7933 rate = copied / elapsed;
7934 rate = rate > 0 ? rate : 1;
7935 mins_left = ((total - copied) / rate) / 60;
7936 hours_left = mins_left / 60;
7937
7938 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
7939 zfs_nicenum(total, total_buf, sizeof (total_buf));
7940 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
7941
7942 /*
7943 * do not print estimated time if hours_left is more than
7944 * 30 days
7945 */
7946 (void) printf(gettext(
7947 "\t%s copied out of %s at %s/s, %.2f%% done"),
7948 examined_buf, total_buf, rate_buf, 100 * fraction_done);
7949 if (hours_left < (30 * 24)) {
7950 (void) printf(gettext(", %lluh%um to go\n"),
7951 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
7952 } else {
7953 (void) printf(gettext(
7954 ", (copy is slow, no estimated time)\n"));
7955 }
7956 }
7957 free(vdev_name);
7958
7959 if (prs->prs_mapping_memory > 0) {
7960 char mem_buf[7];
7961 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
7962 (void) printf(gettext(
7963 "\t%s memory used for removed device mappings\n"),
7964 mem_buf);
7965 }
7966 }
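
/*
 * Illustrative sketch of the evacuation ETA arithmetic above, using
 * hypothetical numbers (not taken from any real pool):
 *
 *	copied = 120 GiB, total = 600 GiB, elapsed = 3600 s
 *	rate       = copied / elapsed               = ~34 MiB/s
 *	mins_left  = ((total - copied) / rate) / 60 = ~240 minutes
 *	hours_left = mins_left / 60                 = 4, printed as "4h0m to go"
 *
 * Since 4 hours is well under the 30-day cutoff, the estimate is printed.
 */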
7967
7968 static void
7969 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
7970 {
7971 time_t start;
7972 char space_buf[7];
7973
7974 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7975 return;
7976
7977 (void) printf(gettext("checkpoint: "));
7978
7979 start = pcs->pcs_start_time;
7980 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
7981
7982 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
7983 char *date = ctime(&start);
7984
7985 /*
7986 * ctime() adds a newline at the end of the generated
7987 * string, thus the weird format specifier and the
7988 * strlen() call used to chop it off from the output.
7989 */
7990 (void) printf(gettext("created %.*s, consumes %s\n"),
7991 (int)(strlen(date) - 1), date, space_buf);
7992 return;
7993 }
7994
7995 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7996
7997 (void) printf(gettext("discarding, %s remaining.\n"),
7998 space_buf);
7999 }
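
/*
 * Example output produced by print_checkpoint_status() (values are
 * hypothetical):
 *
 *	checkpoint: created Sat Feb 20 10:21:43 2021, consumes 1.20G
 *	checkpoint: discarding, 512M remaining.
 *
 * Only one of the two forms is printed for a given pool, depending on
 * whether the checkpoint exists or is currently being discarded.
 */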
8000
8001 static void
8002 print_error_log(zpool_handle_t *zhp)
8003 {
8004 nvlist_t *nverrlist = NULL;
8005 nvpair_t *elem;
8006 char *pathname;
8007 size_t len = MAXPATHLEN * 2;
8008
8009 if (zpool_get_errlog(zhp, &nverrlist) != 0)
8010 return;
8011
8012 (void) printf("errors: Permanent errors have been "
8013 "detected in the following files:\n\n");
8014
8015 pathname = safe_malloc(len);
8016 elem = NULL;
8017 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
8018 nvlist_t *nv;
8019 uint64_t dsobj, obj;
8020
8021 verify(nvpair_value_nvlist(elem, &nv) == 0);
8022 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
8023 &dsobj) == 0);
8024 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
8025 &obj) == 0);
8026 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
8027 (void) printf("%7s %s\n", "", pathname);
8028 }
8029 free(pathname);
8030 nvlist_free(nverrlist);
8031 }
8032
8033 static void
8034 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
8035 uint_t nspares)
8036 {
8037 uint_t i;
8038 char *name;
8039
8040 if (nspares == 0)
8041 return;
8042
8043 (void) printf(gettext("\tspares\n"));
8044
8045 for (i = 0; i < nspares; i++) {
8046 name = zpool_vdev_name(g_zfs, zhp, spares[i],
8047 cb->cb_name_flags);
8048 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
8049 free(name);
8050 }
8051 }
8052
8053 static void
8054 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
8055 uint_t nl2cache)
8056 {
8057 uint_t i;
8058 char *name;
8059
8060 if (nl2cache == 0)
8061 return;
8062
8063 (void) printf(gettext("\tcache\n"));
8064
8065 for (i = 0; i < nl2cache; i++) {
8066 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
8067 cb->cb_name_flags);
8068 print_status_config(zhp, cb, name, l2cache[i], 2,
8069 B_FALSE, NULL);
8070 free(name);
8071 }
8072 }
8073
8074 static void
8075 print_dedup_stats(nvlist_t *config)
8076 {
8077 ddt_histogram_t *ddh;
8078 ddt_stat_t *dds;
8079 ddt_object_t *ddo;
8080 uint_t c;
8081 char dspace[6], mspace[6];
8082
8083 /*
8084 * If the pool was faulted then we may not have been able to
8085 * obtain the config. Otherwise, if we have anything in the dedup
8086 * table continue processing the stats.
8087 */
8088 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
8089 (uint64_t **)&ddo, &c) != 0)
8090 return;
8091
8092 (void) printf("\n");
8093 (void) printf(gettext(" dedup: "));
8094 if (ddo->ddo_count == 0) {
8095 (void) printf(gettext("no DDT entries\n"));
8096 return;
8097 }
8098
8099 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
8100 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
8101 (void) printf("DDT entries %llu, size %s on disk, %s in core\n",
8102 (u_longlong_t)ddo->ddo_count,
8103 dspace,
8104 mspace);
8105
8106 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
8107 (uint64_t **)&dds, &c) == 0);
8108 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
8109 (uint64_t **)&ddh, &c) == 0);
8110 zpool_dump_ddt(dds, ddh);
8111 }
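
/*
 * Example of the dedup summary line printed above (hypothetical values):
 *
 *	 dedup: DDT entries 1138921, size 489M on disk, 327M in core
 *
 * followed by the full DDT histogram emitted by zpool_dump_ddt().
 */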
8112
8113 /*
8114 * Display a summary of pool status. Displays a summary such as:
8115 *
8116 * pool: tank
8117 * status: DEGRADED
8118 * reason: One or more devices ...
8119 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
8120 * config:
8121 * mirror DEGRADED
8122 * c1t0d0 OK
8123 * c2t0d0 UNAVAIL
8124 *
8125  * When given the '-v' option, we also print out the complete error logs
8126  * (the list of files affected by permanent errors).
8127 */
8128 static int
8129 status_callback(zpool_handle_t *zhp, void *data)
8130 {
8131 status_cbdata_t *cbp = data;
8132 nvlist_t *config, *nvroot;
8133 const char *msgid;
8134 zpool_status_t reason;
8135 zpool_errata_t errata;
8136 const char *health;
8137 uint_t c;
8138 vdev_stat_t *vs;
8139
8140 config = zpool_get_config(zhp, NULL);
8141 reason = zpool_get_status(zhp, &msgid, &errata);
8142
8143 cbp->cb_count++;
8144
8145 /*
8146 * If we were given 'zpool status -x', only report those pools with
8147 * problems.
8148 */
8149 if (cbp->cb_explain &&
8150 (reason == ZPOOL_STATUS_OK ||
8151 reason == ZPOOL_STATUS_VERSION_OLDER ||
8152 reason == ZPOOL_STATUS_FEAT_DISABLED ||
8153 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
8154 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
8155 if (!cbp->cb_allpools) {
8156 (void) printf(gettext("pool '%s' is healthy\n"),
8157 zpool_get_name(zhp));
8158 if (cbp->cb_first)
8159 cbp->cb_first = B_FALSE;
8160 }
8161 return (0);
8162 }
8163
8164 if (cbp->cb_first)
8165 cbp->cb_first = B_FALSE;
8166 else
8167 (void) printf("\n");
8168
8169 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8170 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
8171 (uint64_t **)&vs, &c) == 0);
8172
8173 health = zpool_get_state_str(zhp);
8174
8175 printf(" ");
8176 printf_color(ANSI_BOLD, gettext("pool:"));
8177 printf(" %s\n", zpool_get_name(zhp));
8178 fputc(' ', stdout);
8179 printf_color(ANSI_BOLD, gettext("state: "));
8180
8181 printf_color(health_str_to_color(health), "%s", health);
8182
8183 fputc('\n', stdout);
8184
8185 switch (reason) {
8186 case ZPOOL_STATUS_MISSING_DEV_R:
8187 printf_color(ANSI_BOLD, gettext("status: "));
8188 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8189 "not be opened. Sufficient replicas exist for\n\tthe pool "
8190 "to continue functioning in a degraded state.\n"));
8191 printf_color(ANSI_BOLD, gettext("action: "));
8192 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8193 "and online it using 'zpool online'.\n"));
8194 break;
8195
8196 case ZPOOL_STATUS_MISSING_DEV_NR:
8197 printf_color(ANSI_BOLD, gettext("status: "));
8198 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8199 "not be opened. There are insufficient\n\treplicas for the"
8200 " pool to continue functioning.\n"));
8201 printf_color(ANSI_BOLD, gettext("action: "));
8202 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8203 "and online it using 'zpool online'.\n"));
8204 break;
8205
8206 case ZPOOL_STATUS_CORRUPT_LABEL_R:
8207 printf_color(ANSI_BOLD, gettext("status: "));
8208 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8209 "not be used because the label is missing or\n\tinvalid. "
8210 "Sufficient replicas exist for the pool to continue\n\t"
8211 "functioning in a degraded state.\n"));
8212 printf_color(ANSI_BOLD, gettext("action: "));
8213 printf_color(ANSI_YELLOW, gettext("Replace the device using "
8214 "'zpool replace'.\n"));
8215 break;
8216
8217 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
8218 printf_color(ANSI_BOLD, gettext("status: "));
8219 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8220 "not be used because the label is missing \n\tor invalid. "
8221 "There are insufficient replicas for the pool to "
8222 "continue\n\tfunctioning.\n"));
8223 zpool_explain_recover(zpool_get_handle(zhp),
8224 zpool_get_name(zhp), reason, config);
8225 break;
8226
8227 case ZPOOL_STATUS_FAILING_DEV:
8228 printf_color(ANSI_BOLD, gettext("status: "));
8229 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8230 "experienced an unrecoverable error. An\n\tattempt was "
8231 "made to correct the error. Applications are "
8232 "unaffected.\n"));
8233 printf_color(ANSI_BOLD, gettext("action: "));
8234 printf_color(ANSI_YELLOW, gettext("Determine if the "
8235 "device needs to be replaced, and clear the errors\n\tusing"
8236 " 'zpool clear' or replace the device with 'zpool "
8237 "replace'.\n"));
8238 break;
8239
8240 case ZPOOL_STATUS_OFFLINE_DEV:
8241 printf_color(ANSI_BOLD, gettext("status: "));
8242 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8243 "been taken offline by the administrator.\n\tSufficient "
8244 "replicas exist for the pool to continue functioning in "
8245 "a\n\tdegraded state.\n"));
8246 printf_color(ANSI_BOLD, gettext("action: "));
8247 printf_color(ANSI_YELLOW, gettext("Online the device "
8248 "using 'zpool online' or replace the device with\n\t'zpool "
8249 "replace'.\n"));
8250 break;
8251
8252 case ZPOOL_STATUS_REMOVED_DEV:
8253 printf_color(ANSI_BOLD, gettext("status: "));
8254 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8255 "been removed by the administrator.\n\tSufficient "
8256 "replicas exist for the pool to continue functioning in "
8257 "a\n\tdegraded state.\n"));
8258 printf_color(ANSI_BOLD, gettext("action: "));
8259 printf_color(ANSI_YELLOW, gettext("Online the device "
8260 			    "using 'zpool online' or replace the device with\n\t'zpool "
8261 "replace'.\n"));
8262 break;
8263
8264 case ZPOOL_STATUS_RESILVERING:
8265 case ZPOOL_STATUS_REBUILDING:
8266 printf_color(ANSI_BOLD, gettext("status: "));
8267 printf_color(ANSI_YELLOW, gettext("One or more devices is "
8268 "currently being resilvered. The pool will\n\tcontinue "
8269 "to function, possibly in a degraded state.\n"));
8270 printf_color(ANSI_BOLD, gettext("action: "));
8271 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
8272 "complete.\n"));
8273 break;
8274
8275 case ZPOOL_STATUS_REBUILD_SCRUB:
8276 printf_color(ANSI_BOLD, gettext("status: "));
8277 printf_color(ANSI_YELLOW, gettext("One or more devices have "
8278 "been sequentially resilvered, scrubbing\n\tthe pool "
8279 "is recommended.\n"));
8280 printf_color(ANSI_BOLD, gettext("action: "));
8281 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
8282 "verify all data checksums.\n"));
8283 break;
8284
8285 case ZPOOL_STATUS_CORRUPT_DATA:
8286 printf_color(ANSI_BOLD, gettext("status: "));
8287 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8288 "experienced an error resulting in data\n\tcorruption. "
8289 "Applications may be affected.\n"));
8290 printf_color(ANSI_BOLD, gettext("action: "));
8291 printf_color(ANSI_YELLOW, gettext("Restore the file in question"
8292 " if possible. Otherwise restore the\n\tentire pool from "
8293 "backup.\n"));
8294 break;
8295
8296 case ZPOOL_STATUS_CORRUPT_POOL:
8297 printf_color(ANSI_BOLD, gettext("status: "));
8298 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
8299 "corrupted and the pool cannot be opened.\n"));
8300 zpool_explain_recover(zpool_get_handle(zhp),
8301 zpool_get_name(zhp), reason, config);
8302 break;
8303
8304 case ZPOOL_STATUS_VERSION_OLDER:
8305 printf_color(ANSI_BOLD, gettext("status: "));
8306 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
8307 "a legacy on-disk format. The pool can\n\tstill be used, "
8308 "but some features are unavailable.\n"));
8309 printf_color(ANSI_BOLD, gettext("action: "));
8310 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
8311 "'zpool upgrade'. Once this is done, the\n\tpool will no "
8312 "longer be accessible on software that does not support\n\t"
8313 "feature flags.\n"));
8314 break;
8315
8316 case ZPOOL_STATUS_VERSION_NEWER:
8317 printf_color(ANSI_BOLD, gettext("status: "));
8318 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
8319 "to a newer, incompatible on-disk version.\n\tThe pool "
8320 "cannot be accessed on this system.\n"));
8321 printf_color(ANSI_BOLD, gettext("action: "));
8322 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8323 "system running more recent software, or\n\trestore the "
8324 "pool from backup.\n"));
8325 break;
8326
8327 case ZPOOL_STATUS_FEAT_DISABLED:
8328 printf_color(ANSI_BOLD, gettext("status: "));
8329 printf_color(ANSI_YELLOW, gettext("Some supported and "
8330 "requested features are not enabled on the pool.\n\t"
8331 "The pool can still be used, but some features are "
8332 "unavailable.\n"));
8333 printf_color(ANSI_BOLD, gettext("action: "));
8334 printf_color(ANSI_YELLOW, gettext("Enable all features using "
8335 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
8336 "longer be accessible by software that does not support\n\t"
8337 "the features. See zpool-features(7) for details.\n"));
8338 break;
8339
8340 case ZPOOL_STATUS_COMPATIBILITY_ERR:
8341 printf_color(ANSI_BOLD, gettext("status: "));
8342 printf_color(ANSI_YELLOW, gettext("This pool has a "
8343 "compatibility list specified, but it could not be\n\t"
8344 "read/parsed at this time. The pool can still be used, "
8345 "but this\n\tshould be investigated.\n"));
8346 printf_color(ANSI_BOLD, gettext("action: "));
8347 printf_color(ANSI_YELLOW, gettext("Check the value of the "
8348 "'compatibility' property against the\n\t"
8349 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
8350 ZPOOL_DATA_COMPAT_D ".\n"));
8351 break;
8352
8353 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
8354 printf_color(ANSI_BOLD, gettext("status: "));
8355 printf_color(ANSI_YELLOW, gettext("One or more features "
8356 "are enabled on the pool despite not being\n\t"
8357 "requested by the 'compatibility' property.\n"));
8358 printf_color(ANSI_BOLD, gettext("action: "));
8359 printf_color(ANSI_YELLOW, gettext("Consider setting "
8360 "'compatibility' to an appropriate value, or\n\t"
8361 "adding needed features to the relevant file in\n\t"
8362 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
8363 break;
8364
8365 case ZPOOL_STATUS_UNSUP_FEAT_READ:
8366 printf_color(ANSI_BOLD, gettext("status: "));
8367 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8368 "on this system because it uses the\n\tfollowing feature(s)"
8369 " not supported on this system:\n"));
8370 zpool_print_unsup_feat(config);
8371 (void) printf("\n");
8372 printf_color(ANSI_BOLD, gettext("action: "));
8373 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8374 "system that supports the required feature(s),\n\tor "
8375 "restore the pool from backup.\n"));
8376 break;
8377
8378 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
8379 printf_color(ANSI_BOLD, gettext("status: "));
8380 printf_color(ANSI_YELLOW, gettext("The pool can only be "
8381 "accessed in read-only mode on this system. It\n\tcannot be"
8382 " accessed in read-write mode because it uses the "
8383 "following\n\tfeature(s) not supported on this system:\n"));
8384 zpool_print_unsup_feat(config);
8385 (void) printf("\n");
8386 printf_color(ANSI_BOLD, gettext("action: "));
8387 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8388 "in read-write mode. Import the pool with\n"
8389 "\t\"-o readonly=on\", access the pool from a system that "
8390 "supports the\n\trequired feature(s), or restore the "
8391 "pool from backup.\n"));
8392 break;
8393
8394 case ZPOOL_STATUS_FAULTED_DEV_R:
8395 printf_color(ANSI_BOLD, gettext("status: "));
8396 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8397 "faulted in response to persistent errors.\n\tSufficient "
8398 "replicas exist for the pool to continue functioning "
8399 "in a\n\tdegraded state.\n"));
8400 printf_color(ANSI_BOLD, gettext("action: "));
8401 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
8402 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
8403 break;
8404
8405 case ZPOOL_STATUS_FAULTED_DEV_NR:
8406 printf_color(ANSI_BOLD, gettext("status: "));
8407 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8408 "faulted in response to persistent errors. There are "
8409 "insufficient replicas for the pool to\n\tcontinue "
8410 "functioning.\n"));
8411 printf_color(ANSI_BOLD, gettext("action: "));
8412 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
8413 "pool from a backup source. Manually marking the device\n"
8414 "\trepaired using 'zpool clear' may allow some data "
8415 "to be recovered.\n"));
8416 break;
8417
8418 case ZPOOL_STATUS_IO_FAILURE_MMP:
8419 printf_color(ANSI_BOLD, gettext("status: "));
8420 printf_color(ANSI_YELLOW, gettext("The pool is suspended "
8421 "because multihost writes failed or were delayed;\n\t"
8422 "another system could import the pool undetected.\n"));
8423 printf_color(ANSI_BOLD, gettext("action: "));
8424 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
8425 " are connected, then reboot your system and\n\timport the "
8426 "pool.\n"));
8427 break;
8428
8429 case ZPOOL_STATUS_IO_FAILURE_WAIT:
8430 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
8431 printf_color(ANSI_BOLD, gettext("status: "));
8432 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8433 "faulted in response to IO failures.\n"));
8434 printf_color(ANSI_BOLD, gettext("action: "));
8435 printf_color(ANSI_YELLOW, gettext("Make sure the affected "
8436 "devices are connected, then run 'zpool clear'.\n"));
8437 break;
8438
8439 case ZPOOL_STATUS_BAD_LOG:
8440 printf_color(ANSI_BOLD, gettext("status: "));
8441 printf_color(ANSI_YELLOW, gettext("An intent log record "
8442 "could not be read.\n"
8443 "\tWaiting for administrator intervention to fix the "
8444 "faulted pool.\n"));
8445 printf_color(ANSI_BOLD, gettext("action: "));
8446 printf_color(ANSI_YELLOW, gettext("Either restore the affected "
8447 "device(s) and run 'zpool online',\n"
8448 "\tor ignore the intent log records by running "
8449 "'zpool clear'.\n"));
8450 break;
8451
8452 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
8453 (void) printf(gettext("status: One or more devices are "
8454 "configured to use a non-native block size.\n"
8455 "\tExpect reduced performance.\n"));
8456 (void) printf(gettext("action: Replace affected devices with "
8457 "devices that support the\n\tconfigured block size, or "
8458 "migrate data to a properly configured\n\tpool.\n"));
8459 break;
8460
8461 case ZPOOL_STATUS_HOSTID_MISMATCH:
8462 printf_color(ANSI_BOLD, gettext("status: "));
8463 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
8464 " and system hostid on imported pool.\n\tThis pool was "
8465 "previously imported into a system with a different "
8466 "hostid,\n\tand then was verbatim imported into this "
8467 "system.\n"));
8468 printf_color(ANSI_BOLD, gettext("action: "));
8469 printf_color(ANSI_YELLOW, gettext("Export this pool on all "
8470 "systems on which it is imported.\n"
8471 "\tThen import it to correct the mismatch.\n"));
8472 break;
8473
8474 case ZPOOL_STATUS_ERRATA:
8475 printf_color(ANSI_BOLD, gettext("status: "));
8476 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
8477 errata);
8478
8479 switch (errata) {
8480 case ZPOOL_ERRATA_NONE:
8481 break;
8482
8483 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
8484 printf_color(ANSI_BOLD, gettext("action: "));
8485 printf_color(ANSI_YELLOW, gettext("To correct the issue"
8486 " run 'zpool scrub'.\n"));
8487 break;
8488
8489 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
8490 (void) printf(gettext("\tExisting encrypted datasets "
8491 "contain an on-disk incompatibility\n\twhich "
8492 "needs to be corrected.\n"));
8493 printf_color(ANSI_BOLD, gettext("action: "));
8494 printf_color(ANSI_YELLOW, gettext("To correct the issue"
8495 " backup existing encrypted datasets to new\n\t"
8496 "encrypted datasets and destroy the old ones. "
8497 "'zfs mount -o ro' can\n\tbe used to temporarily "
8498 "mount existing encrypted datasets readonly.\n"));
8499 break;
8500
8501 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
8502 (void) printf(gettext("\tExisting encrypted snapshots "
8503 "and bookmarks contain an on-disk\n\tincompat"
8504 "ibility. This may cause on-disk corruption if "
8505 "they are used\n\twith 'zfs recv'.\n"));
8506 printf_color(ANSI_BOLD, gettext("action: "));
8507 			printf_color(ANSI_YELLOW, gettext("To correct the "
8508 "issue, enable the bookmark_v2 feature. No "
8509 "additional\n\taction is needed if there are no "
8510 			    "encrypted snapshots or bookmarks.\n\tIf preserving "
8511 "the encrypted snapshots and bookmarks is required,"
8512 " use\n\ta non-raw send to backup and restore them."
8513 " Alternately, they may be\n\tremoved to resolve "
8514 "the incompatibility.\n"));
8515 break;
8516
8517 default:
8518 /*
8519 * All errata which allow the pool to be imported
8520 * must contain an action message.
8521 */
8522 assert(0);
8523 }
8524 break;
8525
8526 default:
8527 /*
8528 * The remaining errors can't actually be generated, yet.
8529 */
8530 assert(reason == ZPOOL_STATUS_OK);
8531 }
8532
8533 if (msgid != NULL) {
8534 printf(" ");
8535 printf_color(ANSI_BOLD, gettext("see:"));
8536 printf(gettext(
8537 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
8538 msgid);
8539 }
8540
8541 if (config != NULL) {
8542 uint64_t nerr;
8543 nvlist_t **spares, **l2cache;
8544 uint_t nspares, nl2cache;
8545 pool_checkpoint_stat_t *pcs = NULL;
8546 pool_removal_stat_t *prs = NULL;
8547
8548 print_scan_status(zhp, nvroot);
8549
8550 (void) nvlist_lookup_uint64_array(nvroot,
8551 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
8552 print_removal_status(zhp, prs);
8553
8554 (void) nvlist_lookup_uint64_array(nvroot,
8555 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8556 print_checkpoint_status(pcs);
8557
8558 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
8559 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
8560 if (cbp->cb_namewidth < 10)
8561 cbp->cb_namewidth = 10;
8562
8563 color_start(ANSI_BOLD);
8564 (void) printf(gettext("config:\n\n"));
8565 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
8566 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
8567 "CKSUM");
8568 color_end();
8569
8570 if (cbp->cb_print_slow_ios) {
8571 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
8572 }
8573
8574 if (cbp->vcdl != NULL)
8575 print_cmd_columns(cbp->vcdl, 0);
8576
8577 printf("\n");
8578
8579 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
8580 B_FALSE, NULL);
8581
8582 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
8583 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
8584 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
8585
8586 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
8587 &l2cache, &nl2cache) == 0)
8588 print_l2cache(zhp, cbp, l2cache, nl2cache);
8589
8590 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
8591 &spares, &nspares) == 0)
8592 print_spares(zhp, cbp, spares, nspares);
8593
8594 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
8595 &nerr) == 0) {
8596 nvlist_t *nverrlist = NULL;
8597
8598 /*
8599 * If the approximate error count is small, get a
8600 * precise count by fetching the entire log and
8601 * uniquifying the results.
8602 */
8603 if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
8604 zpool_get_errlog(zhp, &nverrlist) == 0) {
8605 nvpair_t *elem;
8606
8607 elem = NULL;
8608 nerr = 0;
8609 while ((elem = nvlist_next_nvpair(nverrlist,
8610 elem)) != NULL) {
8611 nerr++;
8612 }
8613 }
8614 nvlist_free(nverrlist);
8615
8616 (void) printf("\n");
8617
8618 if (nerr == 0)
8619 (void) printf(gettext("errors: No known data "
8620 "errors\n"));
8621 else if (!cbp->cb_verbose)
8622 (void) printf(gettext("errors: %llu data "
8623 "errors, use '-v' for a list\n"),
8624 (u_longlong_t)nerr);
8625 else
8626 print_error_log(zhp);
8627 }
8628
8629 if (cbp->cb_dedup_stats)
8630 print_dedup_stats(config);
8631 } else {
8632 (void) printf(gettext("config: The configuration cannot be "
8633 "determined.\n"));
8634 }
8635
8636 return (0);
8637 }
8638
8639 /*
8640 * zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
8641 * [interval [count]]
8642 *
8643 * -c CMD For each vdev, run command CMD
8644 * -i Display vdev initialization status.
8645 * -g Display guid for individual vdev name.
8646 * -L Follow links when resolving vdev path name.
8647 * -p Display values in parsable (exact) format.
8648 * -P Display full path for vdev name.
8649 * -s Display slow IOs column.
8650 * -v Display complete error logs
8651 * -x Display only pools with potential problems
8652 * -D Display dedup status (undocumented)
8653 * -t Display vdev TRIM status.
8654 * -T Display a timestamp in date(1) or Unix format
8655 *
8656 * Describes the health status of all pools or some subset.
8657 */
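
/*
 * Example invocations (illustrative only; pool names are hypothetical):
 *
 *	zpool status			# summary of every imported pool
 *	zpool status -x			# only pools with potential problems
 *	zpool status -v tank		# full config plus error log for 'tank'
 *	zpool status tank 5 3		# repeat every 5 seconds, 3 times
 */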
8658 int
8659 zpool_do_status(int argc, char **argv)
8660 {
8661 int c;
8662 int ret;
8663 float interval = 0;
8664 unsigned long count = 0;
8665 status_cbdata_t cb = { 0 };
8666 char *cmd = NULL;
8667
8668 /* check options */
8669 while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
8670 switch (c) {
8671 case 'c':
8672 if (cmd != NULL) {
8673 fprintf(stderr,
8674 gettext("Can't set -c flag twice\n"));
8675 exit(1);
8676 }
8677
8678 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
8679 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
8680 fprintf(stderr, gettext(
8681 "Can't run -c, disabled by "
8682 "ZPOOL_SCRIPTS_ENABLED.\n"));
8683 exit(1);
8684 }
8685
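			/*
			 * Refuse to run -c scripts with root privileges
			 * unless the user explicitly opts in by setting
			 * ZPOOL_SCRIPTS_AS_ROOT.
			 */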
8686 if ((getuid() <= 0 || geteuid() <= 0) &&
8687 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
8688 fprintf(stderr, gettext(
8689 "Can't run -c with root privileges "
8690 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
8691 exit(1);
8692 }
8693 cmd = optarg;
8694 break;
8695 case 'i':
8696 cb.cb_print_vdev_init = B_TRUE;
8697 break;
8698 case 'g':
8699 cb.cb_name_flags |= VDEV_NAME_GUID;
8700 break;
8701 case 'L':
8702 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
8703 break;
8704 case 'p':
8705 cb.cb_literal = B_TRUE;
8706 break;
8707 case 'P':
8708 cb.cb_name_flags |= VDEV_NAME_PATH;
8709 break;
8710 case 's':
8711 cb.cb_print_slow_ios = B_TRUE;
8712 break;
8713 case 'v':
8714 cb.cb_verbose = B_TRUE;
8715 break;
8716 case 'x':
8717 cb.cb_explain = B_TRUE;
8718 break;
8719 case 'D':
8720 cb.cb_dedup_stats = B_TRUE;
8721 break;
8722 case 't':
8723 cb.cb_print_vdev_trim = B_TRUE;
8724 break;
8725 case 'T':
8726 get_timestamp_arg(*optarg);
8727 break;
8728 case '?':
8729 if (optopt == 'c') {
8730 print_zpool_script_list("status");
8731 exit(0);
8732 } else {
8733 fprintf(stderr,
8734 gettext("invalid option '%c'\n"), optopt);
8735 }
8736 usage(B_FALSE);
8737 }
8738 }
8739
8740 argc -= optind;
8741 argv += optind;
8742
8743 get_interval_count(&argc, argv, &interval, &count);
8744
8745 if (argc == 0)
8746 cb.cb_allpools = B_TRUE;
8747
8748 cb.cb_first = B_TRUE;
8749 cb.cb_print_status = B_TRUE;
8750
8751 for (;;) {
8752 if (timestamp_fmt != NODATE)
8753 print_timestamp(timestamp_fmt);
8754
8755 if (cmd != NULL)
8756 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
8757 NULL, NULL, 0, 0);
8758
8759 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8760 cb.cb_literal, status_callback, &cb);
8761
8762 if (cb.vcdl != NULL)
8763 free_vdev_cmd_data_list(cb.vcdl);
8764
8765 if (argc == 0 && cb.cb_count == 0)
8766 (void) fprintf(stderr, gettext("no pools available\n"));
8767 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
8768 (void) printf(gettext("all pools are healthy\n"));
8769
8770 if (ret != 0)
8771 return (ret);
8772
8773 if (interval == 0)
8774 break;
8775
8776 if (count != 0 && --count == 0)
8777 break;
8778
8779 (void) fsleep(interval);
8780 }
8781
8782 return (0);
8783 }
8784
8785 typedef struct upgrade_cbdata {
8786 int cb_first;
8787 int cb_argc;
8788 uint64_t cb_version;
8789 char **cb_argv;
8790 } upgrade_cbdata_t;
8791
8792 static int
8793 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
8794 {
8795 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
8796 int *count = (int *)unsupp_fs;
8797
8798 if (zfs_version > ZPL_VERSION) {
8799 (void) printf(gettext("%s (v%d) is not supported by this "
8800 "implementation of ZFS.\n"),
8801 zfs_get_name(zhp), zfs_version);
8802 (*count)++;
8803 }
8804
8805 zfs_iter_filesystems(zhp, check_unsupp_fs, unsupp_fs);
8806
8807 zfs_close(zhp);
8808
8809 return (0);
8810 }
8811
8812 static int
8813 upgrade_version(zpool_handle_t *zhp, uint64_t version)
8814 {
8815 int ret;
8816 nvlist_t *config;
8817 uint64_t oldversion;
8818 int unsupp_fs = 0;
8819
8820 config = zpool_get_config(zhp, NULL);
8821 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8822 &oldversion) == 0);
8823
8824 char compat[ZFS_MAXPROPLEN];
8825 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
8826 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
8827 compat[0] = '\0';
8828
8829 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
8830 assert(oldversion < version);
8831
8832 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
8833 if (ret != 0)
8834 return (ret);
8835
8836 if (unsupp_fs) {
8837 (void) fprintf(stderr, gettext("Upgrade not performed due "
8838 "to %d unsupported filesystems (max v%d).\n"),
8839 unsupp_fs, (int)ZPL_VERSION);
8840 return (1);
8841 }
8842
8843 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
8844 (void) fprintf(stderr, gettext("Upgrade not performed because "
8845 "'compatibility' property set to '"
8846 ZPOOL_COMPAT_LEGACY "'.\n"));
8847 return (1);
8848 }
8849
8850 ret = zpool_upgrade(zhp, version);
8851 if (ret != 0)
8852 return (ret);
8853
8854 if (version >= SPA_VERSION_FEATURES) {
8855 (void) printf(gettext("Successfully upgraded "
8856 "'%s' from version %llu to feature flags.\n"),
8857 zpool_get_name(zhp), (u_longlong_t)oldversion);
8858 } else {
8859 (void) printf(gettext("Successfully upgraded "
8860 "'%s' from version %llu to version %llu.\n"),
8861 zpool_get_name(zhp), (u_longlong_t)oldversion,
8862 (u_longlong_t)version);
8863 }
8864
8865 return (0);
8866 }
8867
8868 static int
8869 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
8870 {
8871 int i, ret, count;
8872 boolean_t firstff = B_TRUE;
8873 nvlist_t *enabled = zpool_get_features(zhp);
8874
8875 char compat[ZFS_MAXPROPLEN];
8876 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
8877 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
8878 compat[0] = '\0';
8879
8880 boolean_t requested_features[SPA_FEATURES];
8881 if (zpool_do_load_compat(compat, requested_features) !=
8882 ZPOOL_COMPATIBILITY_OK)
8883 return (-1);
8884
8885 count = 0;
8886 for (i = 0; i < SPA_FEATURES; i++) {
8887 const char *fname = spa_feature_table[i].fi_uname;
8888 const char *fguid = spa_feature_table[i].fi_guid;
8889
8890 if (!spa_feature_table[i].fi_zfs_mod_supported)
8891 continue;
8892
8893 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
8894 char *propname;
8895 verify(-1 != asprintf(&propname, "feature@%s", fname));
8896 ret = zpool_set_prop(zhp, propname,
8897 ZFS_FEATURE_ENABLED);
8898 if (ret != 0) {
8899 free(propname);
8900 return (ret);
8901 }
8902 count++;
8903
8904 if (firstff) {
8905 (void) printf(gettext("Enabled the "
8906 "following features on '%s':\n"),
8907 zpool_get_name(zhp));
8908 firstff = B_FALSE;
8909 }
8910 (void) printf(gettext(" %s\n"), fname);
8911 free(propname);
8912 }
8913 }
8914
8915 if (countp != NULL)
8916 *countp = count;
8917 return (0);
8918 }
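
/*
 * For reference, enabling a feature through upgrade_enable_all() is
 * equivalent to running, for each requested-but-disabled feature
 * (the feature name below is only an example):
 *
 *	zpool set feature@async_destroy=enabled <pool>
 */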
8919
8920 static int
8921 upgrade_cb(zpool_handle_t *zhp, void *arg)
8922 {
8923 upgrade_cbdata_t *cbp = arg;
8924 nvlist_t *config;
8925 uint64_t version;
8926 boolean_t modified_pool = B_FALSE;
8927 int ret;
8928
8929 config = zpool_get_config(zhp, NULL);
8930 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8931 &version) == 0);
8932
8933 assert(SPA_VERSION_IS_SUPPORTED(version));
8934
8935 if (version < cbp->cb_version) {
8936 cbp->cb_first = B_FALSE;
8937 ret = upgrade_version(zhp, cbp->cb_version);
8938 if (ret != 0)
8939 return (ret);
8940 modified_pool = B_TRUE;
8941
8942 /*
8943 * If they did "zpool upgrade -a", then we could
8944 * be doing ioctls to different pools. We need
8945 * to log this history once to each pool, and bypass
8946 * the normal history logging that happens in main().
8947 */
8948 (void) zpool_log_history(g_zfs, history_str);
8949 log_history = B_FALSE;
8950 }
8951
8952 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
8953 int count;
8954 ret = upgrade_enable_all(zhp, &count);
8955 if (ret != 0)
8956 return (ret);
8957
8958 if (count > 0) {
8959 cbp->cb_first = B_FALSE;
8960 modified_pool = B_TRUE;
8961 }
8962 }
8963
8964 if (modified_pool) {
8965 (void) printf("\n");
8966 (void) after_zpool_upgrade(zhp);
8967 }
8968
8969 return (0);
8970 }
8971
8972 static int
8973 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
8974 {
8975 upgrade_cbdata_t *cbp = arg;
8976 nvlist_t *config;
8977 uint64_t version;
8978
8979 config = zpool_get_config(zhp, NULL);
8980 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8981 &version) == 0);
8982
8983 assert(SPA_VERSION_IS_SUPPORTED(version));
8984
8985 if (version < SPA_VERSION_FEATURES) {
8986 if (cbp->cb_first) {
8987 (void) printf(gettext("The following pools are "
8988 "formatted with legacy version numbers and can\n"
8989 "be upgraded to use feature flags. After "
8990 "being upgraded, these pools\nwill no "
8991 "longer be accessible by software that does not "
8992 "support feature\nflags.\n\n"
8993 "Note that setting a pool's 'compatibility' "
8994 			    "property to '" ZPOOL_COMPAT_LEGACY "' will\n"
8995 "inhibit upgrades.\n\n"));
8996 (void) printf(gettext("VER POOL\n"));
8997 (void) printf(gettext("--- ------------\n"));
8998 cbp->cb_first = B_FALSE;
8999 }
9000
9001 (void) printf("%2llu %s\n", (u_longlong_t)version,
9002 zpool_get_name(zhp));
9003 }
9004
9005 return (0);
9006 }
9007
9008 static int
9009 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
9010 {
9011 upgrade_cbdata_t *cbp = arg;
9012 nvlist_t *config;
9013 uint64_t version;
9014
9015 config = zpool_get_config(zhp, NULL);
9016 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9017 &version) == 0);
9018
9019 if (version >= SPA_VERSION_FEATURES) {
9020 int i;
9021 boolean_t poolfirst = B_TRUE;
9022 nvlist_t *enabled = zpool_get_features(zhp);
9023
9024 for (i = 0; i < SPA_FEATURES; i++) {
9025 const char *fguid = spa_feature_table[i].fi_guid;
9026 const char *fname = spa_feature_table[i].fi_uname;
9027
9028 if (!spa_feature_table[i].fi_zfs_mod_supported)
9029 continue;
9030
9031 if (!nvlist_exists(enabled, fguid)) {
9032 if (cbp->cb_first) {
9033 (void) printf(gettext("\nSome "
9034 "supported features are not "
9035 "enabled on the following pools. "
9036 "Once a\nfeature is enabled the "
9037 "pool may become incompatible with "
9038 "software\nthat does not support "
9039 "the feature. See "
9040 "zpool-features(7) for "
9041 "details.\n\n"
9042 "Note that the pool "
9043 					    "'compatibility' property can be "
9044 "used to inhibit\nfeature "
9045 "upgrades.\n\n"));
9046 (void) printf(gettext("POOL "
9047 "FEATURE\n"));
9048 (void) printf(gettext("------"
9049 "---------\n"));
9050 cbp->cb_first = B_FALSE;
9051 }
9052
9053 if (poolfirst) {
9054 (void) printf(gettext("%s\n"),
9055 zpool_get_name(zhp));
9056 poolfirst = B_FALSE;
9057 }
9058
9059 (void) printf(gettext(" %s\n"), fname);
9060 }
9061 /*
9062 * If they did "zpool upgrade -a", then we could
9063 * be doing ioctls to different pools. We need
9064 * to log this history once to each pool, and bypass
9065 * the normal history logging that happens in main().
9066 */
9067 (void) zpool_log_history(g_zfs, history_str);
9068 log_history = B_FALSE;
9069 }
9070 }
9071
9072 return (0);
9073 }
9074
9075 static int
9076 upgrade_one(zpool_handle_t *zhp, void *data)
9077 {
9078 boolean_t modified_pool = B_FALSE;
9079 upgrade_cbdata_t *cbp = data;
9080 uint64_t cur_version;
9081 int ret;
9082
9083 if (strcmp("log", zpool_get_name(zhp)) == 0) {
9084 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
9085 "Pool 'log' must be renamed using export and import"
9086 " to upgrade.\n"));
9087 return (1);
9088 }
9089
9090 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
9091 if (cur_version > cbp->cb_version) {
9092 (void) printf(gettext("Pool '%s' is already formatted "
9093 "using more current version '%llu'.\n\n"),
9094 zpool_get_name(zhp), (u_longlong_t)cur_version);
9095 return (0);
9096 }
9097
9098 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
9099 (void) printf(gettext("Pool '%s' is already formatted "
9100 "using version %llu.\n\n"), zpool_get_name(zhp),
9101 (u_longlong_t)cbp->cb_version);
9102 return (0);
9103 }
9104
9105 if (cur_version != cbp->cb_version) {
9106 modified_pool = B_TRUE;
9107 ret = upgrade_version(zhp, cbp->cb_version);
9108 if (ret != 0)
9109 return (ret);
9110 }
9111
9112 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9113 int count = 0;
9114 ret = upgrade_enable_all(zhp, &count);
9115 if (ret != 0)
9116 return (ret);
9117
9118 if (count != 0) {
9119 modified_pool = B_TRUE;
9120 } else if (cur_version == SPA_VERSION) {
9121 (void) printf(gettext("Pool '%s' already has all "
9122 "supported and requested features enabled.\n"),
9123 zpool_get_name(zhp));
9124 }
9125 }
9126
9127 if (modified_pool) {
9128 (void) printf("\n");
9129 (void) after_zpool_upgrade(zhp);
9130 }
9131
9132 return (0);
9133 }
9134
9135 /*
9136 * zpool upgrade
9137 * zpool upgrade -v
9138 * zpool upgrade [-V version] <-a | pool ...>
9139 *
9140  * With no arguments, display downrev'd ZFS pools available for upgrade.
9141 * Individual pools can be upgraded by specifying the pool, and '-a' will
9142 * upgrade all pools.
9143 */
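
/*
 * Example invocations (illustrative only; pool name is hypothetical):
 *
 *	zpool upgrade			# list pools that can be upgraded
 *	zpool upgrade -v		# show supported features and versions
 *	zpool upgrade -a		# upgrade every imported pool
 *	zpool upgrade -V 28 tank	# upgrade 'tank' to legacy version 28
 */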
9144 int
9145 zpool_do_upgrade(int argc, char **argv)
9146 {
9147 int c;
9148 upgrade_cbdata_t cb = { 0 };
9149 int ret = 0;
9150 boolean_t showversions = B_FALSE;
9151 boolean_t upgradeall = B_FALSE;
9152 char *end;
9153
9154
9155 /* check options */
9156 while ((c = getopt(argc, argv, ":avV:")) != -1) {
9157 switch (c) {
9158 case 'a':
9159 upgradeall = B_TRUE;
9160 break;
9161 case 'v':
9162 showversions = B_TRUE;
9163 break;
9164 case 'V':
9165 cb.cb_version = strtoll(optarg, &end, 10);
9166 if (*end != '\0' ||
9167 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
9168 (void) fprintf(stderr,
9169 gettext("invalid version '%s'\n"), optarg);
9170 usage(B_FALSE);
9171 }
9172 break;
9173 case ':':
9174 (void) fprintf(stderr, gettext("missing argument for "
9175 "'%c' option\n"), optopt);
9176 usage(B_FALSE);
9177 break;
9178 case '?':
9179 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9180 optopt);
9181 usage(B_FALSE);
9182 }
9183 }
9184
9185 cb.cb_argc = argc;
9186 cb.cb_argv = argv;
9187 argc -= optind;
9188 argv += optind;
9189
9190 if (cb.cb_version == 0) {
9191 cb.cb_version = SPA_VERSION;
9192 } else if (!upgradeall && argc == 0) {
9193 		(void) fprintf(stderr, gettext("-V option requires "
9194 		    "'-a' or a pool name\n"));
9195 usage(B_FALSE);
9196 }
9197
9198 if (showversions) {
9199 if (upgradeall || argc != 0) {
9200 (void) fprintf(stderr, gettext("-v option is "
9201 "incompatible with other arguments\n"));
9202 usage(B_FALSE);
9203 }
9204 } else if (upgradeall) {
9205 if (argc != 0) {
9206 (void) fprintf(stderr, gettext("-a option should not "
9207 "be used along with a pool name\n"));
9208 usage(B_FALSE);
9209 }
9210 }
9211
9212 (void) printf("%s", gettext("This system supports ZFS pool feature "
9213 "flags.\n\n"));
9214 if (showversions) {
9215 int i;
9216
9217 (void) printf(gettext("The following features are "
9218 "supported:\n\n"));
9219 (void) printf(gettext("FEAT DESCRIPTION\n"));
9220 (void) printf("----------------------------------------------"
9221 "---------------\n");
9222 for (i = 0; i < SPA_FEATURES; i++) {
9223 zfeature_info_t *fi = &spa_feature_table[i];
9224 if (!fi->fi_zfs_mod_supported)
9225 continue;
9226 const char *ro =
9227 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
9228 " (read-only compatible)" : "";
9229
9230 (void) printf("%-37s%s\n", fi->fi_uname, ro);
9231 (void) printf(" %s\n", fi->fi_desc);
9232 }
9233 (void) printf("\n");
9234
9235 (void) printf(gettext("The following legacy versions are also "
9236 "supported:\n\n"));
9237 (void) printf(gettext("VER DESCRIPTION\n"));
9238 (void) printf("--- -----------------------------------------"
9239 "---------------\n");
9240 (void) printf(gettext(" 1 Initial ZFS version\n"));
9241 (void) printf(gettext(" 2 Ditto blocks "
9242 "(replicated metadata)\n"));
9243 (void) printf(gettext(" 3 Hot spares and double parity "
9244 "RAID-Z\n"));
9245 (void) printf(gettext(" 4 zpool history\n"));
9246 (void) printf(gettext(" 5 Compression using the gzip "
9247 "algorithm\n"));
9248 (void) printf(gettext(" 6 bootfs pool property\n"));
9249 (void) printf(gettext(" 7 Separate intent log devices\n"));
9250 (void) printf(gettext(" 8 Delegated administration\n"));
9251 (void) printf(gettext(" 9 refquota and refreservation "
9252 "properties\n"));
9253 (void) printf(gettext(" 10 Cache devices\n"));
9254 (void) printf(gettext(" 11 Improved scrub performance\n"));
9255 (void) printf(gettext(" 12 Snapshot properties\n"));
9256 (void) printf(gettext(" 13 snapused property\n"));
9257 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
9258 (void) printf(gettext(" 15 user/group space accounting\n"));
9259 (void) printf(gettext(" 16 stmf property support\n"));
9260 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
9261 (void) printf(gettext(" 18 Snapshot user holds\n"));
9262 (void) printf(gettext(" 19 Log device removal\n"));
9263 (void) printf(gettext(" 20 Compression using zle "
9264 "(zero-length encoding)\n"));
9265 (void) printf(gettext(" 21 Deduplication\n"));
9266 (void) printf(gettext(" 22 Received properties\n"));
9267 (void) printf(gettext(" 23 Slim ZIL\n"));
9268 (void) printf(gettext(" 24 System attributes\n"));
9269 (void) printf(gettext(" 25 Improved scrub stats\n"));
9270 (void) printf(gettext(" 26 Improved snapshot deletion "
9271 "performance\n"));
9272 (void) printf(gettext(" 27 Improved snapshot creation "
9273 "performance\n"));
9274 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
9275 (void) printf(gettext("\nFor more information on a particular "
9276 "version, including supported releases,\n"));
9277 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
9278 } else if (argc == 0 && upgradeall) {
9279 cb.cb_first = B_TRUE;
9280 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
9281 if (ret == 0 && cb.cb_first) {
9282 if (cb.cb_version == SPA_VERSION) {
9283 (void) printf(gettext("All pools are already "
9284 "formatted using feature flags.\n\n"));
9285 (void) printf(gettext("Every feature flags "
9286 "pool already has all supported and "
9287 "requested features enabled.\n"));
9288 } else {
9289 (void) printf(gettext("All pools are already "
9290 "formatted with version %llu or higher.\n"),
9291 (u_longlong_t)cb.cb_version);
9292 }
9293 }
9294 } else if (argc == 0) {
9295 cb.cb_first = B_TRUE;
9296 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
9297 assert(ret == 0);
9298
9299 if (cb.cb_first) {
9300 (void) printf(gettext("All pools are formatted "
9301 "using feature flags.\n\n"));
9302 } else {
9303 (void) printf(gettext("\nUse 'zpool upgrade -v' "
9304 "for a list of available legacy versions.\n"));
9305 }
9306
9307 cb.cb_first = B_TRUE;
9308 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
9309 assert(ret == 0);
9310
9311 if (cb.cb_first) {
9312 (void) printf(gettext("Every feature flags pool has "
9313 "all supported and requested features enabled.\n"));
9314 } else {
9315 (void) printf(gettext("\n"));
9316 }
9317 } else {
9318 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9319 B_FALSE, upgrade_one, &cb);
9320 }
9321
9322 return (ret);
9323 }
9324
9325 typedef struct hist_cbdata {
9326 boolean_t first;
9327 boolean_t longfmt;
9328 boolean_t internal;
9329 } hist_cbdata_t;
9330
9331 static void
9332 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
9333 {
9334 nvlist_t **records;
9335 uint_t numrecords;
9336 int i;
9337
9338 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
9339 &records, &numrecords) == 0);
9340 for (i = 0; i < numrecords; i++) {
9341 nvlist_t *rec = records[i];
9342 char tbuf[64] = "";
9343
9344 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
9345 time_t tsec;
9346 struct tm t;
9347
9348 tsec = fnvlist_lookup_uint64(records[i],
9349 ZPOOL_HIST_TIME);
9350 (void) localtime_r(&tsec, &t);
9351 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
9352 }
9353
9354 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
9355 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
9356 ZPOOL_HIST_ELAPSED_NS);
9357 (void) snprintf(tbuf + strlen(tbuf),
9358 sizeof (tbuf) - strlen(tbuf),
9359 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
9360 }
9361
9362 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
9363 (void) printf("%s %s", tbuf,
9364 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
9365 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
9366 int ievent =
9367 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
9368 if (!cb->internal)
9369 continue;
9370 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
9371 (void) printf("%s unrecognized record:\n",
9372 tbuf);
9373 dump_nvlist(rec, 4);
9374 continue;
9375 }
9376 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
9377 zfs_history_event_names[ievent],
9378 (longlong_t)fnvlist_lookup_uint64(
9379 rec, ZPOOL_HIST_TXG),
9380 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
9381 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
9382 if (!cb->internal)
9383 continue;
9384 (void) printf("%s [txg:%lld] %s", tbuf,
9385 (longlong_t)fnvlist_lookup_uint64(
9386 rec, ZPOOL_HIST_TXG),
9387 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
9388 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
9389 (void) printf(" %s (%llu)",
9390 fnvlist_lookup_string(rec,
9391 ZPOOL_HIST_DSNAME),
9392 (u_longlong_t)fnvlist_lookup_uint64(rec,
9393 ZPOOL_HIST_DSID));
9394 }
9395 (void) printf(" %s", fnvlist_lookup_string(rec,
9396 ZPOOL_HIST_INT_STR));
9397 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
9398 if (!cb->internal)
9399 continue;
9400 (void) printf("%s ioctl %s\n", tbuf,
9401 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
9402 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
9403 (void) printf(" input:\n");
9404 dump_nvlist(fnvlist_lookup_nvlist(rec,
9405 ZPOOL_HIST_INPUT_NVL), 8);
9406 }
9407 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
9408 (void) printf(" output:\n");
9409 dump_nvlist(fnvlist_lookup_nvlist(rec,
9410 ZPOOL_HIST_OUTPUT_NVL), 8);
9411 }
9412 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
9413 (void) printf(" output nvlist omitted; "
9414 "original size: %lldKB\n",
9415 (longlong_t)fnvlist_lookup_int64(rec,
9416 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
9417 }
9418 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
9419 (void) printf(" errno: %lld\n",
9420 (longlong_t)fnvlist_lookup_int64(rec,
9421 ZPOOL_HIST_ERRNO));
9422 }
9423 } else {
9424 if (!cb->internal)
9425 continue;
9426 (void) printf("%s unrecognized record:\n", tbuf);
9427 dump_nvlist(rec, 4);
9428 }
9429
9430 if (!cb->longfmt) {
9431 (void) printf("\n");
9432 continue;
9433 }
9434 (void) printf(" [");
9435 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
9436 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
9437 struct passwd *pwd = getpwuid(who);
9438 (void) printf("user %d ", (int)who);
9439 if (pwd != NULL)
9440 (void) printf("(%s) ", pwd->pw_name);
9441 }
9442 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
9443 (void) printf("on %s",
9444 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
9445 }
9446 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
9447 (void) printf(":%s",
9448 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
9449 }
9450
9451 (void) printf("]");
9452 (void) printf("\n");
9453 }
9454 }
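
/*
 * Example of a formatted history record as printed above (command and
 * timestamp are hypothetical; with -l the trailing detail in brackets is
 * appended):
 *
 *	2021-05-01.12:34:56 zpool create tank mirror sda sdb
 *	    [user 0 (root) on host:global]
 */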
9455
9456 /*
9457 * Print out the command history for a specific pool.
9458 */
9459 static int
9460 get_history_one(zpool_handle_t *zhp, void *data)
9461 {
9462 nvlist_t *nvhis;
9463 int ret;
9464 hist_cbdata_t *cb = (hist_cbdata_t *)data;
9465 uint64_t off = 0;
9466 boolean_t eof = B_FALSE;
9467
9468 cb->first = B_FALSE;
9469
9470 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
9471
9472 while (!eof) {
9473 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
9474 return (ret);
9475
9476 print_history_records(nvhis, cb);
9477 nvlist_free(nvhis);
9478 }
9479 (void) printf("\n");
9480
9481 return (ret);
9482 }
9483
9484 /*
9485  * zpool history [-il] [pool] ...
9486 *
9487 * Displays the history of commands that modified pools.
9488 */
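
/*
 * Example invocations (illustrative only; pool name is hypothetical):
 *
 *	zpool history tank		# commands that modified 'tank'
 *	zpool history -il tank		# include internal events, long format
 */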
9489 int
9490 zpool_do_history(int argc, char **argv)
9491 {
9492 hist_cbdata_t cbdata = { 0 };
9493 int ret;
9494 int c;
9495
9496 cbdata.first = B_TRUE;
9497 /* check options */
9498 while ((c = getopt(argc, argv, "li")) != -1) {
9499 switch (c) {
9500 case 'l':
9501 cbdata.longfmt = B_TRUE;
9502 break;
9503 case 'i':
9504 cbdata.internal = B_TRUE;
9505 break;
9506 case '?':
9507 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9508 optopt);
9509 usage(B_FALSE);
9510 }
9511 }
9512 argc -= optind;
9513 argv += optind;
9514
9515 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9516 B_FALSE, get_history_one, &cbdata);
9517
9518 if (argc == 0 && cbdata.first == B_TRUE) {
9519 (void) fprintf(stderr, gettext("no pools available\n"));
9520 return (0);
9521 }
9522
9523 return (ret);
9524 }
9525
9526 typedef struct ev_opts {
9527 int verbose;
9528 int scripted;
9529 int follow;
9530 int clear;
9531 char poolname[ZFS_MAX_DATASET_NAME_LEN];
9532 } ev_opts_t;
9533
9534 static void
9535 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
9536 {
9537 char ctime_str[26], str[32], *ptr;
9538 int64_t *tv;
9539 uint_t n;
9540
9541 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
9542 memset(str, ' ', 32);
9543 (void) ctime_r((const time_t *)&tv[0], ctime_str);
9544 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
9545 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
9546 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
9547 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
9548 if (opts->scripted)
9549 (void) printf(gettext("%s\t"), str);
9550 else
9551 (void) printf(gettext("%s "), str);
9552
9553 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
9554 (void) printf(gettext("%s\n"), ptr);
9555 }
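
/*
 * The pieces copied out of the ctime_r() buffer above yield a fixed-width
 * timestamp followed by the event class, for example (hypothetical event):
 *
 *	Jun 30 1993 21:49:08.123456789 ereport.fs.zfs.checksum
 */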
9556
9557 static void
9558 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
9559 {
9560 nvpair_t *nvp;
9561
9562 for (nvp = nvlist_next_nvpair(nvl, NULL);
9563 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
9564
9565 data_type_t type = nvpair_type(nvp);
9566 const char *name = nvpair_name(nvp);
9567
9568 boolean_t b;
9569 uint8_t i8;
9570 uint16_t i16;
9571 uint32_t i32;
9572 uint64_t i64;
9573 char *str;
9574 nvlist_t *cnv;
9575
9576 printf(gettext("%*s%s = "), depth, "", name);
9577
9578 switch (type) {
9579 case DATA_TYPE_BOOLEAN:
9580 printf(gettext("%s"), "1");
9581 break;
9582
9583 case DATA_TYPE_BOOLEAN_VALUE:
9584 (void) nvpair_value_boolean_value(nvp, &b);
9585 printf(gettext("%s"), b ? "1" : "0");
9586 break;
9587
9588 case DATA_TYPE_BYTE:
9589 (void) nvpair_value_byte(nvp, &i8);
9590 printf(gettext("0x%x"), i8);
9591 break;
9592
9593 case DATA_TYPE_INT8:
9594 (void) nvpair_value_int8(nvp, (void *)&i8);
9595 printf(gettext("0x%x"), i8);
9596 break;
9597
9598 case DATA_TYPE_UINT8:
9599 (void) nvpair_value_uint8(nvp, &i8);
9600 printf(gettext("0x%x"), i8);
9601 break;
9602
9603 case DATA_TYPE_INT16:
9604 (void) nvpair_value_int16(nvp, (void *)&i16);
9605 printf(gettext("0x%x"), i16);
9606 break;
9607
9608 case DATA_TYPE_UINT16:
9609 (void) nvpair_value_uint16(nvp, &i16);
9610 printf(gettext("0x%x"), i16);
9611 break;
9612
9613 case DATA_TYPE_INT32:
9614 (void) nvpair_value_int32(nvp, (void *)&i32);
9615 printf(gettext("0x%x"), i32);
9616 break;
9617
9618 case DATA_TYPE_UINT32:
9619 (void) nvpair_value_uint32(nvp, &i32);
9620 printf(gettext("0x%x"), i32);
9621 break;
9622
9623 case DATA_TYPE_INT64:
9624 (void) nvpair_value_int64(nvp, (void *)&i64);
9625 printf(gettext("0x%llx"), (u_longlong_t)i64);
9626 break;
9627
9628 case DATA_TYPE_UINT64:
9629 (void) nvpair_value_uint64(nvp, &i64);
9630 /*
9631 * translate vdev state values to readable
9632 			 * strings to aid zpool events consumers
9633 */
9634 if (strcmp(name,
9635 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
9636 strcmp(name,
9637 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
9638 printf(gettext("\"%s\" (0x%llx)"),
9639 zpool_state_to_name(i64, VDEV_AUX_NONE),
9640 (u_longlong_t)i64);
9641 } else {
9642 printf(gettext("0x%llx"), (u_longlong_t)i64);
9643 }
9644 break;
9645
9646 case DATA_TYPE_HRTIME:
9647 (void) nvpair_value_hrtime(nvp, (void *)&i64);
9648 printf(gettext("0x%llx"), (u_longlong_t)i64);
9649 break;
9650
9651 case DATA_TYPE_STRING:
9652 (void) nvpair_value_string(nvp, &str);
9653 printf(gettext("\"%s\""), str ? str : "<NULL>");
9654 break;
9655
9656 case DATA_TYPE_NVLIST:
9657 printf(gettext("(embedded nvlist)\n"));
9658 (void) nvpair_value_nvlist(nvp, &cnv);
9659 zpool_do_events_nvprint(cnv, depth + 8);
9660 printf(gettext("%*s(end %s)"), depth, "", name);
9661 break;
9662
9663 case DATA_TYPE_NVLIST_ARRAY: {
9664 nvlist_t **val;
9665 uint_t i, nelem;
9666
9667 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
9668 printf(gettext("(%d embedded nvlists)\n"), nelem);
9669 for (i = 0; i < nelem; i++) {
9670 printf(gettext("%*s%s[%d] = %s\n"),
9671 depth, "", name, i, "(embedded nvlist)");
9672 zpool_do_events_nvprint(val[i], depth + 8);
9673 printf(gettext("%*s(end %s[%i])\n"),
9674 depth, "", name, i);
9675 }
9676 printf(gettext("%*s(end %s)\n"), depth, "", name);
9677 }
9678 break;
9679
9680 case DATA_TYPE_INT8_ARRAY: {
9681 int8_t *val;
9682 uint_t i, nelem;
9683
9684 (void) nvpair_value_int8_array(nvp, &val, &nelem);
9685 for (i = 0; i < nelem; i++)
9686 printf(gettext("0x%x "), val[i]);
9687
9688 break;
9689 }
9690
9691 case DATA_TYPE_UINT8_ARRAY: {
9692 uint8_t *val;
9693 uint_t i, nelem;
9694
9695 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
9696 for (i = 0; i < nelem; i++)
9697 printf(gettext("0x%x "), val[i]);
9698
9699 break;
9700 }
9701
9702 case DATA_TYPE_INT16_ARRAY: {
9703 int16_t *val;
9704 uint_t i, nelem;
9705
9706 (void) nvpair_value_int16_array(nvp, &val, &nelem);
9707 for (i = 0; i < nelem; i++)
9708 printf(gettext("0x%x "), val[i]);
9709
9710 break;
9711 }
9712
9713 case DATA_TYPE_UINT16_ARRAY: {
9714 uint16_t *val;
9715 uint_t i, nelem;
9716
9717 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
9718 for (i = 0; i < nelem; i++)
9719 printf(gettext("0x%x "), val[i]);
9720
9721 break;
9722 }
9723
9724 case DATA_TYPE_INT32_ARRAY: {
9725 int32_t *val;
9726 uint_t i, nelem;
9727
9728 (void) nvpair_value_int32_array(nvp, &val, &nelem);
9729 for (i = 0; i < nelem; i++)
9730 printf(gettext("0x%x "), val[i]);
9731
9732 break;
9733 }
9734
9735 case DATA_TYPE_UINT32_ARRAY: {
9736 uint32_t *val;
9737 uint_t i, nelem;
9738
9739 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
9740 for (i = 0; i < nelem; i++)
9741 printf(gettext("0x%x "), val[i]);
9742
9743 break;
9744 }
9745
9746 case DATA_TYPE_INT64_ARRAY: {
9747 int64_t *val;
9748 uint_t i, nelem;
9749
9750 (void) nvpair_value_int64_array(nvp, &val, &nelem);
9751 for (i = 0; i < nelem; i++)
9752 printf(gettext("0x%llx "),
9753 (u_longlong_t)val[i]);
9754
9755 break;
9756 }
9757
9758 case DATA_TYPE_UINT64_ARRAY: {
9759 uint64_t *val;
9760 uint_t i, nelem;
9761
9762 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
9763 for (i = 0; i < nelem; i++)
9764 printf(gettext("0x%llx "),
9765 (u_longlong_t)val[i]);
9766
9767 break;
9768 }
9769
9770 case DATA_TYPE_STRING_ARRAY: {
9771 char **str;
9772 uint_t i, nelem;
9773
9774 (void) nvpair_value_string_array(nvp, &str, &nelem);
9775 for (i = 0; i < nelem; i++)
9776 printf(gettext("\"%s\" "),
9777 str[i] ? str[i] : "<NULL>");
9778
9779 break;
9780 }
9781
9782 case DATA_TYPE_BOOLEAN_ARRAY:
9783 case DATA_TYPE_BYTE_ARRAY:
9784 case DATA_TYPE_DOUBLE:
9785 case DATA_TYPE_DONTCARE:
9786 case DATA_TYPE_UNKNOWN:
9787 printf(gettext("<unknown>"));
9788 break;
9789 }
9790
9791 printf(gettext("\n"));
9792 }
9793 }
9794
9795 static int
9796 zpool_do_events_next(ev_opts_t *opts)
9797 {
9798 nvlist_t *nvl;
9799 int zevent_fd, ret, dropped;
9800 char *pool;
9801
9802 zevent_fd = open(ZFS_DEV, O_RDWR);
9803 VERIFY(zevent_fd >= 0);
9804
9805 if (!opts->scripted)
9806 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
9807
9808 while (1) {
9809 ret = zpool_events_next(g_zfs, &nvl, &dropped,
9810 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
9811 if (ret || nvl == NULL)
9812 break;
9813
9814 if (dropped > 0)
9815 (void) printf(gettext("dropped %d events\n"), dropped);
9816
9817 if (strlen(opts->poolname) > 0 &&
9818 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
9819 strcmp(opts->poolname, pool) != 0)
9820 continue;
9821
9822 zpool_do_events_short(nvl, opts);
9823
9824 if (opts->verbose) {
9825 zpool_do_events_nvprint(nvl, 8);
9826 printf(gettext("\n"));
9827 }
9828 (void) fflush(stdout);
9829
9830 nvlist_free(nvl);
9831 }
9832
9833 VERIFY(0 == close(zevent_fd));
9834
9835 return (ret);
9836 }
9837
9838 static int
9839 zpool_do_events_clear(void)
9840 {
9841 int count, ret;
9842
9843 ret = zpool_events_clear(g_zfs, &count);
9844 if (!ret)
9845 (void) printf(gettext("cleared %d events\n"), count);
9846
9847 return (ret);
9848 }
9849
9850 /*
9851 * zpool events [-vHf [pool] | -c]
9852 *
9853 * Displays event logs generated by ZFS.
9854 */
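/*
 * Illustrative invocations (the pool name 'tank' is a placeholder):
 *
 *	zpool events			# print all logged events
 *	zpool events -v tank		# verbose nvlist dump, pool 'tank' only
 *	zpool events -fH		# follow new events, scripted output
 *	zpool events -c			# clear the event log
 */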
9855 int
9856 zpool_do_events(int argc, char **argv)
9857 {
9858 ev_opts_t opts = { 0 };
9859 int ret;
9860 int c;
9861
9862 /* check options */
9863 while ((c = getopt(argc, argv, "vHfc")) != -1) {
9864 switch (c) {
9865 case 'v':
9866 opts.verbose = 1;
9867 break;
9868 case 'H':
9869 opts.scripted = 1;
9870 break;
9871 case 'f':
9872 opts.follow = 1;
9873 break;
9874 case 'c':
9875 opts.clear = 1;
9876 break;
9877 case '?':
9878 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9879 optopt);
9880 usage(B_FALSE);
9881 }
9882 }
9883 argc -= optind;
9884 argv += optind;
9885
9886 if (argc > 1) {
9887 (void) fprintf(stderr, gettext("too many arguments\n"));
9888 usage(B_FALSE);
9889 } else if (argc == 1) {
9890 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
9891 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
9892 (void) fprintf(stderr,
9893 gettext("invalid pool name '%s'\n"), opts.poolname);
9894 usage(B_FALSE);
9895 }
9896 }
9897
9898 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
9899 opts.clear) {
9900 (void) fprintf(stderr,
9901 gettext("invalid options combined with -c\n"));
9902 usage(B_FALSE);
9903 }
9904
9905 if (opts.clear)
9906 ret = zpool_do_events_clear();
9907 else
9908 ret = zpool_do_events_next(&opts);
9909
9910 return (ret);
9911 }
9912
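/*
 * Print the requested vdev properties (cbp->cb_proplist) for a single vdev.
 * 'data' is the zprop_get_cbdata_t set up by zpool_do_get().
 */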
9913 static int
9914 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
9915 {
9916 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9917 char value[ZFS_MAXPROPLEN];
9918 zprop_source_t srctype;
9919
9920 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
9921 pl = pl->pl_next) {
9922 char *prop_name;
9923 /*
9924 * If the first property is pool name, it is a special
9925 * placeholder that we can skip. This will also skip
9926 * over the name property when 'all' is specified.
9927 */
9928 if (pl->pl_prop == ZPOOL_PROP_NAME &&
9929 pl == cbp->cb_proplist)
9930 continue;
9931
9932 if (pl->pl_prop == ZPROP_INVAL) {
9933 prop_name = pl->pl_user_prop;
9934 } else {
9935 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
9936 }
9937 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
9938 prop_name, value, sizeof (value), &srctype,
9939 cbp->cb_literal) == 0) {
9940 zprop_print_one_property(vdevname, cbp, prop_name,
9941 value, srctype, NULL, NULL);
9942 }
9943 }
9944
9945 return (0);
9946 }
9947
9948 static int
9949 get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data)
9950 {
9951 zpool_handle_t *zhp = zhp_data;
9952 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9953 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
9954 cbp->cb_vdevs.cb_name_flags);
9955 int ret;
9956
9957 /* Adjust the column widths for the vdev properties */
9958 ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
9959
9960 return (ret);
9961 }
9962
9963 static int
9964 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
9965 {
9966 zpool_handle_t *zhp = zhp_data;
9967 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9968 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
9969 cbp->cb_vdevs.cb_name_flags);
9970 int ret;
9971
9972 /* Display the properties */
9973 ret = get_callback_vdev(zhp, vdevname, data);
9974
9975 return (ret);
9976 }
9977
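/*
 * for_each_pool() callback for 'zpool get': depending on cb_type, print
 * either the pool properties or the vdev properties for the selected vdevs
 * (or for every vdev when "all-vdevs" was given).
 */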
9978 static int
9979 get_callback(zpool_handle_t *zhp, void *data)
9980 {
9981 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9982 char value[MAXNAMELEN];
9983 zprop_source_t srctype;
9984 zprop_list_t *pl;
9985 int vid;
9986
9987 if (cbp->cb_type == ZFS_TYPE_VDEV) {
9988 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
9989 for_each_vdev(zhp, get_callback_vdev_width_cb, data);
9990 for_each_vdev(zhp, get_callback_vdev_cb, data);
9991 } else {
9992 /* Adjust column widths for vdev properties */
9993 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
9994 vid++) {
9995 vdev_expand_proplist(zhp,
9996 cbp->cb_vdevs.cb_names[vid],
9997 &cbp->cb_proplist);
9998 }
9999 /* Display the properties */
10000 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10001 vid++) {
10002 get_callback_vdev(zhp,
10003 cbp->cb_vdevs.cb_names[vid], data);
10004 }
10005 }
10006 } else {
10007 assert(cbp->cb_type == ZFS_TYPE_POOL);
10008 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
10009 /*
10010 * Skip the special fake placeholder. This will also
10011 * skip over the name property when 'all' is specified.
10012 */
10013 if (pl->pl_prop == ZPOOL_PROP_NAME &&
10014 pl == cbp->cb_proplist)
10015 continue;
10016
10017 if (pl->pl_prop == ZPROP_INVAL &&
10018 (zpool_prop_feature(pl->pl_user_prop) ||
10019 zpool_prop_unsupported(pl->pl_user_prop))) {
10020 srctype = ZPROP_SRC_LOCAL;
10021
10022 if (zpool_prop_get_feature(zhp,
10023 pl->pl_user_prop, value,
10024 sizeof (value)) == 0) {
10025 zprop_print_one_property(
10026 zpool_get_name(zhp), cbp,
10027 pl->pl_user_prop, value, srctype,
10028 NULL, NULL);
10029 }
10030 } else {
10031 if (zpool_get_prop(zhp, pl->pl_prop, value,
10032 sizeof (value), &srctype,
10033 cbp->cb_literal) != 0)
10034 continue;
10035
10036 zprop_print_one_property(zpool_get_name(zhp),
10037 cbp, zpool_prop_to_name(pl->pl_prop),
10038 value, srctype, NULL, NULL);
10039 }
10040 }
10041 }
10042
10043 return (0);
10044 }
10045
10046 /*
10047 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
10048 *
10049 * -H Scripted mode. Don't display headers, and separate properties
10050 * by a single tab.
10051 * -o List of columns to display. Defaults to
10052 * "name,property,value,source".
10053 * -p Display values in parsable (exact) format.
10054 *
10055 * Get properties of pools (or of their vdevs) in the system. For each
10056 * one, output the requested properties along with their values and sources.
10057 */
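/*
 * Illustrative invocations (pool name 'tank' and vdev name 'sda' are
 * placeholders):
 *
 *	zpool get all tank			# every pool property
 *	zpool get -Hp size,capacity tank	# parsable, scripted output
 *	zpool get all tank all-vdevs		# properties of every vdev
 *	zpool get all tank sda			# properties of a single vdev
 */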
10058 int
10059 zpool_do_get(int argc, char **argv)
10060 {
10061 zprop_get_cbdata_t cb = { 0 };
10062 zprop_list_t fake_name = { 0 };
10063 int ret;
10064 int c, i;
10065 char *propstr = NULL;
10066
10067 cb.cb_first = B_TRUE;
10068
10069 /*
10070 * Set up default columns and sources.
10071 */
10072 cb.cb_sources = ZPROP_SRC_ALL;
10073 cb.cb_columns[0] = GET_COL_NAME;
10074 cb.cb_columns[1] = GET_COL_PROPERTY;
10075 cb.cb_columns[2] = GET_COL_VALUE;
10076 cb.cb_columns[3] = GET_COL_SOURCE;
10077 cb.cb_type = ZFS_TYPE_POOL;
10078 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10079 current_prop_type = cb.cb_type;
10080
10081 /* check options */
10082 while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
10083 switch (c) {
10084 case 'p':
10085 cb.cb_literal = B_TRUE;
10086 break;
10087 case 'H':
10088 cb.cb_scripted = B_TRUE;
10089 break;
10090 case 'o':
10091 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
10092 i = 0;
10093
10094 for (char *tok; (tok = strsep(&optarg, ",")); ) {
10095 static const char *const col_opts[] =
10096 { "name", "property", "value", "source",
10097 "all" };
10098 static const zfs_get_column_t col_cols[] =
10099 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
10100 GET_COL_SOURCE };
10101
10102 if (i == ZFS_GET_NCOLS - 1) {
10103 (void) fprintf(stderr, gettext("too "
10104 "many fields given to -o "
10105 "option\n"));
10106 usage(B_FALSE);
10107 }
10108
10109 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
10110 if (strcmp(tok, col_opts[c]) == 0)
10111 goto found;
10112
10113 (void) fprintf(stderr,
10114 gettext("invalid column name '%s'\n"), tok);
10115 usage(B_FALSE);
10116
10117 found:
10118 if (c >= 4) {
10119 if (i > 0) {
10120 (void) fprintf(stderr,
10121 gettext("\"all\" conflicts "
10122 "with specific fields "
10123 "given to -o option\n"));
10124 usage(B_FALSE);
10125 }
10126
10127 memcpy(cb.cb_columns, col_cols,
10128 sizeof (col_cols));
10129 i = ZFS_GET_NCOLS - 1;
10130 } else
10131 cb.cb_columns[i++] = col_cols[c];
10132 }
10133 break;
10134 case '?':
10135 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10136 optopt);
10137 usage(B_FALSE);
10138 }
10139 }
10140
10141 argc -= optind;
10142 argv += optind;
10143
10144 if (argc < 1) {
10145 (void) fprintf(stderr, gettext("missing property "
10146 "argument\n"));
10147 usage(B_FALSE);
10148 }
10149
10150 /* Properties list is needed later by zprop_get_list() */
10151 propstr = argv[0];
10152
10153 argc--;
10154 argv++;
10155
10156 if (argc == 0) {
10157 /* No args, so just print the defaults. */
10158 } else if (are_all_pools(argc, argv)) {
10159 /* All the args are pool names */
10160 } else if (are_all_pools(1, argv)) {
10161 /* The first arg is a pool name */
10162 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
10163 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10164 &cb.cb_vdevs)) {
10165 /* ... and the rest are vdev names */
10166 cb.cb_vdevs.cb_names = argv + 1;
10167 cb.cb_vdevs.cb_names_count = argc - 1;
10168 cb.cb_type = ZFS_TYPE_VDEV;
10169 argc = 1; /* One pool to process */
10170 } else {
10171 fprintf(stderr, gettext("Expected a list of vdevs in"
10172 " \"%s\", but got:\n"), argv[0]);
10173 error_list_unresolved_vdevs(argc - 1, argv + 1,
10174 argv[0], &cb.cb_vdevs);
10175 fprintf(stderr, "\n");
10176 usage(B_FALSE);
10177 return (1);
10178 }
10179 } else {
10180 /*
10181 * The first arg isn't a pool name.
10182 */
10183 fprintf(stderr, gettext("missing pool name.\n"));
10184 fprintf(stderr, "\n");
10185 usage(B_FALSE);
10186 return (1);
10187 }
10188
10189 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
10190 cb.cb_type) != 0) {
10191 /* Use correct list of valid properties (pool or vdev) */
10192 current_prop_type = cb.cb_type;
10193 usage(B_FALSE);
10194 }
10195
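	/*
	 * Prepend a fake "name" entry so the NAME column is sized, and so the
	 * callbacks can recognize and skip it (see the placeholder checks in
	 * get_callback() and get_callback_vdev()).
	 */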
10196 if (cb.cb_proplist != NULL) {
10197 fake_name.pl_prop = ZPOOL_PROP_NAME;
10198 fake_name.pl_width = strlen(gettext("NAME"));
10199 fake_name.pl_next = cb.cb_proplist;
10200 cb.cb_proplist = &fake_name;
10201 }
10202
10203 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
10204 cb.cb_literal, get_callback, &cb);
10205
10206 if (cb.cb_proplist == &fake_name)
10207 zprop_free_list(fake_name.pl_next);
10208 else
10209 zprop_free_list(cb.cb_proplist);
10210
10211 return (ret);
10212 }
10213
10214 typedef struct set_cbdata {
10215 char *cb_propname;
10216 char *cb_value;
10217 zfs_type_t cb_type;
10218 vdev_cbdata_t cb_vdevs;
10219 boolean_t cb_any_successful;
10220 } set_cbdata_t;
10221
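/*
 * Set a single pool property. For 'compatibility' and feature@ properties,
 * first validate the request against the pool's compatibility set and warn
 * or fail as appropriate before calling zpool_set_prop().
 */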
10222 static int
10223 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
10224 {
10225 int error;
10226
10227 /* Check for already-enabled features that fall outside the new compatibility set */
10228 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
10229 boolean_t features[SPA_FEATURES];
10230 if (zpool_do_load_compat(cb->cb_value, features) !=
10231 ZPOOL_COMPATIBILITY_OK)
10232 return (-1);
10233
10234 nvlist_t *enabled = zpool_get_features(zhp);
10235 spa_feature_t i;
10236 for (i = 0; i < SPA_FEATURES; i++) {
10237 const char *fguid = spa_feature_table[i].fi_guid;
10238 if (nvlist_exists(enabled, fguid) && !features[i])
10239 break;
10240 }
10241 if (i < SPA_FEATURES)
10242 (void) fprintf(stderr, gettext("Warning: one or "
10243 "more features already enabled on pool '%s'\n"
10244 "are not present in this compatibility set.\n"),
10245 zpool_get_name(zhp));
10246 }
10247
10248 /* If we're setting a feature, check that it is in the compatibility set */
10249 if (zpool_prop_feature(cb->cb_propname) &&
10250 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
10251 char *fname = strchr(cb->cb_propname, '@') + 1;
10252 spa_feature_t f;
10253
10254 if (zfeature_lookup_name(fname, &f) == 0) {
10255 char compat[ZFS_MAXPROPLEN];
10256 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
10257 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
10258 compat[0] = '\0';
10259
10260 boolean_t features[SPA_FEATURES];
10261 if (zpool_do_load_compat(compat, features) !=
10262 ZPOOL_COMPATIBILITY_OK) {
10263 (void) fprintf(stderr, gettext("Error: "
10264 "cannot enable feature '%s' on pool '%s'\n"
10265 "because the pool's 'compatibility' "
10266 "property cannot be parsed.\n"),
10267 fname, zpool_get_name(zhp));
10268 return (-1);
10269 }
10270
10271 if (!features[f]) {
10272 (void) fprintf(stderr, gettext("Error: "
10273 "cannot enable feature '%s' on pool '%s'\n"
10274 "as it is not specified in this pool's "
10275 "current compatibility set.\n"
10276 "Consider setting 'compatibility' to a "
10277 "less restrictive set, or to 'off'.\n"),
10278 fname, zpool_get_name(zhp));
10279 return (-1);
10280 }
10281 }
10282 }
10283
10284 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
10285
10286 return (error);
10287 }
10288
10289 static int
10290 set_callback(zpool_handle_t *zhp, void *data)
10291 {
10292 int error;
10293 set_cbdata_t *cb = (set_cbdata_t *)data;
10294
10295 if (cb->cb_type == ZFS_TYPE_VDEV) {
10296 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
10297 cb->cb_propname, cb->cb_value);
10298 } else {
10299 assert(cb->cb_type == ZFS_TYPE_POOL);
10300 error = set_pool_callback(zhp, cb);
10301 }
10302
10303 cb->cb_any_successful = !error;
10304 return (error);
10305 }
10306
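/*
 * zpool set property=value <pool> [<vdev>]
 *
 * Sets the given property on the named pool, or on a vdev of that pool when
 * a vdev is also named; see the argument handling below for the exact forms
 * accepted.
 */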
10307 int
10308 zpool_do_set(int argc, char **argv)
10309 {
10310 set_cbdata_t cb = { 0 };
10311 int error;
10312
10313 current_prop_type = ZFS_TYPE_POOL;
10314 if (argc > 1 && argv[1][0] == '-') {
10315 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10316 argv[1][1]);
10317 usage(B_FALSE);
10318 }
10319
10320 if (argc < 2) {
10321 (void) fprintf(stderr, gettext("missing property=value "
10322 "argument\n"));
10323 usage(B_FALSE);
10324 }
10325
10326 if (argc < 3) {
10327 (void) fprintf(stderr, gettext("missing pool name\n"));
10328 usage(B_FALSE);
10329 }
10330
10331 if (argc > 4) {
10332 (void) fprintf(stderr, gettext("too many pool names\n"));
10333 usage(B_FALSE);
10334 }
10335
10336 cb.cb_propname = argv[1];
10337 cb.cb_type = ZFS_TYPE_POOL;
10338 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10339 cb.cb_value = strchr(cb.cb_propname, '=');
10340 if (cb.cb_value == NULL) {
10341 (void) fprintf(stderr, gettext("missing value in "
10342 "property=value argument\n"));
10343 usage(B_FALSE);
10344 }
10345
10346 *(cb.cb_value) = '\0';
10347 cb.cb_value++;
10348 argc -= 2;
10349 argv += 2;
10350
10351 if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
10352 /* Argument is a vdev */
10353 cb.cb_vdevs.cb_names = argv;
10354 cb.cb_vdevs.cb_names_count = 1;
10355 cb.cb_type = ZFS_TYPE_VDEV;
10356 argc = 0; /* No pools to process */
10357 } else if (are_all_pools(1, argv)) {
10358 /* The first arg is a pool name */
10359 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10360 &cb.cb_vdevs)) {
10361 /* 2nd argument is a vdev */
10362 cb.cb_vdevs.cb_names = argv + 1;
10363 cb.cb_vdevs.cb_names_count = 1;
10364 cb.cb_type = ZFS_TYPE_VDEV;
10365 argc = 1; /* One pool to process */
10366 } else if (argc > 1) {
10367 (void) fprintf(stderr,
10368 gettext("too many pool names\n"));
10369 usage(B_FALSE);
10370 }
10371 }
10372
10373 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
10374 B_FALSE, set_callback, &cb);
10375
10376 return (error);
10377 }
10378
10379 /* Add up the total number of bytes left to initialize/trim across all vdevs */
10380 static uint64_t
10381 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
10382 {
10383 uint64_t bytes_remaining;
10384 nvlist_t **child;
10385 uint_t c, children;
10386 vdev_stat_t *vs;
10387
10388 assert(activity == ZPOOL_WAIT_INITIALIZE ||
10389 activity == ZPOOL_WAIT_TRIM);
10390
10391 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
10392 (uint64_t **)&vs, &c) == 0);
10393
10394 if (activity == ZPOOL_WAIT_INITIALIZE &&
10395 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
10396 bytes_remaining = vs->vs_initialize_bytes_est -
10397 vs->vs_initialize_bytes_done;
10398 else if (activity == ZPOOL_WAIT_TRIM &&
10399 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
10400 bytes_remaining = vs->vs_trim_bytes_est -
10401 vs->vs_trim_bytes_done;
10402 else
10403 bytes_remaining = 0;
10404
10405 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10406 &child, &children) != 0)
10407 children = 0;
10408
10409 for (c = 0; c < children; c++)
10410 bytes_remaining += vdev_activity_remaining(child[c], activity);
10411
10412 return (bytes_remaining);
10413 }
10414
10415 /* Add up the total number of bytes left to rebuild across top-level vdevs */
10416 static uint64_t
10417 vdev_activity_top_remaining(nvlist_t *nv)
10418 {
10419 uint64_t bytes_remaining = 0;
10420 nvlist_t **child;
10421 uint_t children;
10422 int error;
10423
10424 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10425 &child, &children) != 0)
10426 children = 0;
10427
10428 for (uint_t c = 0; c < children; c++) {
10429 vdev_rebuild_stat_t *vrs;
10430 uint_t i;
10431
10432 error = nvlist_lookup_uint64_array(child[c],
10433 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
10434 if (error == 0) {
10435 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
10436 bytes_remaining += (vrs->vrs_bytes_est -
10437 vrs->vrs_bytes_rebuilt);
10438 }
10439 }
10440 }
10441
10442 return (bytes_remaining);
10443 }
10444
10445 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
10446 static boolean_t
10447 vdev_any_spare_replacing(nvlist_t *nv)
10448 {
10449 nvlist_t **child;
10450 uint_t c, children;
10451 char *vdev_type;
10452
10453 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
10454
10455 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
10456 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
10457 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
10458 return (B_TRUE);
10459 }
10460
10461 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10462 &child, &children) != 0)
10463 children = 0;
10464
10465 for (c = 0; c < children; c++) {
10466 if (vdev_any_spare_replacing(child[c]))
10467 return (B_TRUE);
10468 }
10469
10470 return (B_FALSE);
10471 }
10472
10473 typedef struct wait_data {
10474 char *wd_poolname;
10475 boolean_t wd_scripted;
10476 boolean_t wd_exact;
10477 boolean_t wd_headers_once;
10478 boolean_t wd_should_exit;
10479 /* Which activities to wait for */
10480 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
10481 float wd_interval;
10482 pthread_cond_t wd_cv;
10483 pthread_mutex_t wd_mutex;
10484 } wait_data_t;
10485
10486 /*
10487 * Print to stdout a single line, containing one column for each activity that
10488 * we are waiting for, specifying how many bytes of work are left for that
10489 * activity.
10490 */
10491 static void
10492 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
10493 {
10494 nvlist_t *config, *nvroot;
10495 uint_t c;
10496 int i;
10497 pool_checkpoint_stat_t *pcs = NULL;
10498 pool_scan_stat_t *pss = NULL;
10499 pool_removal_stat_t *prs = NULL;
10500 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
10501 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"};
10502 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
10503
10504 /* Calculate the width of each column */
10505 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10506 /*
10507 * Make sure we have enough space in the col for pretty-printed
10508 * numbers and for the column header, and then leave a couple
10509 * spaces between cols for readability.
10510 */
10511 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
10512 }
10513
10514 /* Print header if appropriate */
10515 int term_height = terminal_height();
10516 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
10517 row % (term_height-1) == 0);
10518 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
10519 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10520 if (wd->wd_enabled[i])
10521 (void) printf("%*s", col_widths[i], headers[i]);
10522 }
10523 (void) fputc('\n', stdout);
10524 }
10525
10526 /* Bytes of work remaining in each activity */
10527 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
10528
10529 bytes_rem[ZPOOL_WAIT_FREE] =
10530 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
10531
10532 config = zpool_get_config(zhp, NULL);
10533 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10534
10535 (void) nvlist_lookup_uint64_array(nvroot,
10536 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10537 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
10538 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
10539
10540 (void) nvlist_lookup_uint64_array(nvroot,
10541 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
10542 if (prs != NULL && prs->prs_state == DSS_SCANNING)
10543 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
10544 prs->prs_copied;
10545
10546 (void) nvlist_lookup_uint64_array(nvroot,
10547 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
10548 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
10549 pss->pss_pass_scrub_pause == 0) {
10550 int64_t rem = pss->pss_to_examine - pss->pss_issued;
10551 if (pss->pss_func == POOL_SCAN_SCRUB)
10552 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
10553 else
10554 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
10555 } else if (check_rebuilding(nvroot, NULL)) {
10556 bytes_rem[ZPOOL_WAIT_RESILVER] =
10557 vdev_activity_top_remaining(nvroot);
10558 }
10559
10560 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
10561 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
10562 bytes_rem[ZPOOL_WAIT_TRIM] =
10563 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
10564
10565 /*
10566 * A replace finishes after resilvering finishes, so the amount of work
10567 * left for a replace is the same as for resilvering.
10568 *
10569 * It isn't quite correct to say that if we have any 'spare' or
10570 * 'replacing' vdevs and a resilver is happening, then a replace is in
10571 * progress, like we do here. When a hot spare is used, the faulted vdev
10572 * is not removed after the hot spare is resilvered, so the parent
10573 * 'spare' vdev is not removed either. So we could have a 'spare' vdev, but be
10574 * resilvering for a different reason. However, we use it as a heuristic
10575 * because we don't have access to the DTLs, which could tell us whether
10576 * or not we have really finished resilvering a hot spare.
10577 */
10578 if (vdev_any_spare_replacing(nvroot))
10579 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
10580
10581 if (timestamp_fmt != NODATE)
10582 print_timestamp(timestamp_fmt);
10583
10584 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10585 char buf[64];
10586 if (!wd->wd_enabled[i])
10587 continue;
10588
10589 if (wd->wd_exact)
10590 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
10591 bytes_rem[i]);
10592 else
10593 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
10594
10595 if (wd->wd_scripted)
10596 (void) printf(i == 0 ? "%s" : "\t%s", buf);
10597 else
10598 (void) printf(" %*s", col_widths[i] - 1, buf);
10599 }
10600 (void) printf("\n");
10601 (void) fflush(stdout);
10602 }
10603
10604 static void *
10605 wait_status_thread(void *arg)
10606 {
10607 wait_data_t *wd = (wait_data_t *)arg;
10608 zpool_handle_t *zhp;
10609
10610 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
10611 return (void *)(1);
10612
10613 for (int row = 0; ; row++) {
10614 boolean_t missing;
10615 struct timespec timeout;
10616 int ret = 0;
10617 (void) clock_gettime(CLOCK_REALTIME, &timeout);
10618
10619 if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
10620 zpool_props_refresh(zhp) != 0) {
10621 zpool_close(zhp);
10622 return (void *)(uintptr_t)(missing ? 0 : 1);
10623 }
10624
10625 print_wait_status_row(wd, zhp, row);
10626
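		/*
		 * Advance the absolute timeout by wd_interval seconds: whole
		 * seconds go into tv_sec, the fractional part into tv_nsec,
		 * and any nanosecond overflow is carried back into tv_sec
		 * (e.g. an interval of 2.5s adds 2 to tv_sec and 500000000 to
		 * tv_nsec, incrementing tv_sec if tv_nsec reaches NANOSEC).
		 */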
10627 timeout.tv_sec += floor(wd->wd_interval);
10628 long nanos = timeout.tv_nsec +
10629 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
10630 if (nanos >= NANOSEC) {
10631 timeout.tv_sec++;
10632 timeout.tv_nsec = nanos - NANOSEC;
10633 } else {
10634 timeout.tv_nsec = nanos;
10635 }
10636 pthread_mutex_lock(&wd->wd_mutex);
10637 if (!wd->wd_should_exit)
10638 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
10639 &timeout);
10640 pthread_mutex_unlock(&wd->wd_mutex);
10641 if (ret == 0) {
10642 break; /* signaled by main thread */
10643 } else if (ret != ETIMEDOUT) {
10644 (void) fprintf(stderr, gettext("pthread_cond_timedwait "
10645 "failed: %s\n"), strerror(ret));
10646 zpool_close(zhp);
10647 return (void *)(uintptr_t)(1);
10648 }
10649 }
10650
10651 zpool_close(zhp);
10652 return (void *)(0);
10653 }
10654
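/*
 * zpool wait [-Hp] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 * Blocks until the selected activities (discard, free, initialize, replace,
 * remove, resilver, scrub, trim; all of them by default) are no longer in
 * progress on the pool. When an interval is given, a status line with the
 * bytes of work remaining for each activity is printed periodically while
 * waiting.
 */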
10655 int
10656 zpool_do_wait(int argc, char **argv)
10657 {
10658 boolean_t verbose = B_FALSE;
10659 int c, i;
10660 unsigned long count;
10661 pthread_t status_thr;
10662 int error = 0;
10663 zpool_handle_t *zhp;
10664
10665 wait_data_t wd;
10666 wd.wd_scripted = B_FALSE;
10667 wd.wd_exact = B_FALSE;
10668 wd.wd_headers_once = B_FALSE;
10669 wd.wd_should_exit = B_FALSE;
10670
10671 pthread_mutex_init(&wd.wd_mutex, NULL);
10672 pthread_cond_init(&wd.wd_cv, NULL);
10673
10674 /* By default, wait for all types of activity. */
10675 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
10676 wd.wd_enabled[i] = B_TRUE;
10677
10678 while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
10679 switch (c) {
10680 case 'H':
10681 wd.wd_scripted = B_TRUE;
10682 break;
10683 case 'n':
10684 wd.wd_headers_once = B_TRUE;
10685 break;
10686 case 'p':
10687 wd.wd_exact = B_TRUE;
10688 break;
10689 case 'T':
10690 get_timestamp_arg(*optarg);
10691 break;
10692 case 't':
10693 /* Reset activities array */
10694 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
10695
10696 for (char *tok; (tok = strsep(&optarg, ",")); ) {
10697 static const char *const col_opts[] = {
10698 "discard", "free", "initialize", "replace",
10699 "remove", "resilver", "scrub", "trim" };
10700
10701 for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
10702 if (strcmp(tok, col_opts[i]) == 0) {
10703 wd.wd_enabled[i] = B_TRUE;
10704 goto found;
10705 }
10706
10707 (void) fprintf(stderr,
10708 gettext("invalid activity '%s'\n"), tok);
10709 usage(B_FALSE);
10710 found:;
10711 }
10712 break;
10713 case '?':
10714 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10715 optopt);
10716 usage(B_FALSE);
10717 }
10718 }
10719
10720 argc -= optind;
10721 argv += optind;
10722
10723 get_interval_count(&argc, argv, &wd.wd_interval, &count);
10724 if (count != 0) {
10725 /* This subcmd only accepts an interval, not a count */
10726 (void) fprintf(stderr, gettext("too many arguments\n"));
10727 usage(B_FALSE);
10728 }
10729
10730 if (wd.wd_interval != 0)
10731 verbose = B_TRUE;
10732
10733 if (argc < 1) {
10734 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
10735 usage(B_FALSE);
10736 }
10737 if (argc > 1) {
10738 (void) fprintf(stderr, gettext("too many arguments\n"));
10739 usage(B_FALSE);
10740 }
10741
10742 wd.wd_poolname = argv[0];
10743
10744 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
10745 return (1);
10746
10747 if (verbose) {
10748 /*
10749 * We use a separate thread for printing status updates because
10750 * the main thread will call lzc_wait(), which blocks as long
10751 * as an activity is in progress, which can be a long time.
10752 */
10753 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
10754 != 0) {
10755 (void) fprintf(stderr, gettext("failed to create status"
10756 "thread: %s\n"), strerror(errno));
10757 zpool_close(zhp);
10758 return (1);
10759 }
10760 }
10761
10762 /*
10763 * Loop over all activities that we are supposed to wait for until none
10764 * of them are in progress. Note that this means we can end up waiting
10765 * for more activities to complete than just those that were in progress
10766 * when we began waiting; if an activity we are interested in begins
10767 * while we are waiting for another activity, we will wait for both to
10768 * complete before exiting.
10769 */
10770 for (;;) {
10771 boolean_t missing = B_FALSE;
10772 boolean_t any_waited = B_FALSE;
10773
10774 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10775 boolean_t waited;
10776
10777 if (!wd.wd_enabled[i])
10778 continue;
10779
10780 error = zpool_wait_status(zhp, i, &missing, &waited);
10781 if (error != 0 || missing)
10782 break;
10783
10784 any_waited = (any_waited || waited);
10785 }
10786
10787 if (error != 0 || missing || !any_waited)
10788 break;
10789 }
10790
10791 zpool_close(zhp);
10792
10793 if (verbose) {
10794 uintptr_t status;
10795 pthread_mutex_lock(&wd.wd_mutex);
10796 wd.wd_should_exit = B_TRUE;
10797 pthread_cond_signal(&wd.wd_cv);
10798 pthread_mutex_unlock(&wd.wd_mutex);
10799 (void) pthread_join(status_thr, (void *)&status);
10800 if (status != 0)
10801 error = status;
10802 }
10803
10804 pthread_mutex_destroy(&wd.wd_mutex);
10805 pthread_cond_destroy(&wd.wd_cv);
10806 return (error);
10807 }
10808
10809 static int
10810 find_command_idx(const char *command, int *idx)
10811 {
10812 for (int i = 0; i < NCOMMAND; ++i) {
10813 if (command_table[i].name == NULL)
10814 continue;
10815
10816 if (strcmp(command, command_table[i].name) == 0) {
10817 *idx = i;
10818 return (0);
10819 }
10820 }
10821 return (1);
10822 }
10823
10824 /*
10825 * Display version message
10826 */
10827 static int
10828 zpool_do_version(int argc, char **argv)
10829 {
10830 (void) argc, (void) argv;
10831 return (zfs_version_print() != 0);
10832 }
10833
10834 /*
10835 * Do zpool_load_compat() and print error message on failure
10836 */
10837 static zpool_compat_status_t
10838 zpool_do_load_compat(const char *compat, boolean_t *list)
10839 {
10840 char report[1024];
10841
10842 zpool_compat_status_t ret;
10843
10844 ret = zpool_load_compat(compat, list, report, 1024);
10845 switch (ret) {
10846
10847 case ZPOOL_COMPATIBILITY_OK:
10848 break;
10849
10850 case ZPOOL_COMPATIBILITY_NOFILES:
10851 case ZPOOL_COMPATIBILITY_BADFILE:
10852 case ZPOOL_COMPATIBILITY_BADTOKEN:
10853 (void) fprintf(stderr, "Error: %s\n", report);
10854 break;
10855
10856 case ZPOOL_COMPATIBILITY_WARNTOKEN:
10857 (void) fprintf(stderr, "Warning: %s\n", report);
10858 ret = ZPOOL_COMPATIBILITY_OK;
10859 break;
10860 }
10861 return (ret);
10862 }
10863
10864 int
10865 main(int argc, char **argv)
10866 {
10867 int ret = 0;
10868 int i = 0;
10869 char *cmdname;
10870 char **newargv;
10871
10872 (void) setlocale(LC_ALL, "");
10873 (void) setlocale(LC_NUMERIC, "C");
10874 (void) textdomain(TEXT_DOMAIN);
10875 srand(time(NULL));
10876
10877 opterr = 0;
10878
10879 /*
10880 * Make sure the user has specified some command.
10881 */
10882 if (argc < 2) {
10883 (void) fprintf(stderr, gettext("missing command\n"));
10884 usage(B_FALSE);
10885 }
10886
10887 cmdname = argv[1];
10888
10889 /*
10890 * Special case '-?'
10891 */
10892 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
10893 usage(B_TRUE);
10894
10895 /*
10896 * Special case '-V|--version'
10897 */
10898 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
10899 return (zpool_do_version(argc, argv));
10900
10901 if ((g_zfs = libzfs_init()) == NULL) {
10902 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
10903 return (1);
10904 }
10905
10906 libzfs_print_on_error(g_zfs, B_TRUE);
10907
10908 zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
10909
10910 /*
10911 * Many commands modify input strings for string parsing reasons.
10912 * We create a copy to protect the original argv.
10913 */
10914 newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
10915 for (i = 0; i < argc; i++)
10916 newargv[i] = strdup(argv[i]);
10917 newargv[argc] = NULL;
10918
10919 /*
10920 * Run the appropriate command.
10921 */
10922 if (find_command_idx(cmdname, &i) == 0) {
10923 current_command = &command_table[i];
10924 ret = command_table[i].func(argc - 1, newargv + 1);
10925 } else if (strchr(cmdname, '=')) {
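		/*
		 * A bare "property=value" first argument is shorthand for
		 * 'zpool set': dispatch to the set command with the original
		 * argument vector intact.
		 */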
10926 verify(find_command_idx("set", &i) == 0);
10927 current_command = &command_table[i];
10928 ret = command_table[i].func(argc, newargv);
10929 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
10930 /*
10931 * 'freeze' is a vile debugging abomination, so we treat
10932 * it as such.
10933 */
10934 zfs_cmd_t zc = {"\0"};
10935
10936 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
10937 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
10938 if (ret != 0) {
10939 (void) fprintf(stderr,
10940 gettext("failed to freeze pool: %d\n"), errno);
10941 ret = 1;
10942 }
10943
10944 log_history = 0;
10945 } else {
10946 (void) fprintf(stderr, gettext("unrecognized "
10947 "command '%s'\n"), cmdname);
10948 usage(B_FALSE);
10949 ret = 1;
10950 }
10951
10952 for (i = 0; i < argc; i++)
10953 free(newargv[i]);
10954 free(newargv);
10955
10956 if (ret == 0 && log_history)
10957 (void) zpool_log_history(g_zfs, history_str);
10958
10959 libzfs_fini(g_zfs);
10960
10961 /*
10962 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
10963 * for the purposes of running ::findleaks.
10964 */
10965 if (getenv("ZFS_ABORT") != NULL) {
10966 (void) printf("dumping core by request\n");
10967 abort();
10968 }
10969
10970 return (ret);
10971 }