1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
27 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
28 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
30 * Copyright (c) 2017 Datto Inc.
31 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
32 * Copyright (c) 2017, Intel Corporation.
33 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
34 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
35 * Copyright (c) 2021, Klara Inc.
36 * Copyright [2021] Hewlett Packard Enterprise Development LP
37 */
38
39 #include <assert.h>
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <fcntl.h>
44 #include <getopt.h>
45 #include <libgen.h>
46 #include <libintl.h>
47 #include <libuutil.h>
48 #include <locale.h>
49 #include <pthread.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <strings.h>
54 #include <time.h>
55 #include <unistd.h>
56 #include <pwd.h>
57 #include <zone.h>
58 #include <sys/wait.h>
59 #include <zfs_prop.h>
60 #include <sys/fs/zfs.h>
61 #include <sys/stat.h>
62 #include <sys/systeminfo.h>
63 #include <sys/fm/fs/zfs.h>
64 #include <sys/fm/util.h>
65 #include <sys/fm/protocol.h>
66 #include <sys/zfs_ioctl.h>
67 #include <sys/mount.h>
68 #include <sys/sysmacros.h>
69
70 #include <math.h>
71
72 #include <libzfs.h>
73 #include <libzutil.h>
74
75 #include "zpool_util.h"
76 #include "zfs_comutil.h"
77 #include "zfeature_common.h"
78
79 #include "statcommon.h"
80
81 libzfs_handle_t *g_zfs;
82
83 static int zpool_do_create(int, char **);
84 static int zpool_do_destroy(int, char **);
85
86 static int zpool_do_add(int, char **);
87 static int zpool_do_remove(int, char **);
88 static int zpool_do_labelclear(int, char **);
89
90 static int zpool_do_checkpoint(int, char **);
91
92 static int zpool_do_list(int, char **);
93 static int zpool_do_iostat(int, char **);
94 static int zpool_do_status(int, char **);
95
96 static int zpool_do_online(int, char **);
97 static int zpool_do_offline(int, char **);
98 static int zpool_do_clear(int, char **);
99 static int zpool_do_reopen(int, char **);
100
101 static int zpool_do_reguid(int, char **);
102
103 static int zpool_do_attach(int, char **);
104 static int zpool_do_detach(int, char **);
105 static int zpool_do_replace(int, char **);
106 static int zpool_do_split(int, char **);
107
108 static int zpool_do_initialize(int, char **);
109 static int zpool_do_scrub(int, char **);
110 static int zpool_do_resilver(int, char **);
111 static int zpool_do_trim(int, char **);
112
113 static int zpool_do_import(int, char **);
114 static int zpool_do_export(int, char **);
115
116 static int zpool_do_upgrade(int, char **);
117
118 static int zpool_do_history(int, char **);
119 static int zpool_do_events(int, char **);
120
121 static int zpool_do_get(int, char **);
122 static int zpool_do_set(int, char **);
123
124 static int zpool_do_sync(int, char **);
125
126 static int zpool_do_version(int, char **);
127
128 static int zpool_do_wait(int, char **);
129
130 static zpool_compat_status_t zpool_do_load_compat(
131 const char *, boolean_t *);
132
133 /*
134 * These libumem hooks provide a reasonable set of defaults for the allocator's
135 * debugging facilities.
136 */
137
138 #ifdef DEBUG
139 const char *
140 _umem_debug_init(void)
141 {
142 return ("default,verbose"); /* $UMEM_DEBUG setting */
143 }
144
145 const char *
146 _umem_logging_init(void)
147 {
148 return ("fail,contents"); /* $UMEM_LOGGING setting */
149 }
150 #endif
151
152 typedef enum {
153 HELP_ADD,
154 HELP_ATTACH,
155 HELP_CLEAR,
156 HELP_CREATE,
157 HELP_CHECKPOINT,
158 HELP_DESTROY,
159 HELP_DETACH,
160 HELP_EXPORT,
161 HELP_HISTORY,
162 HELP_IMPORT,
163 HELP_IOSTAT,
164 HELP_LABELCLEAR,
165 HELP_LIST,
166 HELP_OFFLINE,
167 HELP_ONLINE,
168 HELP_REPLACE,
169 HELP_REMOVE,
170 HELP_INITIALIZE,
171 HELP_SCRUB,
172 HELP_RESILVER,
173 HELP_TRIM,
174 HELP_STATUS,
175 HELP_UPGRADE,
176 HELP_EVENTS,
177 HELP_GET,
178 HELP_SET,
179 HELP_SPLIT,
180 HELP_SYNC,
181 HELP_REGUID,
182 HELP_REOPEN,
183 HELP_VERSION,
184 HELP_WAIT
185 } zpool_help_t;
186
187
188 /*
189  * Flags for stats to display with "zpool iostat"
190 */
191 enum iostat_type {
192 IOS_DEFAULT = 0,
193 IOS_LATENCY = 1,
194 IOS_QUEUES = 2,
195 IOS_L_HISTO = 3,
196 IOS_RQ_HISTO = 4,
197 IOS_COUNT, /* always last element */
198 };
199
200 /* iostat_type entries as bitmasks */
201 #define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
202 #define IOS_LATENCY_M (1ULL << IOS_LATENCY)
203 #define IOS_QUEUES_M (1ULL << IOS_QUEUES)
204 #define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
205 #define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
206
207 /* Mask of all the histo bits */
208 #define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
209
210 /*
211 * Lookup table for iostat flags to nvlist names. Basically a list
212 * of all the nvlists a flag requires. Also specifies the order in
213 * which data gets printed in zpool iostat.
214 */
215 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
216 [IOS_L_HISTO] = {
217 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
218 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
219 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
220 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
221 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
222 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
223 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
224 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
225 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
226 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
227 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
228 NULL},
229 [IOS_LATENCY] = {
230 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
231 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
232 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
233 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
234 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
235 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
236 NULL},
237 [IOS_QUEUES] = {
238 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
239 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
240 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
241 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
242 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
243 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
244 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
245 NULL},
246 [IOS_RQ_HISTO] = {
247 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
248 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
249 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
250 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
251 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
252 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
253 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
254 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
255 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
256 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
257 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
258 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
259 ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
260 ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
261 NULL},
262 };
263
264
265 /*
266 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
267 * Right now, only one histo bit is ever set at one time, so we can
268 * just do a highbit64(a)
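 * (Worked example, following the defines above: IOS_HISTO_IDX(IOS_RQ_HISTO_M)
 * == highbit64(1ULL << IOS_RQ_HISTO) - 1 == IOS_RQ_HISTO.)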
269 */
270 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
271
272 typedef struct zpool_command {
273 const char *name;
274 int (*func)(int, char **);
275 zpool_help_t usage;
276 } zpool_command_t;
277
278 /*
279 * Master command table. Each ZFS command has a name, associated function, and
280 * usage message. The usage messages need to be internationalized, so we have
281 * to have a function to return the usage message based on a command index.
282 *
283 * These commands are organized according to how they are displayed in the usage
284 * message. An empty command (one with a NULL name) indicates an empty line in
285 * the generic usage message.
286 */
287 static zpool_command_t command_table[] = {
288 { "version", zpool_do_version, HELP_VERSION },
289 { NULL },
290 { "create", zpool_do_create, HELP_CREATE },
291 { "destroy", zpool_do_destroy, HELP_DESTROY },
292 { NULL },
293 { "add", zpool_do_add, HELP_ADD },
294 { "remove", zpool_do_remove, HELP_REMOVE },
295 { NULL },
296 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
297 { NULL },
298 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
299 { NULL },
300 { "list", zpool_do_list, HELP_LIST },
301 { "iostat", zpool_do_iostat, HELP_IOSTAT },
302 { "status", zpool_do_status, HELP_STATUS },
303 { NULL },
304 { "online", zpool_do_online, HELP_ONLINE },
305 { "offline", zpool_do_offline, HELP_OFFLINE },
306 { "clear", zpool_do_clear, HELP_CLEAR },
307 { "reopen", zpool_do_reopen, HELP_REOPEN },
308 { NULL },
309 { "attach", zpool_do_attach, HELP_ATTACH },
310 { "detach", zpool_do_detach, HELP_DETACH },
311 { "replace", zpool_do_replace, HELP_REPLACE },
312 { "split", zpool_do_split, HELP_SPLIT },
313 { NULL },
314 { "initialize", zpool_do_initialize, HELP_INITIALIZE },
315 { "resilver", zpool_do_resilver, HELP_RESILVER },
316 { "scrub", zpool_do_scrub, HELP_SCRUB },
317 { "trim", zpool_do_trim, HELP_TRIM },
318 { NULL },
319 { "import", zpool_do_import, HELP_IMPORT },
320 { "export", zpool_do_export, HELP_EXPORT },
321 { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
322 { "reguid", zpool_do_reguid, HELP_REGUID },
323 { NULL },
324 { "history", zpool_do_history, HELP_HISTORY },
325 { "events", zpool_do_events, HELP_EVENTS },
326 { NULL },
327 { "get", zpool_do_get, HELP_GET },
328 { "set", zpool_do_set, HELP_SET },
329 { "sync", zpool_do_sync, HELP_SYNC },
330 { NULL },
331 { "wait", zpool_do_wait, HELP_WAIT },
332 };
333
334 #define NCOMMAND (ARRAY_SIZE(command_table))
335
336 #define VDEV_ALLOC_CLASS_LOGS "logs"
337
338 static zpool_command_t *current_command;
339 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
340 static char history_str[HIS_MAX_RECORD_LEN];
341 static boolean_t log_history = B_TRUE;
342 static uint_t timestamp_fmt = NODATE;
343
344 static const char *
345 get_usage(zpool_help_t idx)
346 {
347 switch (idx) {
348 case HELP_ADD:
349 return (gettext("\tadd [-fgLnP] [-o property=value] "
350 "<pool> <vdev> ...\n"));
351 case HELP_ATTACH:
352 return (gettext("\tattach [-fsw] [-o property=value] "
353 "<pool> <device> <new-device>\n"));
354 case HELP_CLEAR:
355 return (gettext("\tclear [-nF] <pool> [device]\n"));
356 case HELP_CREATE:
357 return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
358 "\t [-O file-system-property=value] ... \n"
359 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
360 case HELP_CHECKPOINT:
361 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
362 case HELP_DESTROY:
363 return (gettext("\tdestroy [-f] <pool>\n"));
364 case HELP_DETACH:
365 return (gettext("\tdetach <pool> <device>\n"));
366 case HELP_EXPORT:
367 return (gettext("\texport [-af] <pool> ...\n"));
368 case HELP_HISTORY:
369 return (gettext("\thistory [-il] [<pool>] ...\n"));
370 case HELP_IMPORT:
371 return (gettext("\timport [-d dir] [-D]\n"
372 "\timport [-o mntopts] [-o property=value] ... \n"
373 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
374 "[-R root] [-F [-n]] -a\n"
375 "\timport [-o mntopts] [-o property=value] ... \n"
376 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
377 "[-R root] [-F [-n]]\n"
378 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
379 case HELP_IOSTAT:
380 return (gettext("\tiostat [[[-c [script1,script2,...]"
381 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
382 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
383 " [[-n] interval [count]]\n"));
384 case HELP_LABELCLEAR:
385 return (gettext("\tlabelclear [-f] <vdev>\n"));
386 case HELP_LIST:
387 return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
388 "[-T d|u] [pool] ... \n"
389 "\t [interval [count]]\n"));
390 case HELP_OFFLINE:
391 return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
392 case HELP_ONLINE:
393 return (gettext("\tonline [-e] <pool> <device> ...\n"));
394 case HELP_REPLACE:
395 return (gettext("\treplace [-fsw] [-o property=value] "
396 "<pool> <device> [new-device]\n"));
397 case HELP_REMOVE:
398 return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
399 case HELP_REOPEN:
400 return (gettext("\treopen [-n] <pool>\n"));
401 case HELP_INITIALIZE:
402 return (gettext("\tinitialize [-c | -s] [-w] <pool> "
403 "[<device> ...]\n"));
404 case HELP_SCRUB:
405 return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
406 case HELP_RESILVER:
407 return (gettext("\tresilver <pool> ...\n"));
408 case HELP_TRIM:
409 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
410 "[<device> ...]\n"));
411 case HELP_STATUS:
412 return (gettext("\tstatus [-c [script1,script2,...]] "
413 "[-igLpPstvxD] [-T d|u] [pool] ... \n"
414 "\t [interval [count]]\n"));
415 case HELP_UPGRADE:
416 return (gettext("\tupgrade\n"
417 "\tupgrade -v\n"
418 "\tupgrade [-V version] <-a | pool ...>\n"));
419 case HELP_EVENTS:
420 return (gettext("\tevents [-vHf [pool] | -c]\n"));
421 case HELP_GET:
422 return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
423 "<\"all\" | property[,...]> <pool> ...\n"));
424 case HELP_SET:
425 return (gettext("\tset <property=value> <pool> \n"));
426 case HELP_SPLIT:
427 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
428 "\t [-o property=value] <pool> <newpool> "
429 "[<device> ...]\n"));
430 case HELP_REGUID:
431 return (gettext("\treguid <pool>\n"));
432 case HELP_SYNC:
433 return (gettext("\tsync [pool] ...\n"));
434 case HELP_VERSION:
435 return (gettext("\tversion\n"));
436 case HELP_WAIT:
437 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
438 "<pool> [interval]\n"));
439 default:
440 __builtin_unreachable();
441 }
442 }
443
444 static void
445 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
446 {
447 uint_t children = 0;
448 nvlist_t **child;
449 uint_t i;
450
451 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
452 &child, &children);
453
454 if (children == 0) {
455 char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
456 VDEV_NAME_PATH);
457
458 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
459 strcmp(path, VDEV_TYPE_HOLE) != 0)
460 fnvlist_add_boolean(res, path);
461
462 free(path);
463 return;
464 }
465
466 for (i = 0; i < children; i++) {
467 zpool_collect_leaves(zhp, child[i], res);
468 }
469 }
470
471 /*
472 * Callback routine that will print out a pool property value.
473 */
474 static int
475 print_pool_prop_cb(int prop, void *cb)
476 {
477 FILE *fp = cb;
478
479 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
480
481 if (zpool_prop_readonly(prop))
482 (void) fprintf(fp, " NO ");
483 else
484 (void) fprintf(fp, " YES ");
485
486 if (zpool_prop_values(prop) == NULL)
487 (void) fprintf(fp, "-\n");
488 else
489 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
490
491 return (ZPROP_CONT);
492 }
493
494 /*
495 * Callback routine that will print out a vdev property value.
496 */
497 static int
498 print_vdev_prop_cb(int prop, void *cb)
499 {
500 FILE *fp = cb;
501
502 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
503
504 if (vdev_prop_readonly(prop))
505 (void) fprintf(fp, " NO ");
506 else
507 (void) fprintf(fp, " YES ");
508
509 if (vdev_prop_values(prop) == NULL)
510 (void) fprintf(fp, "-\n");
511 else
512 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
513
514 return (ZPROP_CONT);
515 }
516
517 /*
518 * Display usage message. If we're inside a command, display only the usage for
519 * that command. Otherwise, iterate over the entire command table and display
520 * a complete usage message.
521 */
522 static void
523 usage(boolean_t requested)
524 {
525 FILE *fp = requested ? stdout : stderr;
526
527 if (current_command == NULL) {
528 int i;
529
530 (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
531 (void) fprintf(fp,
532 gettext("where 'command' is one of the following:\n\n"));
533
534 for (i = 0; i < NCOMMAND; i++) {
535 if (command_table[i].name == NULL)
536 (void) fprintf(fp, "\n");
537 else
538 (void) fprintf(fp, "%s",
539 get_usage(command_table[i].usage));
540 }
541 } else {
542 (void) fprintf(fp, gettext("usage:\n"));
543 (void) fprintf(fp, "%s", get_usage(current_command->usage));
544 }
545
546 if (current_command != NULL &&
547 current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
548 ((strcmp(current_command->name, "set") == 0) ||
549 (strcmp(current_command->name, "get") == 0) ||
550 (strcmp(current_command->name, "list") == 0))) {
551
552 (void) fprintf(fp,
553 gettext("\nthe following properties are supported:\n"));
554
555 (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
556 "PROPERTY", "EDIT", "VALUES");
557
558 /* Iterate over all properties */
559 if (current_prop_type == ZFS_TYPE_POOL) {
560 (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
561 B_TRUE, current_prop_type);
562
563 (void) fprintf(fp, "\t%-19s ", "feature@...");
564 (void) fprintf(fp, "YES "
565 "disabled | enabled | active\n");
566
567 (void) fprintf(fp, gettext("\nThe feature@ properties "
568 "must be appended with a feature name.\n"
569 "See zpool-features(7).\n"));
570 } else if (current_prop_type == ZFS_TYPE_VDEV) {
571 (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
572 B_TRUE, current_prop_type);
573 }
574 }
575
576 /*
577 * See comments at end of main().
578 */
579 if (getenv("ZFS_ABORT") != NULL) {
580 (void) printf("dumping core by request\n");
581 abort();
582 }
583
584 exit(requested ? 0 : 2);
585 }
586
587 /*
588 * zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
589 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
590 * if none specified.
591 *
592 * -c Cancel. Ends active initializing.
593 * -s Suspend. Initializing can then be restarted with no flags.
594 * -w Wait. Blocks until initializing has completed.
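 *
 * For illustration only (device names are placeholders):
 *     zpool initialize tank           start initializing every leaf vdev
 *     zpool initialize -w tank sdb    initialize one vdev, block until done
 *     zpool initialize -s tank        suspend; restart later with no flags
 *     zpool initialize -c tank        cancel an active initialization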
595 */
596 int
597 zpool_do_initialize(int argc, char **argv)
598 {
599 int c;
600 char *poolname;
601 zpool_handle_t *zhp;
602 nvlist_t *vdevs;
603 int err = 0;
604 boolean_t wait = B_FALSE;
605
606 struct option long_options[] = {
607 {"cancel", no_argument, NULL, 'c'},
608 {"suspend", no_argument, NULL, 's'},
609 {"wait", no_argument, NULL, 'w'},
610 {0, 0, 0, 0}
611 };
612
613 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
614 while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
615 switch (c) {
616 case 'c':
617 if (cmd_type != POOL_INITIALIZE_START &&
618 cmd_type != POOL_INITIALIZE_CANCEL) {
619 (void) fprintf(stderr, gettext("-c cannot be "
620 "combined with other options\n"));
621 usage(B_FALSE);
622 }
623 cmd_type = POOL_INITIALIZE_CANCEL;
624 break;
625 case 's':
626 if (cmd_type != POOL_INITIALIZE_START &&
627 cmd_type != POOL_INITIALIZE_SUSPEND) {
628 (void) fprintf(stderr, gettext("-s cannot be "
629 "combined with other options\n"));
630 usage(B_FALSE);
631 }
632 cmd_type = POOL_INITIALIZE_SUSPEND;
633 break;
634 case 'w':
635 wait = B_TRUE;
636 break;
637 case '?':
638 if (optopt != 0) {
639 (void) fprintf(stderr,
640 gettext("invalid option '%c'\n"), optopt);
641 } else {
642 (void) fprintf(stderr,
643 gettext("invalid option '%s'\n"),
644 argv[optind - 1]);
645 }
646 usage(B_FALSE);
647 }
648 }
649
650 argc -= optind;
651 argv += optind;
652
653 if (argc < 1) {
654 (void) fprintf(stderr, gettext("missing pool name argument\n"));
655 usage(B_FALSE);
656 return (-1);
657 }
658
659 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
660 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
661 "-s\n"));
662 usage(B_FALSE);
663 }
664
665 poolname = argv[0];
666 zhp = zpool_open(g_zfs, poolname);
667 if (zhp == NULL)
668 return (-1);
669
670 vdevs = fnvlist_alloc();
671 if (argc == 1) {
672 /* no individual leaf vdevs specified, so add them all */
673 nvlist_t *config = zpool_get_config(zhp, NULL);
674 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
675 ZPOOL_CONFIG_VDEV_TREE);
676 zpool_collect_leaves(zhp, nvroot, vdevs);
677 } else {
678 for (int i = 1; i < argc; i++) {
679 fnvlist_add_boolean(vdevs, argv[i]);
680 }
681 }
682
683 if (wait)
684 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
685 else
686 err = zpool_initialize(zhp, cmd_type, vdevs);
687
688 fnvlist_free(vdevs);
689 zpool_close(zhp);
690
691 return (err);
692 }
693
694 /*
695 * print a pool vdev config for dry runs
696 */
697 static void
698 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
699 const char *match, int name_flags)
700 {
701 nvlist_t **child;
702 uint_t c, children;
703 char *vname;
704 boolean_t printed = B_FALSE;
705
706 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
707 &child, &children) != 0) {
708 if (name != NULL)
709 (void) printf("\t%*s%s\n", indent, "", name);
710 return;
711 }
712
713 for (c = 0; c < children; c++) {
714 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
715 char *class = "";
716
717 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
718 &is_hole);
719
720 if (is_hole == B_TRUE) {
721 continue;
722 }
723
724 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
725 &is_log);
726 if (is_log)
727 class = VDEV_ALLOC_BIAS_LOG;
728 (void) nvlist_lookup_string(child[c],
729 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
730 if (strcmp(match, class) != 0)
731 continue;
732
733 if (!printed && name != NULL) {
734 (void) printf("\t%*s%s\n", indent, "", name);
735 printed = B_TRUE;
736 }
737 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
738 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
739 name_flags);
740 free(vname);
741 }
742 }
743
744 /*
745 * Print the list of l2cache devices for dry runs.
746 */
747 static void
748 print_cache_list(nvlist_t *nv, int indent)
749 {
750 nvlist_t **child;
751 uint_t c, children;
752
753 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
754 &child, &children) == 0 && children > 0) {
755 (void) printf("\t%*s%s\n", indent, "", "cache");
756 } else {
757 return;
758 }
759 for (c = 0; c < children; c++) {
760 char *vname;
761
762 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
763 (void) printf("\t%*s%s\n", indent + 2, "", vname);
764 free(vname);
765 }
766 }
767
768 /*
769 * Print the list of spares for dry runs.
770 */
771 static void
772 print_spare_list(nvlist_t *nv, int indent)
773 {
774 nvlist_t **child;
775 uint_t c, children;
776
777 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
778 &child, &children) == 0 && children > 0) {
779 (void) printf("\t%*s%s\n", indent, "", "spares");
780 } else {
781 return;
782 }
783 for (c = 0; c < children; c++) {
784 char *vname;
785
786 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
787 (void) printf("\t%*s%s\n", indent + 2, "", vname);
788 free(vname);
789 }
790 }
791
792 static boolean_t
793 prop_list_contains_feature(nvlist_t *proplist)
794 {
795 nvpair_t *nvp;
796 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
797 nvp = nvlist_next_nvpair(proplist, nvp)) {
798 if (zpool_prop_feature(nvpair_name(nvp)))
799 return (B_TRUE);
800 }
801 return (B_FALSE);
802 }
803
804 /*
805 * Add a property pair (name, string-value) into a property nvlist.
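 * E.g. add_prop_list("ashift", "12", &props, B_TRUE) records the pool
 * property ashift=12 in *props (mirroring the call made in zpool_do_add()).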
806 */
807 static int
808 add_prop_list(const char *propname, char *propval, nvlist_t **props,
809 boolean_t poolprop)
810 {
811 zpool_prop_t prop = ZPOOL_PROP_INVAL;
812 nvlist_t *proplist;
813 const char *normnm;
814 char *strval;
815
816 if (*props == NULL &&
817 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
818 (void) fprintf(stderr,
819 gettext("internal error: out of memory\n"));
820 return (1);
821 }
822
823 proplist = *props;
824
825 if (poolprop) {
826 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
827 const char *cname =
828 zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
829
830 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
831 (!zpool_prop_feature(propname) &&
832 !zpool_prop_vdev(propname))) {
833 (void) fprintf(stderr, gettext("property '%s' is "
834 "not a valid pool or vdev property\n"), propname);
835 return (2);
836 }
837
838 /*
839 * feature@ properties and version should not be specified
840 * at the same time.
841 */
842 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
843 nvlist_exists(proplist, vname)) ||
844 (prop == ZPOOL_PROP_VERSION &&
845 prop_list_contains_feature(proplist))) {
846 (void) fprintf(stderr, gettext("'feature@' and "
847 "'version' properties cannot be specified "
848 "together\n"));
849 return (2);
850 }
851
852 /*
853 * if version is specified, only "legacy" compatibility
854 * may be requested
855 */
856 if ((prop == ZPOOL_PROP_COMPATIBILITY &&
857 strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
858 nvlist_exists(proplist, vname)) ||
859 (prop == ZPOOL_PROP_VERSION &&
860 nvlist_exists(proplist, cname) &&
861 strcmp(fnvlist_lookup_string(proplist, cname),
862 ZPOOL_COMPAT_LEGACY) != 0)) {
863 (void) fprintf(stderr, gettext("when 'version' is "
864 "specified, the 'compatibility' feature may only "
865 "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
866 return (2);
867 }
868
869 if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
870 normnm = propname;
871 else
872 normnm = zpool_prop_to_name(prop);
873 } else {
874 zfs_prop_t fsprop = zfs_name_to_prop(propname);
875
876 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
877 B_FALSE)) {
878 normnm = zfs_prop_to_name(fsprop);
879 } else if (zfs_prop_user(propname) ||
880 zfs_prop_userquota(propname)) {
881 normnm = propname;
882 } else {
883 (void) fprintf(stderr, gettext("property '%s' is "
884 "not a valid filesystem property\n"), propname);
885 return (2);
886 }
887 }
888
889 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
890 prop != ZPOOL_PROP_CACHEFILE) {
891 (void) fprintf(stderr, gettext("property '%s' "
892 "specified multiple times\n"), propname);
893 return (2);
894 }
895
896 if (nvlist_add_string(proplist, normnm, propval) != 0) {
897 (void) fprintf(stderr, gettext("internal "
898 "error: out of memory\n"));
899 return (1);
900 }
901
902 return (0);
903 }
904
905 /*
906 * Set a default property pair (name, string-value) in a property nvlist
907 */
908 static int
909 add_prop_list_default(const char *propname, char *propval, nvlist_t **props)
910 {
911 char *pval;
912
913 if (nvlist_lookup_string(*props, propname, &pval) == 0)
914 return (0);
915
916 return (add_prop_list(propname, propval, props, B_TRUE));
917 }
918
919 /*
920 * zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
921 *
922 * -f Force addition of devices, even if they appear in use
923 * -g Display guid for individual vdev name.
924 * -L Follow links when resolving vdev path name.
925 * -n Do not add the devices, but display the resulting layout if
926 * they were to be added.
927 * -o Set property=value.
928 * -P Display full path for vdev name.
929 *
930 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
931 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
932 * libzfs.
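 *
 * For illustration only (device names are placeholders):
 *     zpool add tank mirror sdc sdd     add a new mirror top-level vdev
 *     zpool add -n tank cache sde       dry run: print the resulting layout
 * Note that the option parser below accepts only "ashift" as a -o property.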
933 */
934 int
935 zpool_do_add(int argc, char **argv)
936 {
937 boolean_t force = B_FALSE;
938 boolean_t dryrun = B_FALSE;
939 int name_flags = 0;
940 int c;
941 nvlist_t *nvroot;
942 char *poolname;
943 int ret;
944 zpool_handle_t *zhp;
945 nvlist_t *config;
946 nvlist_t *props = NULL;
947 char *propval;
948
949 /* check options */
950 while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
951 switch (c) {
952 case 'f':
953 force = B_TRUE;
954 break;
955 case 'g':
956 name_flags |= VDEV_NAME_GUID;
957 break;
958 case 'L':
959 name_flags |= VDEV_NAME_FOLLOW_LINKS;
960 break;
961 case 'n':
962 dryrun = B_TRUE;
963 break;
964 case 'o':
965 if ((propval = strchr(optarg, '=')) == NULL) {
966 (void) fprintf(stderr, gettext("missing "
967 "'=' for -o option\n"));
968 usage(B_FALSE);
969 }
970 *propval = '\0';
971 propval++;
972
973 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
974 (add_prop_list(optarg, propval, &props, B_TRUE)))
975 usage(B_FALSE);
976 break;
977 case 'P':
978 name_flags |= VDEV_NAME_PATH;
979 break;
980 case '?':
981 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
982 optopt);
983 usage(B_FALSE);
984 }
985 }
986
987 argc -= optind;
988 argv += optind;
989
990 /* get pool name and check number of arguments */
991 if (argc < 1) {
992 (void) fprintf(stderr, gettext("missing pool name argument\n"));
993 usage(B_FALSE);
994 }
995 if (argc < 2) {
996 (void) fprintf(stderr, gettext("missing vdev specification\n"));
997 usage(B_FALSE);
998 }
999
1000 poolname = argv[0];
1001
1002 argc--;
1003 argv++;
1004
1005 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1006 return (1);
1007
1008 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1009 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1010 poolname);
1011 zpool_close(zhp);
1012 return (1);
1013 }
1014
1015 /* unless manually specified use "ashift" pool property (if set) */
1016 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1017 int intval;
1018 zprop_source_t src;
1019 char strval[ZPOOL_MAXPROPLEN];
1020
1021 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1022 if (src != ZPROP_SRC_DEFAULT) {
1023 (void) sprintf(strval, "%" PRId32, intval);
1024 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1025 &props, B_TRUE) == 0);
1026 }
1027 }
1028
1029 /* pass off to make_root_vdev for processing */
1030 nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
1031 argc, argv);
1032 if (nvroot == NULL) {
1033 zpool_close(zhp);
1034 return (1);
1035 }
1036
1037 if (dryrun) {
1038 nvlist_t *poolnvroot;
1039 nvlist_t **l2child, **sparechild;
1040 uint_t l2children, sparechildren, c;
1041 char *vname;
1042 boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1043
1044 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1045 &poolnvroot) == 0);
1046
1047 (void) printf(gettext("would update '%s' to the following "
1048 "configuration:\n\n"), zpool_get_name(zhp));
1049
1050 /* print original main pool and new tree */
1051 print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1052 name_flags | VDEV_NAME_TYPE_ID);
1053 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1054
1055 /* print other classes: 'dedup', 'special', and 'log' */
1056 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1057 print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1058 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1059 print_vdev_tree(zhp, NULL, nvroot, 0,
1060 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1061 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1062 print_vdev_tree(zhp, "dedup", nvroot, 0,
1063 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1064 }
1065
1066 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1067 print_vdev_tree(zhp, "special", poolnvroot, 0,
1068 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1069 print_vdev_tree(zhp, NULL, nvroot, 0,
1070 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1071 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1072 print_vdev_tree(zhp, "special", nvroot, 0,
1073 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1074 }
1075
1076 if (num_logs(poolnvroot) > 0) {
1077 print_vdev_tree(zhp, "logs", poolnvroot, 0,
1078 VDEV_ALLOC_BIAS_LOG, name_flags);
1079 print_vdev_tree(zhp, NULL, nvroot, 0,
1080 VDEV_ALLOC_BIAS_LOG, name_flags);
1081 } else if (num_logs(nvroot) > 0) {
1082 print_vdev_tree(zhp, "logs", nvroot, 0,
1083 VDEV_ALLOC_BIAS_LOG, name_flags);
1084 }
1085
1086 /* Do the same for the caches */
1087 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1088 &l2child, &l2children) == 0 && l2children) {
1089 hadcache = B_TRUE;
1090 (void) printf(gettext("\tcache\n"));
1091 for (c = 0; c < l2children; c++) {
1092 vname = zpool_vdev_name(g_zfs, NULL,
1093 l2child[c], name_flags);
1094 (void) printf("\t %s\n", vname);
1095 free(vname);
1096 }
1097 }
1098 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1099 &l2child, &l2children) == 0 && l2children) {
1100 if (!hadcache)
1101 (void) printf(gettext("\tcache\n"));
1102 for (c = 0; c < l2children; c++) {
1103 vname = zpool_vdev_name(g_zfs, NULL,
1104 l2child[c], name_flags);
1105 (void) printf("\t %s\n", vname);
1106 free(vname);
1107 }
1108 }
1109 /* And finally the spares */
1110 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1111 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1112 hadspare = B_TRUE;
1113 (void) printf(gettext("\tspares\n"));
1114 for (c = 0; c < sparechildren; c++) {
1115 vname = zpool_vdev_name(g_zfs, NULL,
1116 sparechild[c], name_flags);
1117 (void) printf("\t %s\n", vname);
1118 free(vname);
1119 }
1120 }
1121 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1122 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1123 if (!hadspare)
1124 (void) printf(gettext("\tspares\n"));
1125 for (c = 0; c < sparechildren; c++) {
1126 vname = zpool_vdev_name(g_zfs, NULL,
1127 sparechild[c], name_flags);
1128 (void) printf("\t %s\n", vname);
1129 free(vname);
1130 }
1131 }
1132
1133 ret = 0;
1134 } else {
1135 ret = (zpool_add(zhp, nvroot) != 0);
1136 }
1137
1138 nvlist_free(props);
1139 nvlist_free(nvroot);
1140 zpool_close(zhp);
1141
1142 return (ret);
1143 }
1144
1145 /*
1146 * zpool remove [-npsw] <pool> <vdev> ...
1147 *
1148 * Removes the given vdev from the pool.
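 *
 * For illustration only:
 *     zpool remove tank mirror-1        begin removing a top-level vdev
 *     zpool remove -np tank mirror-1    print, in parsable form, the memory
 *                                       the indirect mapping would use,
 *                                       without removing anything
 *     zpool remove -s tank              cancel an in-progress removal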
1149 */
1150 int
1151 zpool_do_remove(int argc, char **argv)
1152 {
1153 char *poolname;
1154 int i, ret = 0;
1155 zpool_handle_t *zhp = NULL;
1156 boolean_t stop = B_FALSE;
1157 int c;
1158 boolean_t noop = B_FALSE;
1159 boolean_t parsable = B_FALSE;
1160 boolean_t wait = B_FALSE;
1161
1162 /* check options */
1163 while ((c = getopt(argc, argv, "npsw")) != -1) {
1164 switch (c) {
1165 case 'n':
1166 noop = B_TRUE;
1167 break;
1168 case 'p':
1169 parsable = B_TRUE;
1170 break;
1171 case 's':
1172 stop = B_TRUE;
1173 break;
1174 case 'w':
1175 wait = B_TRUE;
1176 break;
1177 case '?':
1178 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1179 optopt);
1180 usage(B_FALSE);
1181 }
1182 }
1183
1184 argc -= optind;
1185 argv += optind;
1186
1187 /* get pool name and check number of arguments */
1188 if (argc < 1) {
1189 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1190 usage(B_FALSE);
1191 }
1192
1193 poolname = argv[0];
1194
1195 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1196 return (1);
1197
1198 if (stop && noop) {
1199 (void) fprintf(stderr, gettext("stop request ignored\n"));
1200 return (0);
1201 }
1202
1203 if (stop) {
1204 if (argc > 1) {
1205 (void) fprintf(stderr, gettext("too many arguments\n"));
1206 usage(B_FALSE);
1207 }
1208 if (zpool_vdev_remove_cancel(zhp) != 0)
1209 ret = 1;
1210 if (wait) {
1211 (void) fprintf(stderr, gettext("invalid option "
1212 "combination: -w cannot be used with -s\n"));
1213 usage(B_FALSE);
1214 }
1215 } else {
1216 if (argc < 2) {
1217 (void) fprintf(stderr, gettext("missing device\n"));
1218 usage(B_FALSE);
1219 }
1220
1221 for (i = 1; i < argc; i++) {
1222 if (noop) {
1223 uint64_t size;
1224
1225 if (zpool_vdev_indirect_size(zhp, argv[i],
1226 &size) != 0) {
1227 ret = 1;
1228 break;
1229 }
1230 if (parsable) {
1231 (void) printf("%s %llu\n",
1232 argv[i], (unsigned long long)size);
1233 } else {
1234 char valstr[32];
1235 zfs_nicenum(size, valstr,
1236 sizeof (valstr));
1237 (void) printf("Memory that will be "
1238 "used after removing %s: %s\n",
1239 argv[i], valstr);
1240 }
1241 } else {
1242 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1243 ret = 1;
1244 }
1245 }
1246
1247 if (ret == 0 && wait)
1248 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1249 }
1250 zpool_close(zhp);
1251
1252 return (ret);
1253 }
1254
1255 /*
1256 * Return 1 if a vdev is active (being used in a pool)
1257  * Return 0 if a vdev is inactive (offlined or faulted, or not in an active pool)
1258 *
1259 * This is useful for checking if a disk in an active pool is offlined or
1260 * faulted.
1261 */
1262 static int
1263 vdev_is_active(char *vdev_path)
1264 {
1265 int fd;
1266 fd = open(vdev_path, O_EXCL);
1267 if (fd < 0) {
1268 		return (1); /* can't open O_EXCL - disk is active */
1269 }
1270
1271 close(fd);
1272 return (0); /* disk is inactive in the pool */
1273 }
1274
1275 /*
1276 * zpool labelclear [-f] <vdev>
1277 *
1279  *	-f	Force clearing the label for vdevs that are members of
1280  *		exported or foreign pools.
1280 *
1281 * Verifies that the vdev is not active and zeros out the label information
1282 * on the device.
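 *
 * For illustration only:
 *     zpool labelclear /dev/sdc1    clear the label on an inactive device
 *     zpool labelclear -f sdc1      relative names are resolved against the
 *                                   default device search paths; -f overrides
 *                                   membership in an exported or foreign pool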
1283 */
1284 int
1285 zpool_do_labelclear(int argc, char **argv)
1286 {
1287 char vdev[MAXPATHLEN];
1288 char *name = NULL;
1289 struct stat st;
1290 int c, fd = -1, ret = 0;
1291 nvlist_t *config;
1292 pool_state_t state;
1293 boolean_t inuse = B_FALSE;
1294 boolean_t force = B_FALSE;
1295
1296 /* check options */
1297 while ((c = getopt(argc, argv, "f")) != -1) {
1298 switch (c) {
1299 case 'f':
1300 force = B_TRUE;
1301 break;
1302 default:
1303 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1304 optopt);
1305 usage(B_FALSE);
1306 }
1307 }
1308
1309 argc -= optind;
1310 argv += optind;
1311
1312 /* get vdev name */
1313 if (argc < 1) {
1314 (void) fprintf(stderr, gettext("missing vdev name\n"));
1315 usage(B_FALSE);
1316 }
1317 if (argc > 1) {
1318 (void) fprintf(stderr, gettext("too many arguments\n"));
1319 usage(B_FALSE);
1320 }
1321
1322 /*
1323 * Check if we were given absolute path and use it as is.
1324 * Otherwise if the provided vdev name doesn't point to a file,
1325 * try prepending expected disk paths and partition numbers.
1326 */
1327 (void) strlcpy(vdev, argv[0], sizeof (vdev));
1328 if (vdev[0] != '/' && stat(vdev, &st) != 0) {
1329 int error;
1330
1331 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1332 if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1333 if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1334 error = ENOENT;
1335 }
1336
1337 if (error || (stat(vdev, &st) != 0)) {
1338 (void) fprintf(stderr, gettext(
1339 "failed to find device %s, try specifying absolute "
1340 "path instead\n"), argv[0]);
1341 return (1);
1342 }
1343 }
1344
1345 if ((fd = open(vdev, O_RDWR)) < 0) {
1346 (void) fprintf(stderr, gettext("failed to open %s: %s\n"),
1347 vdev, strerror(errno));
1348 return (1);
1349 }
1350
1351 /*
1352 * Flush all dirty pages for the block device. This should not be
1353 * fatal when the device does not support BLKFLSBUF as would be the
1354 * case for a file vdev.
1355 */
1356 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1357 (void) fprintf(stderr, gettext("failed to invalidate "
1358 "cache for %s: %s\n"), vdev, strerror(errno));
1359
1360 if (zpool_read_label(fd, &config, NULL) != 0) {
1361 (void) fprintf(stderr,
1362 gettext("failed to read label from %s\n"), vdev);
1363 ret = 1;
1364 goto errout;
1365 }
1366 nvlist_free(config);
1367
1368 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1369 if (ret != 0) {
1370 (void) fprintf(stderr,
1371 gettext("failed to check state for %s\n"), vdev);
1372 ret = 1;
1373 goto errout;
1374 }
1375
1376 if (!inuse)
1377 goto wipe_label;
1378
1379 switch (state) {
1380 default:
1381 case POOL_STATE_ACTIVE:
1382 case POOL_STATE_SPARE:
1383 case POOL_STATE_L2CACHE:
1384 /*
1385 * We allow the user to call 'zpool offline -f'
1386 * on an offlined disk in an active pool. We can check if
1387 * the disk is online by calling vdev_is_active().
1388 */
1389 if (force && !vdev_is_active(vdev))
1390 break;
1391
1392 (void) fprintf(stderr, gettext(
1393 "%s is a member (%s) of pool \"%s\""),
1394 vdev, zpool_pool_state_to_name(state), name);
1395
1396 if (force) {
1397 (void) fprintf(stderr, gettext(
1398 ". Offline the disk first to clear its label."));
1399 }
1400 printf("\n");
1401 ret = 1;
1402 goto errout;
1403
1404 case POOL_STATE_EXPORTED:
1405 if (force)
1406 break;
1407 (void) fprintf(stderr, gettext(
1408 "use '-f' to override the following error:\n"
1409 "%s is a member of exported pool \"%s\"\n"),
1410 vdev, name);
1411 ret = 1;
1412 goto errout;
1413
1414 case POOL_STATE_POTENTIALLY_ACTIVE:
1415 if (force)
1416 break;
1417 (void) fprintf(stderr, gettext(
1418 "use '-f' to override the following error:\n"
1419 "%s is a member of potentially active pool \"%s\"\n"),
1420 vdev, name);
1421 ret = 1;
1422 goto errout;
1423
1424 case POOL_STATE_DESTROYED:
1425 /* inuse should never be set for a destroyed pool */
1426 assert(0);
1427 break;
1428 }
1429
1430 wipe_label:
1431 ret = zpool_clear_label(fd);
1432 if (ret != 0) {
1433 (void) fprintf(stderr,
1434 gettext("failed to clear label for %s\n"), vdev);
1435 }
1436
1437 errout:
1438 free(name);
1439 (void) close(fd);
1440
1441 return (ret);
1442 }
1443
1444 /*
1445 * zpool create [-fnd] [-o property=value] ...
1446 * [-O file-system-property=value] ...
1447 * [-R root] [-m mountpoint] <pool> <dev> ...
1448 *
1449 * -f Force creation, even if devices appear in use
1450 * -n Do not create the pool, but display the resulting layout if it
1451 * were to be created.
1452 * -R Create a pool under an alternate root
1453 * -m Set default mountpoint for the root dataset. By default it's
1454 * '/<pool>'
1455 * -o Set property=value.
1456 * -o Set feature@feature=enabled|disabled.
1457 * -d Don't automatically enable all supported pool features
1458 * (individual features can be enabled with -o).
1459 * -O Set fsproperty=value in the pool's root file system
1460 *
1461 * Creates the named pool according to the given vdev specification. The
1462 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1463 * Once we get the nvlist back from make_root_vdev(), we either print out the
1464 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
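 *
 * For illustration only (device names are placeholders):
 *     zpool create -n tank mirror sda sdb
 *     zpool create -o ashift=12 -O compression=on -m /export/tank tank raidz sdc sdd sde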
1465 */
1466 int
1467 zpool_do_create(int argc, char **argv)
1468 {
1469 boolean_t force = B_FALSE;
1470 boolean_t dryrun = B_FALSE;
1471 boolean_t enable_pool_features = B_TRUE;
1472
1473 int c;
1474 nvlist_t *nvroot = NULL;
1475 char *poolname;
1476 char *tname = NULL;
1477 int ret = 1;
1478 char *altroot = NULL;
1479 char *compat = NULL;
1480 char *mountpoint = NULL;
1481 nvlist_t *fsprops = NULL;
1482 nvlist_t *props = NULL;
1483 char *propval;
1484
1485 /* check options */
1486 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1487 switch (c) {
1488 case 'f':
1489 force = B_TRUE;
1490 break;
1491 case 'n':
1492 dryrun = B_TRUE;
1493 break;
1494 case 'd':
1495 enable_pool_features = B_FALSE;
1496 break;
1497 case 'R':
1498 altroot = optarg;
1499 if (add_prop_list(zpool_prop_to_name(
1500 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
1501 goto errout;
1502 if (add_prop_list_default(zpool_prop_to_name(
1503 ZPOOL_PROP_CACHEFILE), "none", &props))
1504 goto errout;
1505 break;
1506 case 'm':
1507 /* Equivalent to -O mountpoint=optarg */
1508 mountpoint = optarg;
1509 break;
1510 case 'o':
1511 if ((propval = strchr(optarg, '=')) == NULL) {
1512 (void) fprintf(stderr, gettext("missing "
1513 "'=' for -o option\n"));
1514 goto errout;
1515 }
1516 *propval = '\0';
1517 propval++;
1518
1519 if (add_prop_list(optarg, propval, &props, B_TRUE))
1520 goto errout;
1521
1522 /*
1523 * If the user is creating a pool that doesn't support
1524 * feature flags, don't enable any features.
1525 */
1526 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
1527 char *end;
1528 u_longlong_t ver;
1529
1530 ver = strtoull(propval, &end, 10);
1531 if (*end == '\0' &&
1532 ver < SPA_VERSION_FEATURES) {
1533 enable_pool_features = B_FALSE;
1534 }
1535 }
1536 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
1537 altroot = propval;
1538 if (zpool_name_to_prop(optarg) ==
1539 ZPOOL_PROP_COMPATIBILITY)
1540 compat = propval;
1541 break;
1542 case 'O':
1543 if ((propval = strchr(optarg, '=')) == NULL) {
1544 (void) fprintf(stderr, gettext("missing "
1545 "'=' for -O option\n"));
1546 goto errout;
1547 }
1548 *propval = '\0';
1549 propval++;
1550
1551 /*
1552 * Mountpoints are checked and then added later.
1553 * Uniquely among properties, they can be specified
1554 * more than once, to avoid conflict with -m.
1555 */
1556 if (0 == strcmp(optarg,
1557 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
1558 mountpoint = propval;
1559 } else if (add_prop_list(optarg, propval, &fsprops,
1560 B_FALSE)) {
1561 goto errout;
1562 }
1563 break;
1564 case 't':
1565 /*
1566 * Sanity check temporary pool name.
1567 */
1568 if (strchr(optarg, '/') != NULL) {
1569 (void) fprintf(stderr, gettext("cannot create "
1570 "'%s': invalid character '/' in temporary "
1571 "name\n"), optarg);
1572 (void) fprintf(stderr, gettext("use 'zfs "
1573 "create' to create a dataset\n"));
1574 goto errout;
1575 }
1576
1577 if (add_prop_list(zpool_prop_to_name(
1578 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
1579 goto errout;
1580 if (add_prop_list_default(zpool_prop_to_name(
1581 ZPOOL_PROP_CACHEFILE), "none", &props))
1582 goto errout;
1583 tname = optarg;
1584 break;
1585 case ':':
1586 (void) fprintf(stderr, gettext("missing argument for "
1587 "'%c' option\n"), optopt);
1588 goto badusage;
1589 case '?':
1590 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1591 optopt);
1592 goto badusage;
1593 }
1594 }
1595
1596 argc -= optind;
1597 argv += optind;
1598
1599 /* get pool name and check number of arguments */
1600 if (argc < 1) {
1601 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1602 goto badusage;
1603 }
1604 if (argc < 2) {
1605 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1606 goto badusage;
1607 }
1608
1609 poolname = argv[0];
1610
1611 /*
1612 * As a special case, check for use of '/' in the name, and direct the
1613 * user to use 'zfs create' instead.
1614 */
1615 if (strchr(poolname, '/') != NULL) {
1616 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
1617 "character '/' in pool name\n"), poolname);
1618 (void) fprintf(stderr, gettext("use 'zfs create' to "
1619 "create a dataset\n"));
1620 goto errout;
1621 }
1622
1623 /* pass off to make_root_vdev for bulk processing */
1624 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
1625 argc - 1, argv + 1);
1626 if (nvroot == NULL)
1627 goto errout;
1628
1629 /* make_root_vdev() allows 0 toplevel children if there are spares */
1630 if (!zfs_allocatable_devs(nvroot)) {
1631 (void) fprintf(stderr, gettext("invalid vdev "
1632 "specification: at least one toplevel vdev must be "
1633 "specified\n"));
1634 goto errout;
1635 }
1636
1637 if (altroot != NULL && altroot[0] != '/') {
1638 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
1639 "must be an absolute path\n"), altroot);
1640 goto errout;
1641 }
1642
1643 /*
1644 * Check the validity of the mountpoint and direct the user to use the
1645  * '-m' mountpoint option if it looks like it's in use.
1646 */
1647 if (mountpoint == NULL ||
1648 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
1649 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
1650 char buf[MAXPATHLEN];
1651 DIR *dirp;
1652
1653 if (mountpoint && mountpoint[0] != '/') {
1654 (void) fprintf(stderr, gettext("invalid mountpoint "
1655 "'%s': must be an absolute path, 'legacy', or "
1656 "'none'\n"), mountpoint);
1657 goto errout;
1658 }
1659
1660 if (mountpoint == NULL) {
1661 if (altroot != NULL)
1662 (void) snprintf(buf, sizeof (buf), "%s/%s",
1663 altroot, poolname);
1664 else
1665 (void) snprintf(buf, sizeof (buf), "/%s",
1666 poolname);
1667 } else {
1668 if (altroot != NULL)
1669 (void) snprintf(buf, sizeof (buf), "%s%s",
1670 altroot, mountpoint);
1671 else
1672 (void) snprintf(buf, sizeof (buf), "%s",
1673 mountpoint);
1674 }
1675
1676 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
1677 (void) fprintf(stderr, gettext("mountpoint '%s' : "
1678 "%s\n"), buf, strerror(errno));
1679 (void) fprintf(stderr, gettext("use '-m' "
1680 "option to provide a different default\n"));
1681 goto errout;
1682 } else if (dirp) {
1683 int count = 0;
1684
1685 while (count < 3 && readdir(dirp) != NULL)
1686 count++;
1687 (void) closedir(dirp);
1688
1689 if (count > 2) {
1690 (void) fprintf(stderr, gettext("mountpoint "
1691 "'%s' exists and is not empty\n"), buf);
1692 (void) fprintf(stderr, gettext("use '-m' "
1693 "option to provide a "
1694 "different default\n"));
1695 goto errout;
1696 }
1697 }
1698 }
1699
1700 /*
1701 * Now that the mountpoint's validity has been checked, ensure that
1702 * the property is set appropriately prior to creating the pool.
1703 */
1704 if (mountpoint != NULL) {
1705 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1706 mountpoint, &fsprops, B_FALSE);
1707 if (ret != 0)
1708 goto errout;
1709 }
1710
1711 ret = 1;
1712 if (dryrun) {
1713 /*
1714 * For a dry run invocation, print out a basic message and run
1715 * through all the vdevs in the list and print out in an
1716 * appropriate hierarchy.
1717 */
1718 (void) printf(gettext("would create '%s' with the "
1719 "following layout:\n\n"), poolname);
1720
1721 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
1722 print_vdev_tree(NULL, "dedup", nvroot, 0,
1723 VDEV_ALLOC_BIAS_DEDUP, 0);
1724 print_vdev_tree(NULL, "special", nvroot, 0,
1725 VDEV_ALLOC_BIAS_SPECIAL, 0);
1726 print_vdev_tree(NULL, "logs", nvroot, 0,
1727 VDEV_ALLOC_BIAS_LOG, 0);
1728 print_cache_list(nvroot, 0);
1729 print_spare_list(nvroot, 0);
1730
1731 ret = 0;
1732 } else {
1733 /*
1734 * Load in feature set.
1735 		 * Note: if the compatibility property is not given, compat will
1736 		 * be NULL, which means 'all features'.
1737 */
1738 boolean_t requested_features[SPA_FEATURES];
1739 if (zpool_do_load_compat(compat, requested_features) !=
1740 ZPOOL_COMPATIBILITY_OK)
1741 goto errout;
1742
1743 /*
1744 * props contains list of features to enable.
1745 * For each feature:
1746 * - remove it if feature@name=disabled
1747 * - leave it there if feature@name=enabled
1748 * - add it if:
1749 * - enable_pool_features (ie: no '-d' or '-o version')
1750 * - it's supported by the kernel module
1751 * - it's in the requested feature set
1752 * - warn if it's enabled but not in compat
1753 */
1754 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
1755 char propname[MAXPATHLEN];
1756 char *propval;
1757 zfeature_info_t *feat = &spa_feature_table[i];
1758
1759 (void) snprintf(propname, sizeof (propname),
1760 "feature@%s", feat->fi_uname);
1761
1762 if (!nvlist_lookup_string(props, propname, &propval)) {
1763 if (strcmp(propval, ZFS_FEATURE_DISABLED) == 0)
1764 (void) nvlist_remove_all(props,
1765 propname);
1766 if (strcmp(propval,
1767 ZFS_FEATURE_ENABLED) == 0 &&
1768 !requested_features[i])
1769 (void) fprintf(stderr, gettext(
1770 "Warning: feature \"%s\" enabled "
1771 "but is not in specified "
1772 "'compatibility' feature set.\n"),
1773 feat->fi_uname);
1774 } else if (
1775 enable_pool_features &&
1776 feat->fi_zfs_mod_supported &&
1777 requested_features[i]) {
1778 ret = add_prop_list(propname,
1779 ZFS_FEATURE_ENABLED, &props, B_TRUE);
1780 if (ret != 0)
1781 goto errout;
1782 }
1783 }
1784
1785 ret = 1;
1786 if (zpool_create(g_zfs, poolname,
1787 nvroot, props, fsprops) == 0) {
1788 zfs_handle_t *pool = zfs_open(g_zfs,
1789 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
1790 if (pool != NULL) {
1791 if (zfs_mount(pool, NULL, 0) == 0) {
1792 ret = zfs_shareall(pool);
1793 zfs_commit_all_shares();
1794 }
1795 zfs_close(pool);
1796 }
1797 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
1798 (void) fprintf(stderr, gettext("pool name may have "
1799 "been omitted\n"));
1800 }
1801 }
1802
1803 errout:
1804 nvlist_free(nvroot);
1805 nvlist_free(fsprops);
1806 nvlist_free(props);
1807 return (ret);
1808 badusage:
1809 nvlist_free(fsprops);
1810 nvlist_free(props);
1811 usage(B_FALSE);
1812 return (2);
1813 }
1814
1815 /*
1816 * zpool destroy <pool>
1817 *
1818 * -f Forcefully unmount any datasets
1819 *
1820 * Destroy the given pool. Automatically unmounts any datasets in the pool.
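 * For illustration only: 'zpool destroy -f tank' forcibly unmounts the pool's
 * datasets (due to -f) and then destroys the pool.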
1821 */
1822 int
1823 zpool_do_destroy(int argc, char **argv)
1824 {
1825 boolean_t force = B_FALSE;
1826 int c;
1827 char *pool;
1828 zpool_handle_t *zhp;
1829 int ret;
1830
1831 /* check options */
1832 while ((c = getopt(argc, argv, "f")) != -1) {
1833 switch (c) {
1834 case 'f':
1835 force = B_TRUE;
1836 break;
1837 case '?':
1838 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1839 optopt);
1840 usage(B_FALSE);
1841 }
1842 }
1843
1844 argc -= optind;
1845 argv += optind;
1846
1847 /* check arguments */
1848 if (argc < 1) {
1849 (void) fprintf(stderr, gettext("missing pool argument\n"));
1850 usage(B_FALSE);
1851 }
1852 if (argc > 1) {
1853 (void) fprintf(stderr, gettext("too many arguments\n"));
1854 usage(B_FALSE);
1855 }
1856
1857 pool = argv[0];
1858
1859 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
1860 /*
1861 * As a special case, check for use of '/' in the name, and
1862 * direct the user to use 'zfs destroy' instead.
1863 */
1864 if (strchr(pool, '/') != NULL)
1865 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
1866 "destroy a dataset\n"));
1867 return (1);
1868 }
1869
1870 if (zpool_disable_datasets(zhp, force) != 0) {
1871 (void) fprintf(stderr, gettext("could not destroy '%s': "
1872 "could not unmount datasets\n"), zpool_get_name(zhp));
1873 zpool_close(zhp);
1874 return (1);
1875 }
1876
1877 /* The history must be logged as part of the export */
1878 log_history = B_FALSE;
1879
1880 ret = (zpool_destroy(zhp, history_str) != 0);
1881
1882 zpool_close(zhp);
1883
1884 return (ret);
1885 }
1886
1887 typedef struct export_cbdata {
1888 boolean_t force;
1889 boolean_t hardforce;
1890 } export_cbdata_t;
1891
1892 /*
1893 * Export one pool
1894 */
1895 static int
1896 zpool_export_one(zpool_handle_t *zhp, void *data)
1897 {
1898 export_cbdata_t *cb = data;
1899
1900 if (zpool_disable_datasets(zhp, cb->force) != 0)
1901 return (1);
1902
1903 /* The history must be logged as part of the export */
1904 log_history = B_FALSE;
1905
1906 if (cb->hardforce) {
1907 if (zpool_export_force(zhp, history_str) != 0)
1908 return (1);
1909 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
1910 return (1);
1911 }
1912
1913 return (0);
1914 }
1915
1916 /*
1917 * zpool export [-f] <pool> ...
1918 *
1919 * -a Export all pools
1920 * -f Forcefully unmount datasets
1921 *
1922 * Export the given pools. By default, the command will attempt to cleanly
1923 * unmount any active datasets within the pool. If the '-f' flag is specified,
1924 * then the datasets will be forcefully unmounted.
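 *
 * For illustration only:
 *     zpool export tank        cleanly unmount and export one pool
 *     zpool export -a          export every imported pool
 *     zpool export -f tank     force-unmount busy datasets before exporting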
1925 */
1926 int
1927 zpool_do_export(int argc, char **argv)
1928 {
1929 export_cbdata_t cb;
1930 boolean_t do_all = B_FALSE;
1931 boolean_t force = B_FALSE;
1932 boolean_t hardforce = B_FALSE;
1933 int c, ret;
1934
1935 /* check options */
1936 while ((c = getopt(argc, argv, "afF")) != -1) {
1937 switch (c) {
1938 case 'a':
1939 do_all = B_TRUE;
1940 break;
1941 case 'f':
1942 force = B_TRUE;
1943 break;
1944 case 'F':
1945 hardforce = B_TRUE;
1946 break;
1947 case '?':
1948 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1949 optopt);
1950 usage(B_FALSE);
1951 }
1952 }
1953
1954 cb.force = force;
1955 cb.hardforce = hardforce;
1956 argc -= optind;
1957 argv += optind;
1958
1959 if (do_all) {
1960 if (argc != 0) {
1961 (void) fprintf(stderr, gettext("too many arguments\n"));
1962 usage(B_FALSE);
1963 }
1964
1965 return (for_each_pool(argc, argv, B_TRUE, NULL,
1966 ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
1967 }
1968
1969 /* check arguments */
1970 if (argc < 1) {
1971 (void) fprintf(stderr, gettext("missing pool argument\n"));
1972 usage(B_FALSE);
1973 }
1974
1975 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
1976 B_FALSE, zpool_export_one, &cb);
1977
1978 return (ret);
1979 }
1980
1981 /*
1982 * Given a vdev configuration, determine the maximum width needed for the device
1983 * name column.
1984 */
1985 static int
1986 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
1987 int name_flags)
1988 {
1989 char *name;
1990 nvlist_t **child;
1991 uint_t c, children;
1992 int ret;
1993
1994 name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
1995 if (strlen(name) + depth > max)
1996 max = strlen(name) + depth;
1997
1998 free(name);
1999
2000 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2001 &child, &children) == 0) {
2002 for (c = 0; c < children; c++)
2003 if ((ret = max_width(zhp, child[c], depth + 2,
2004 max, name_flags)) > max)
2005 max = ret;
2006 }
2007
2008 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2009 &child, &children) == 0) {
2010 for (c = 0; c < children; c++)
2011 if ((ret = max_width(zhp, child[c], depth + 2,
2012 max, name_flags)) > max)
2013 max = ret;
2014 }
2015
2016 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2017 &child, &children) == 0) {
2018 for (c = 0; c < children; c++)
2019 if ((ret = max_width(zhp, child[c], depth + 2,
2020 max, name_flags)) > max)
2021 max = ret;
2022 }
2023
2024 return (max);
2025 }
2026
2027 typedef struct spare_cbdata {
2028 uint64_t cb_guid;
2029 zpool_handle_t *cb_zhp;
2030 } spare_cbdata_t;
2031
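/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev whose guid
 * matches 'search'; return B_TRUE if one is found.
 */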
2032 static boolean_t
2033 find_vdev(nvlist_t *nv, uint64_t search)
2034 {
2035 uint64_t guid;
2036 nvlist_t **child;
2037 uint_t c, children;
2038
2039 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
2040 search == guid)
2041 return (B_TRUE);
2042
2043 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2044 &child, &children) == 0) {
2045 for (c = 0; c < children; c++)
2046 if (find_vdev(child[c], search))
2047 return (B_TRUE);
2048 }
2049
2050 return (B_FALSE);
2051 }
2052
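/*
 * zpool_iter() callback: if the pool's vdev tree contains a vdev matching
 * cb_guid, stash the open handle in cb_zhp and stop iterating (return 1);
 * otherwise close the handle and keep going (return 0).
 */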
2053 static int
2054 find_spare(zpool_handle_t *zhp, void *data)
2055 {
2056 spare_cbdata_t *cbp = data;
2057 nvlist_t *config, *nvroot;
2058
2059 config = zpool_get_config(zhp, NULL);
2060 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2061 &nvroot) == 0);
2062
2063 if (find_vdev(nvroot, cbp->cb_guid)) {
2064 cbp->cb_zhp = zhp;
2065 return (1);
2066 }
2067
2068 zpool_close(zhp);
2069 return (0);
2070 }
2071
2072 typedef struct status_cbdata {
2073 int cb_count;
2074 int cb_name_flags;
2075 int cb_namewidth;
2076 boolean_t cb_allpools;
2077 boolean_t cb_verbose;
2078 boolean_t cb_literal;
2079 boolean_t cb_explain;
2080 boolean_t cb_first;
2081 boolean_t cb_dedup_stats;
2082 boolean_t cb_print_status;
2083 boolean_t cb_print_slow_ios;
2084 boolean_t cb_print_vdev_init;
2085 boolean_t cb_print_vdev_trim;
2086 vdev_cmd_data_list_t *vcdl;
2087 } status_cbdata_t;
2088
2089 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
2090 static int
2091 is_blank_str(char *str)
2092 {
2093 while (str != NULL && *str != '\0') {
2094 if (!isblank(*str))
2095 return (0);
2096 str++;
2097 }
2098 return (1);
2099 }
2100
2101 /* Print command output lines for a specific vdev in a specific pool */
2102 static void
2103 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
2104 {
2105 vdev_cmd_data_t *data;
2106 int i, j;
2107 char *val;
2108
2109 for (i = 0; i < vcdl->count; i++) {
2110 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2111 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2112 /* Not the vdev we're looking for */
2113 continue;
2114 }
2115
2116 data = &vcdl->data[i];
2117 /* Print out all the output values for this vdev */
2118 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2119 val = NULL;
2120 /* Does this vdev have values for this column? */
2121 for (int k = 0; k < data->cols_cnt; k++) {
2122 if (strcmp(data->cols[k],
2123 vcdl->uniq_cols[j]) == 0) {
2124 /* yes it does, record the value */
2125 val = data->lines[k];
2126 break;
2127 }
2128 }
2129 /*
2130 * Mark empty values with dashes to make output
2131 * awk-able.
2132 */
2133 if (val == NULL || is_blank_str(val))
2134 val = "-";
2135
2136 printf("%*s", vcdl->uniq_cols_width[j], val);
2137 if (j < vcdl->uniq_cols_cnt - 1)
2138 printf(" ");
2139 }
2140
2141 /* Print out any values that aren't in a column at the end */
2142 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2143 /* Did we have any columns? If so print a spacer. */
2144 if (vcdl->uniq_cols_cnt > 0)
2145 printf(" ");
2146
2147 val = data->lines[j];
2148 printf("%s", val ? val : "");
2149 }
2150 break;
2151 }
2152 }
2153
2154 /*
2155 * Print vdev initialization status for leaves
2156 */
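/*
 * For example, in verbose mode this appends text such as the following to a
 * vdev line (percentage and timestamp are illustrative):
 *
 *	(28% initialized, started at Fri Jan 14 10:12:01 2022)
 *	(uninitialized)
 */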
2157 static void
2158 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2159 {
2160 if (verbose) {
2161 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2162 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2163 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2164 !vs->vs_scan_removing) {
2165 char zbuf[1024];
2166 char tbuf[256];
2167 struct tm zaction_ts;
2168
2169 time_t t = vs->vs_initialize_action_time;
2170 int initialize_pct = 100;
2171 if (vs->vs_initialize_state !=
2172 VDEV_INITIALIZE_COMPLETE) {
2173 initialize_pct = (vs->vs_initialize_bytes_done *
2174 100 / (vs->vs_initialize_bytes_est + 1));
2175 }
2176
2177 (void) localtime_r(&t, &zaction_ts);
2178 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2179
2180 switch (vs->vs_initialize_state) {
2181 case VDEV_INITIALIZE_SUSPENDED:
2182 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2183 gettext("suspended, started at"), tbuf);
2184 break;
2185 case VDEV_INITIALIZE_ACTIVE:
2186 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2187 gettext("started at"), tbuf);
2188 break;
2189 case VDEV_INITIALIZE_COMPLETE:
2190 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2191 gettext("completed at"), tbuf);
2192 break;
2193 }
2194
2195 (void) printf(gettext(" (%d%% initialized%s)"),
2196 initialize_pct, zbuf);
2197 } else {
2198 (void) printf(gettext(" (uninitialized)"));
2199 }
2200 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2201 (void) printf(gettext(" (initializing)"));
2202 }
2203 }
2204
2205 /*
2206 * Print vdev TRIM status for leaves
2207 */
2208 static void
2209 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2210 {
2211 if (verbose) {
2212 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2213 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2214 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2215 !vs->vs_scan_removing) {
2216 char zbuf[1024];
2217 char tbuf[256];
2218 struct tm zaction_ts;
2219
2220 time_t t = vs->vs_trim_action_time;
2221 int trim_pct = 100;
2222 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2223 trim_pct = (vs->vs_trim_bytes_done *
2224 100 / (vs->vs_trim_bytes_est + 1));
2225 }
2226
2227 (void) localtime_r(&t, &zaction_ts);
2228 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2229
2230 switch (vs->vs_trim_state) {
2231 case VDEV_TRIM_SUSPENDED:
2232 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2233 gettext("suspended, started at"), tbuf);
2234 break;
2235 case VDEV_TRIM_ACTIVE:
2236 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2237 gettext("started at"), tbuf);
2238 break;
2239 case VDEV_TRIM_COMPLETE:
2240 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2241 gettext("completed at"), tbuf);
2242 break;
2243 }
2244
2245 (void) printf(gettext(" (%d%% trimmed%s)"),
2246 trim_pct, zbuf);
2247 } else if (vs->vs_trim_notsup) {
2248 (void) printf(gettext(" (trim unsupported)"));
2249 } else {
2250 (void) printf(gettext(" (untrimmed)"));
2251 }
2252 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2253 (void) printf(gettext(" (trimming)"));
2254 }
2255 }
2256
2257 /*
2258 * Return the color associated with a health string. This includes returning
2259 * NULL for no color change.
2260 */
2261 static char *
2262 health_str_to_color(const char *health)
2263 {
2264 if (strcmp(health, gettext("FAULTED")) == 0 ||
2265 strcmp(health, gettext("SUSPENDED")) == 0 ||
2266 strcmp(health, gettext("UNAVAIL")) == 0) {
2267 return (ANSI_RED);
2268 }
2269
2270 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2271 strcmp(health, gettext("DEGRADED")) == 0 ||
2272 strcmp(health, gettext("REMOVED")) == 0) {
2273 return (ANSI_YELLOW);
2274 }
2275
2276 return (NULL);
2277 }
2278
2279 /*
2280 * Print out configuration state as requested by status_callback.
2281 */
2282 static void
2283 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2284 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2285 {
2286 nvlist_t **child, *root;
2287 uint_t c, i, vsc, children;
2288 pool_scan_stat_t *ps = NULL;
2289 vdev_stat_t *vs;
2290 char rbuf[6], wbuf[6], cbuf[6];
2291 char *vname;
2292 uint64_t notpresent;
2293 spare_cbdata_t spare_cb;
2294 const char *state;
2295 char *type;
2296 char *path = NULL;
2297 char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
2298
2299 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2300 &child, &children) != 0)
2301 children = 0;
2302
2303 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2304 (uint64_t **)&vs, &vsc) == 0);
2305
2306 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2307
2308 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2309 return;
2310
2311 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2312
2313 if (isspare) {
2314 /*
2315 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
2316 * online drives.
2317 */
2318 if (vs->vs_aux == VDEV_AUX_SPARED)
2319 state = gettext("INUSE");
2320 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2321 state = gettext("AVAIL");
2322 }
2323
2324 printf_color(health_str_to_color(state),
2325 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2326 name, state);
2327
2328 if (!isspare) {
2329 if (vs->vs_read_errors)
2330 rcolor = ANSI_RED;
2331
2332 if (vs->vs_write_errors)
2333 wcolor = ANSI_RED;
2334
2335 if (vs->vs_checksum_errors)
2336 ccolor = ANSI_RED;
2337
2338 if (cb->cb_literal) {
2339 printf(" ");
2340 printf_color(rcolor, "%5llu",
2341 (u_longlong_t)vs->vs_read_errors);
2342 printf(" ");
2343 printf_color(wcolor, "%5llu",
2344 (u_longlong_t)vs->vs_write_errors);
2345 printf(" ");
2346 printf_color(ccolor, "%5llu",
2347 (u_longlong_t)vs->vs_checksum_errors);
2348 } else {
2349 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2350 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2351 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2352 sizeof (cbuf));
2353 printf(" ");
2354 printf_color(rcolor, "%5s", rbuf);
2355 printf(" ");
2356 printf_color(wcolor, "%5s", wbuf);
2357 printf(" ");
2358 printf_color(ccolor, "%5s", cbuf);
2359 }
2360 if (cb->cb_print_slow_ios) {
2361 if (children == 0) {
2362 /* Only leaf vdevs have slow IOs */
2363 zfs_nicenum(vs->vs_slow_ios, rbuf,
2364 sizeof (rbuf));
2365 } else {
2366 snprintf(rbuf, sizeof (rbuf), "-");
2367 }
2368
2369 if (cb->cb_literal)
2370 printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
2371 else
2372 printf(" %5s", rbuf);
2373 }
2374 }
2375
2376 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2377 &notpresent) == 0) {
2378 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
2379 (void) printf(" %s %s", gettext("was"), path);
2380 } else if (vs->vs_aux != 0) {
2381 (void) printf(" ");
2382 color_start(ANSI_RED);
2383 switch (vs->vs_aux) {
2384 case VDEV_AUX_OPEN_FAILED:
2385 (void) printf(gettext("cannot open"));
2386 break;
2387
2388 case VDEV_AUX_BAD_GUID_SUM:
2389 (void) printf(gettext("missing device"));
2390 break;
2391
2392 case VDEV_AUX_NO_REPLICAS:
2393 (void) printf(gettext("insufficient replicas"));
2394 break;
2395
2396 case VDEV_AUX_VERSION_NEWER:
2397 (void) printf(gettext("newer version"));
2398 break;
2399
2400 case VDEV_AUX_UNSUP_FEAT:
2401 (void) printf(gettext("unsupported feature(s)"));
2402 break;
2403
2404 case VDEV_AUX_ASHIFT_TOO_BIG:
2405 (void) printf(gettext("unsupported minimum blocksize"));
2406 break;
2407
2408 case VDEV_AUX_SPARED:
2409 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2410 &spare_cb.cb_guid) == 0);
2411 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
2412 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
2413 zpool_get_name(zhp)) == 0)
2414 (void) printf(gettext("currently in "
2415 "use"));
2416 else
2417 (void) printf(gettext("in use by "
2418 "pool '%s'"),
2419 zpool_get_name(spare_cb.cb_zhp));
2420 zpool_close(spare_cb.cb_zhp);
2421 } else {
2422 (void) printf(gettext("currently in use"));
2423 }
2424 break;
2425
2426 case VDEV_AUX_ERR_EXCEEDED:
2427 (void) printf(gettext("too many errors"));
2428 break;
2429
2430 case VDEV_AUX_IO_FAILURE:
2431 (void) printf(gettext("experienced I/O failures"));
2432 break;
2433
2434 case VDEV_AUX_BAD_LOG:
2435 (void) printf(gettext("bad intent log"));
2436 break;
2437
2438 case VDEV_AUX_EXTERNAL:
2439 (void) printf(gettext("external device fault"));
2440 break;
2441
2442 case VDEV_AUX_SPLIT_POOL:
2443 (void) printf(gettext("split into new pool"));
2444 break;
2445
2446 case VDEV_AUX_ACTIVE:
2447 (void) printf(gettext("currently in use"));
2448 break;
2449
2450 case VDEV_AUX_CHILDREN_OFFLINE:
2451 (void) printf(gettext("all children offline"));
2452 break;
2453
2454 case VDEV_AUX_BAD_LABEL:
2455 (void) printf(gettext("invalid label"));
2456 break;
2457
2458 default:
2459 (void) printf(gettext("corrupted data"));
2460 break;
2461 }
2462 color_end();
2463 } else if (children == 0 && !isspare &&
2464 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
2465 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
2466 vs->vs_configured_ashift < vs->vs_physical_ashift) {
2467 (void) printf(
2468 gettext(" block size: %dB configured, %dB native"),
2469 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
2470 }
2471
2472 if (vs->vs_scan_removing != 0) {
2473 (void) printf(gettext(" (removing)"));
2474 } else if (vs->vs_noalloc != 0) {
2475 (void) printf(gettext(" (non-allocating)"));
2476 }
2477
2478 /* The root vdev has the scrub/resilver stats */
2479 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2480 ZPOOL_CONFIG_VDEV_TREE);
2481 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
2482 (uint64_t **)&ps, &c);
2483
2484 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0) {
2485 if (vs->vs_scan_processed != 0) {
2486 (void) printf(gettext(" (%s)"),
2487 (ps->pss_func == POOL_SCAN_RESILVER) ?
2488 "resilvering" : "repairing");
2489 } else if (vs->vs_resilver_deferred) {
2490 (void) printf(gettext(" (awaiting resilver)"));
2491 }
2492 }
2493
2494 /* The top-level vdevs have the rebuild stats */
2495 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
2496 children == 0) {
2497 if (vs->vs_rebuild_processed != 0) {
2498 (void) printf(gettext(" (resilvering)"));
2499 }
2500 }
2501
2502 if (cb->vcdl != NULL) {
2503 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2504 printf(" ");
2505 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
2506 }
2507 }
2508
2509 /* Display vdev initialization and trim status for leaves. */
2510 if (children == 0) {
2511 print_status_initialize(vs, cb->cb_print_vdev_init);
2512 print_status_trim(vs, cb->cb_print_vdev_trim);
2513 }
2514
2515 (void) printf("\n");
2516
2517 for (c = 0; c < children; c++) {
2518 uint64_t islog = B_FALSE, ishole = B_FALSE;
2519
2520 /* Don't print logs or holes here */
2521 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2522 &islog);
2523 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2524 &ishole);
2525 if (islog || ishole)
2526 continue;
2527 /* Only print normal classes here */
2528 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2529 continue;
2530
2531 /* Provide vdev_rebuild_stats to children if available */
2532 if (vrs == NULL) {
2533 (void) nvlist_lookup_uint64_array(nv,
2534 ZPOOL_CONFIG_REBUILD_STATS,
2535 (uint64_t **)&vrs, &i);
2536 }
2537
2538 vname = zpool_vdev_name(g_zfs, zhp, child[c],
2539 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2540 print_status_config(zhp, cb, vname, child[c], depth + 2,
2541 isspare, vrs);
2542 free(vname);
2543 }
2544 }
2545
2546 /*
2547 * Print the configuration of an exported pool. Iterate over all vdevs in the
2548 * pool, printing out the name and status for each one.
2549 */
2550 static void
2551 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
2552 int depth)
2553 {
2554 nvlist_t **child;
2555 uint_t c, children;
2556 vdev_stat_t *vs;
2557 char *type, *vname;
2558
2559 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2560 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
2561 strcmp(type, VDEV_TYPE_HOLE) == 0)
2562 return;
2563
2564 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2565 (uint64_t **)&vs, &c) == 0);
2566
2567 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
2568 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
2569
2570 if (vs->vs_aux != 0) {
2571 (void) printf(" ");
2572
2573 switch (vs->vs_aux) {
2574 case VDEV_AUX_OPEN_FAILED:
2575 (void) printf(gettext("cannot open"));
2576 break;
2577
2578 case VDEV_AUX_BAD_GUID_SUM:
2579 (void) printf(gettext("missing device"));
2580 break;
2581
2582 case VDEV_AUX_NO_REPLICAS:
2583 (void) printf(gettext("insufficient replicas"));
2584 break;
2585
2586 case VDEV_AUX_VERSION_NEWER:
2587 (void) printf(gettext("newer version"));
2588 break;
2589
2590 case VDEV_AUX_UNSUP_FEAT:
2591 (void) printf(gettext("unsupported feature(s)"));
2592 break;
2593
2594 case VDEV_AUX_ERR_EXCEEDED:
2595 (void) printf(gettext("too many errors"));
2596 break;
2597
2598 case VDEV_AUX_ACTIVE:
2599 (void) printf(gettext("currently in use"));
2600 break;
2601
2602 case VDEV_AUX_CHILDREN_OFFLINE:
2603 (void) printf(gettext("all children offline"));
2604 break;
2605
2606 case VDEV_AUX_BAD_LABEL:
2607 (void) printf(gettext("invalid label"));
2608 break;
2609
2610 default:
2611 (void) printf(gettext("corrupted data"));
2612 break;
2613 }
2614 }
2615 (void) printf("\n");
2616
2617 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2618 &child, &children) != 0)
2619 return;
2620
2621 for (c = 0; c < children; c++) {
2622 uint64_t is_log = B_FALSE;
2623
2624 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2625 &is_log);
2626 if (is_log)
2627 continue;
2628 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2629 continue;
2630
2631 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2632 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2633 print_import_config(cb, vname, child[c], depth + 2);
2634 free(vname);
2635 }
2636
2637 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2638 &child, &children) == 0) {
2639 (void) printf(gettext("\tcache\n"));
2640 for (c = 0; c < children; c++) {
2641 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2642 cb->cb_name_flags);
2643 (void) printf("\t %s\n", vname);
2644 free(vname);
2645 }
2646 }
2647
2648 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2649 &child, &children) == 0) {
2650 (void) printf(gettext("\tspares\n"));
2651 for (c = 0; c < children; c++) {
2652 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2653 cb->cb_name_flags);
2654 (void) printf("\t %s\n", vname);
2655 free(vname);
2656 }
2657 }
2658 }
2659
2660 /*
2661 * Print specialized class vdevs.
2662 *
2663 * These are recorded as top level vdevs in the main pool child array
2664 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
2665 * print_status_config() or print_import_config() to print the top level
2666 * class vdevs, then any of their children (e.g. mirrored slogs) are printed
2667 * recursively, which works because only the top-level vdev is marked.
2668 */
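/*
 * For example, a pool with a mirrored 'special' class vdev would get a
 * section like the following (device names and spacing are illustrative):
 *
 *	special
 *	  mirror-1    ONLINE       0     0     0
 *	    sdc       ONLINE       0     0     0
 *	    sdd       ONLINE       0     0     0
 */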
2669 static void
2670 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
2671 const char *class)
2672 {
2673 uint_t c, children;
2674 nvlist_t **child;
2675 boolean_t printed = B_FALSE;
2676
2677 assert(zhp != NULL || !cb->cb_verbose);
2678
2679 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
2680 &children) != 0)
2681 return;
2682
2683 for (c = 0; c < children; c++) {
2684 uint64_t is_log = B_FALSE;
2685 char *bias = NULL;
2686 char *type = NULL;
2687
2688 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2689 &is_log);
2690
2691 if (is_log) {
2692 bias = VDEV_ALLOC_CLASS_LOGS;
2693 } else {
2694 (void) nvlist_lookup_string(child[c],
2695 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
2696 (void) nvlist_lookup_string(child[c],
2697 ZPOOL_CONFIG_TYPE, &type);
2698 }
2699
2700 if (bias == NULL || strcmp(bias, class) != 0)
2701 continue;
2702 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2703 continue;
2704
2705 if (!printed) {
2706 (void) printf("\t%s\t\n", gettext(class));
2707 printed = B_TRUE;
2708 }
2709
2710 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
2711 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2712 if (cb->cb_print_status)
2713 print_status_config(zhp, cb, name, child[c], 2,
2714 B_FALSE, NULL);
2715 else
2716 print_import_config(cb, name, child[c], 2);
2717 free(name);
2718 }
2719 }
2720
2721 /*
2722 * Display the status for the given pool.
2723 */
2724 static int
2725 show_import(nvlist_t *config, boolean_t report_error)
2726 {
2727 uint64_t pool_state;
2728 vdev_stat_t *vs;
2729 char *name;
2730 uint64_t guid;
2731 uint64_t hostid = 0;
2732 char *msgid;
2733 char *hostname = "unknown";
2734 nvlist_t *nvroot, *nvinfo;
2735 zpool_status_t reason;
2736 zpool_errata_t errata;
2737 const char *health;
2738 uint_t vsc;
2739 char *comment;
2740 status_cbdata_t cb = { 0 };
2741
2742 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2743 &name) == 0);
2744 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2745 &guid) == 0);
2746 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2747 &pool_state) == 0);
2748 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2749 &nvroot) == 0);
2750
2751 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
2752 (uint64_t **)&vs, &vsc) == 0);
2753 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2754
2755 reason = zpool_import_status(config, &msgid, &errata);
2756
2757 /*
2758 * If we're importing using a cachefile, then we won't report any
2759 * errors unless we are in the scan phase of the import.
2760 */
2761 if (reason != ZPOOL_STATUS_OK && !report_error)
2762 return (reason);
2763
2764 (void) printf(gettext(" pool: %s\n"), name);
2765 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
2766 (void) printf(gettext(" state: %s"), health);
2767 if (pool_state == POOL_STATE_DESTROYED)
2768 (void) printf(gettext(" (DESTROYED)"));
2769 (void) printf("\n");
2770
2771 switch (reason) {
2772 case ZPOOL_STATUS_MISSING_DEV_R:
2773 case ZPOOL_STATUS_MISSING_DEV_NR:
2774 case ZPOOL_STATUS_BAD_GUID_SUM:
2775 printf_color(ANSI_BOLD, gettext("status: "));
2776 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2777 "missing from the system.\n"));
2778 break;
2779
2780 case ZPOOL_STATUS_CORRUPT_LABEL_R:
2781 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
2782 printf_color(ANSI_BOLD, gettext("status: "));
2783 printf_color(ANSI_YELLOW, gettext("One or more devices contain"
2784 " corrupted data.\n"));
2785 break;
2786
2787 case ZPOOL_STATUS_CORRUPT_DATA:
2788 (void) printf(
2789 gettext(" status: The pool data is corrupted.\n"));
2790 break;
2791
2792 case ZPOOL_STATUS_OFFLINE_DEV:
2793 printf_color(ANSI_BOLD, gettext("status: "));
2794 printf_color(ANSI_YELLOW, gettext("One or more devices "
2795 "are offlined.\n"));
2796 break;
2797
2798 case ZPOOL_STATUS_CORRUPT_POOL:
2799 printf_color(ANSI_BOLD, gettext("status: "));
2800 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
2801 "corrupted.\n"));
2802 break;
2803
2804 case ZPOOL_STATUS_VERSION_OLDER:
2805 printf_color(ANSI_BOLD, gettext("status: "));
2806 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2807 "a legacy on-disk version.\n"));
2808 break;
2809
2810 case ZPOOL_STATUS_VERSION_NEWER:
2811 printf_color(ANSI_BOLD, gettext("status: "));
2812 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2813 "an incompatible version.\n"));
2814 break;
2815
2816 case ZPOOL_STATUS_FEAT_DISABLED:
2817 printf_color(ANSI_BOLD, gettext("status: "));
2818 printf_color(ANSI_YELLOW, gettext("Some supported "
2819 "features are not enabled on the pool.\n\t"
2820 "(Note that they may be intentionally disabled "
2821 "if the\n\t'compatibility' property is set.)\n"));
2822 break;
2823
2824 case ZPOOL_STATUS_COMPATIBILITY_ERR:
2825 printf_color(ANSI_BOLD, gettext("status: "));
2826 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
2827 "the file(s) indicated by the 'compatibility'\n"
2828 "property.\n"));
2829 break;
2830
2831 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
2832 printf_color(ANSI_BOLD, gettext("status: "));
2833 printf_color(ANSI_YELLOW, gettext("One or more features "
2834 "are enabled on the pool despite not being\n"
2835 "requested by the 'compatibility' property.\n"));
2836 break;
2837
2838 case ZPOOL_STATUS_UNSUP_FEAT_READ:
2839 printf_color(ANSI_BOLD, gettext("status: "));
2840 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
2841 "feature(s) not supported on this system:\n"));
2842 color_start(ANSI_YELLOW);
2843 zpool_print_unsup_feat(config);
2844 color_end();
2845 break;
2846
2847 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
2848 printf_color(ANSI_BOLD, gettext("status: "));
2849 printf_color(ANSI_YELLOW, gettext("The pool can only be "
2850 "accessed in read-only mode on this system. It\n\tcannot be"
2851 " accessed in read-write mode because it uses the "
2852 "following\n\tfeature(s) not supported on this system:\n"));
2853 color_start(ANSI_YELLOW);
2854 zpool_print_unsup_feat(config);
2855 color_end();
2856 break;
2857
2858 case ZPOOL_STATUS_HOSTID_ACTIVE:
2859 printf_color(ANSI_BOLD, gettext("status: "));
2860 printf_color(ANSI_YELLOW, gettext("The pool is currently "
2861 "imported by another system.\n"));
2862 break;
2863
2864 case ZPOOL_STATUS_HOSTID_REQUIRED:
2865 printf_color(ANSI_BOLD, gettext("status: "));
2866 printf_color(ANSI_YELLOW, gettext("The pool has the "
2867 "multihost property on. It cannot\n\tbe safely imported "
2868 "when the system hostid is not set.\n"));
2869 break;
2870
2871 case ZPOOL_STATUS_HOSTID_MISMATCH:
2872 printf_color(ANSI_BOLD, gettext("status: "));
2873 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
2874 "by another system.\n"));
2875 break;
2876
2877 case ZPOOL_STATUS_FAULTED_DEV_R:
2878 case ZPOOL_STATUS_FAULTED_DEV_NR:
2879 printf_color(ANSI_BOLD, gettext("status: "));
2880 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2881 "faulted.\n"));
2882 break;
2883
2884 case ZPOOL_STATUS_BAD_LOG:
2885 printf_color(ANSI_BOLD, gettext("status: "));
2886 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
2887 "be read.\n"));
2888 break;
2889
2890 case ZPOOL_STATUS_RESILVERING:
2891 case ZPOOL_STATUS_REBUILDING:
2892 printf_color(ANSI_BOLD, gettext("status: "));
2893 printf_color(ANSI_YELLOW, gettext("One or more devices were "
2894 "being resilvered.\n"));
2895 break;
2896
2897 case ZPOOL_STATUS_ERRATA:
2898 printf_color(ANSI_BOLD, gettext("status: "));
2899 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
2900 errata);
2901 break;
2902
2903 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
2904 printf_color(ANSI_BOLD, gettext("status: "));
2905 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2906 "configured to use a non-native block size.\n"
2907 "\tExpect reduced performance.\n"));
2908 break;
2909
2910 default:
2911 /*
2912 * No other status can be seen when importing pools.
2913 */
2914 assert(reason == ZPOOL_STATUS_OK);
2915 }
2916
2917 /*
2918 * Print out an action according to the overall state of the pool.
2919 */
2920 if (vs->vs_state == VDEV_STATE_HEALTHY) {
2921 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
2922 reason == ZPOOL_STATUS_FEAT_DISABLED) {
2923 (void) printf(gettext(" action: The pool can be "
2924 "imported using its name or numeric identifier, "
2925 "though\n\tsome features will not be available "
2926 "without an explicit 'zpool upgrade'.\n"));
2927 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
2928 (void) printf(gettext(" action: The pool can be "
2929 "imported using its name or numeric\n\tidentifier, "
2930 "though the file(s) indicated by its "
2931 "'compatibility'\n\tproperty cannot be parsed at "
2932 "this time.\n"));
2933 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
2934 (void) printf(gettext(" action: The pool can be "
2935 "imported using its name or numeric "
2936 "identifier and\n\tthe '-f' flag.\n"));
2937 } else if (reason == ZPOOL_STATUS_ERRATA) {
2938 switch (errata) {
2939 case ZPOOL_ERRATA_NONE:
2940 break;
2941
2942 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
2943 (void) printf(gettext(" action: The pool can "
2944 "be imported using its name or numeric "
2945 "identifier,\n\thowever there is a compat"
2946 "ibility issue which should be corrected"
2947 "\n\tby running 'zpool scrub'\n"));
2948 break;
2949
2950 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
2951 (void) printf(gettext(" action: The pool can"
2952 "not be imported with this version of ZFS "
2953 "due to\n\tan active asynchronous destroy. "
2954 "Revert to an earlier version\n\tand "
2955 "allow the destroy to complete before "
2956 "updating.\n"));
2957 break;
2958
2959 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
2960 (void) printf(gettext(" action: Existing "
2961 "encrypted datasets contain an on-disk "
2962 "incompatibility, which\n\tneeds to be "
2963 "corrected. Backup these datasets to new "
2964 "encrypted datasets\n\tand destroy the "
2965 "old ones.\n"));
2966 break;
2967
2968 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
2969 (void) printf(gettext(" action: Existing "
2970 "encrypted snapshots and bookmarks contain "
2971 "an on-disk\n\tincompatibility. This may "
2972 "cause on-disk corruption if they are used"
2973 "\n\twith 'zfs recv'. To correct the "
2974 "issue, enable the bookmark_v2 feature.\n\t"
2975 "No additional action is needed if there "
2976 "are no encrypted snapshots or\n\t"
2977 "bookmarks. If preserving the encrypted "
2978 "snapshots and bookmarks is\n\trequired, "
2979 "use a non-raw send to backup and restore "
2980 "them. Alternately,\n\tthey may be removed"
2981 " to resolve the incompatibility.\n"));
2982 break;
2983 default:
2984 /*
2985 * All errata must contain an action message.
2986 */
2987 assert(0);
2988 }
2989 } else {
2990 (void) printf(gettext(" action: The pool can be "
2991 "imported using its name or numeric "
2992 "identifier.\n"));
2993 }
2994 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
2995 (void) printf(gettext(" action: The pool can be imported "
2996 "despite missing or damaged devices. The\n\tfault "
2997 "tolerance of the pool may be compromised if imported.\n"));
2998 } else {
2999 switch (reason) {
3000 case ZPOOL_STATUS_VERSION_NEWER:
3001 (void) printf(gettext(" action: The pool cannot be "
3002 "imported. Access the pool on a system running "
3003 "newer\n\tsoftware, or recreate the pool from "
3004 "backup.\n"));
3005 break;
3006 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3007 printf_color(ANSI_BOLD, gettext("action: "));
3008 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3009 "imported. Access the pool on a system that "
3010 "supports\n\tthe required feature(s), or recreate "
3011 "the pool from backup.\n"));
3012 break;
3013 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3014 printf_color(ANSI_BOLD, gettext("action: "));
3015 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3016 "imported in read-write mode. Import the pool "
3017 "with\n"
3018 "\t\"-o readonly=on\", access the pool on a system "
3019 "that supports the\n\trequired feature(s), or "
3020 "recreate the pool from backup.\n"));
3021 break;
3022 case ZPOOL_STATUS_MISSING_DEV_R:
3023 case ZPOOL_STATUS_MISSING_DEV_NR:
3024 case ZPOOL_STATUS_BAD_GUID_SUM:
3025 (void) printf(gettext(" action: The pool cannot be "
3026 "imported. Attach the missing\n\tdevices and try "
3027 "again.\n"));
3028 break;
3029 case ZPOOL_STATUS_HOSTID_ACTIVE:
3030 VERIFY0(nvlist_lookup_nvlist(config,
3031 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3032
3033 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3034 hostname = fnvlist_lookup_string(nvinfo,
3035 ZPOOL_CONFIG_MMP_HOSTNAME);
3036
3037 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3038 hostid = fnvlist_lookup_uint64(nvinfo,
3039 ZPOOL_CONFIG_MMP_HOSTID);
3040
3041 (void) printf(gettext(" action: The pool must be "
3042 "exported from %s (hostid=%lx)\n\tbefore it "
3043 "can be safely imported.\n"), hostname,
3044 (unsigned long) hostid);
3045 break;
3046 case ZPOOL_STATUS_HOSTID_REQUIRED:
3047 (void) printf(gettext(" action: Set a unique system "
3048 "hostid with the zgenhostid(8) command.\n"));
3049 break;
3050 default:
3051 (void) printf(gettext(" action: The pool cannot be "
3052 "imported due to damaged devices or data.\n"));
3053 }
3054 }
3055
3056 /* Print the comment attached to the pool. */
3057 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3058 (void) printf(gettext("comment: %s\n"), comment);
3059
3060 /*
3061 * If the state is "closed" or "can't open", and the aux state
3062 * is "corrupt data":
3063 */
3064 if (((vs->vs_state == VDEV_STATE_CLOSED) ||
3065 (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
3066 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
3067 if (pool_state == POOL_STATE_DESTROYED)
3068 (void) printf(gettext("\tThe pool was destroyed, "
3069 "but can be imported using the '-Df' flags.\n"));
3070 else if (pool_state != POOL_STATE_EXPORTED)
3071 (void) printf(gettext("\tThe pool may be active on "
3072 "another system, but can be imported using\n\t"
3073 "the '-f' flag.\n"));
3074 }
3075
3076 if (msgid != NULL) {
3077 (void) printf(gettext(
3078 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3079 msgid);
3080 }
3081
3082 (void) printf(gettext(" config:\n\n"));
3083
3084 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3085 VDEV_NAME_TYPE_ID);
3086 if (cb.cb_namewidth < 10)
3087 cb.cb_namewidth = 10;
3088
3089 print_import_config(&cb, name, nvroot, 0);
3090
3091 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3092 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3093 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3094
3095 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3096 (void) printf(gettext("\n\tAdditional devices are known to "
3097 "be part of this pool, though their\n\texact "
3098 "configuration cannot be determined.\n"));
3099 }
3100 return (0);
3101 }
3102
3103 static boolean_t
3104 zfs_force_import_required(nvlist_t *config)
3105 {
3106 uint64_t state;
3107 uint64_t hostid = 0;
3108 nvlist_t *nvinfo;
3109
3110 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3111 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
3112
3113 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3114 return (B_TRUE);
3115
3116 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3117 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3118 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3119 ZPOOL_CONFIG_MMP_STATE);
3120
3121 if (mmp_state != MMP_STATE_INACTIVE)
3122 return (B_TRUE);
3123 }
3124
3125 return (B_FALSE);
3126 }
3127
3128 /*
3129 * Perform the import for the given configuration. This passes the heavy
3130 * lifting off to zpool_import_props(), and then mounts the datasets contained
3131 * within the pool.
3132 */
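/*
 * Returns 0 on success and 1 on failure. Note that a failure to load
 * encryption keys (ZFS_IMPORT_LOAD_KEYS) only sets the return value; the
 * import still proceeds and the datasets are still mounted.
 */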
3133 static int
3134 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3135 nvlist_t *props, int flags)
3136 {
3137 int ret = 0;
3138 zpool_handle_t *zhp;
3139 char *name;
3140 uint64_t version;
3141
3142 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3143 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3144
3145 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3146 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3147 "is formatted using an unsupported ZFS version\n"), name);
3148 return (1);
3149 } else if (zfs_force_import_required(config) &&
3150 !(flags & ZFS_IMPORT_ANY_HOST)) {
3151 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3152 nvlist_t *nvinfo;
3153
3154 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3155 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3156 mmp_state = fnvlist_lookup_uint64(nvinfo,
3157 ZPOOL_CONFIG_MMP_STATE);
3158
3159 if (mmp_state == MMP_STATE_ACTIVE) {
3160 char *hostname = "<unknown>";
3161 uint64_t hostid = 0;
3162
3163 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3164 hostname = fnvlist_lookup_string(nvinfo,
3165 ZPOOL_CONFIG_MMP_HOSTNAME);
3166
3167 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3168 hostid = fnvlist_lookup_uint64(nvinfo,
3169 ZPOOL_CONFIG_MMP_HOSTID);
3170
3171 (void) fprintf(stderr, gettext("cannot import '%s': "
3172 "pool is imported on %s (hostid: "
3173 "0x%lx)\nExport the pool on the other system, "
3174 "then run 'zpool import'.\n"),
3175 name, hostname, (unsigned long) hostid);
3176 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3177 (void) fprintf(stderr, gettext("Cannot import '%s': "
3178 "pool has the multihost property on and the\n"
3179 "system's hostid is not set. Set a unique hostid "
3180 "with the zgenhostid(8) command.\n"), name);
3181 } else {
3182 char *hostname = "<unknown>";
3183 uint64_t timestamp = 0;
3184 uint64_t hostid = 0;
3185
3186 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3187 hostname = fnvlist_lookup_string(config,
3188 ZPOOL_CONFIG_HOSTNAME);
3189
3190 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3191 timestamp = fnvlist_lookup_uint64(config,
3192 ZPOOL_CONFIG_TIMESTAMP);
3193
3194 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3195 hostid = fnvlist_lookup_uint64(config,
3196 ZPOOL_CONFIG_HOSTID);
3197
3198 (void) fprintf(stderr, gettext("cannot import '%s': "
3199 "pool was previously in use from another system.\n"
3200 "Last accessed by %s (hostid=%lx) at %s"
3201 "The pool can be imported, use 'zpool import -f' "
3202 "to import the pool.\n"), name, hostname,
3203 (unsigned long)hostid, ctime((time_t *)&timestamp));
3204 }
3205
3206 return (1);
3207 }
3208
3209 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3210 return (1);
3211
3212 if (newname != NULL)
3213 name = (char *)newname;
3214
3215 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3216 return (1);
3217
3218 /*
3219 * Loading keys is best effort. We don't want to return immediately
3220 * if it fails but we do want to give the error to the caller.
3221 */
3222 if (flags & ZFS_IMPORT_LOAD_KEYS) {
3223 ret = zfs_crypto_attempt_load_keys(g_zfs, name);
3224 if (ret != 0)
3225 ret = 1;
3226 }
3227
3228 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3229 !(flags & ZFS_IMPORT_ONLY) &&
3230 zpool_enable_datasets(zhp, mntopts, 0) != 0) {
3231 zpool_close(zhp);
3232 return (1);
3233 }
3234
3235 zpool_close(zhp);
3236 return (ret);
3237 }
3238
3239 static int
3240 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3241 char *orig_name, char *new_name,
3242 boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
3243 importargs_t *import)
3244 {
3245 nvlist_t *config = NULL;
3246 nvlist_t *found_config = NULL;
3247 uint64_t pool_state;
3248
3249 /*
3250 * At this point we have a list of import candidate configs. Even if
3251 * we were searching by pool name or guid, we still need to
3252 * post-process the list to deal with pool state and possible
3253 * duplicate names.
3254 */
3255 int err = 0;
3256 nvpair_t *elem = NULL;
3257 boolean_t first = B_TRUE;
3258 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3259
3260 verify(nvpair_value_nvlist(elem, &config) == 0);
3261
3262 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3263 &pool_state) == 0);
3264 if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
3265 continue;
3266 if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
3267 continue;
3268
3269 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3270 import->policy) == 0);
3271
3272 if (!pool_specified) {
3273 if (first)
3274 first = B_FALSE;
3275 else if (!do_all)
3276 (void) printf("\n");
3277
3278 if (do_all) {
3279 err |= do_import(config, NULL, mntopts,
3280 props, flags);
3281 } else {
3282 /*
3283 * If we're importing from cachefile, then
3284 * we don't want to report errors until we
3285 * are in the scan phase of the import. If
3286 * we get an error, then we return that error
3287 * to invoke the scan phase.
3288 */
3289 if (import->cachefile && !import->scan)
3290 err = show_import(config, B_FALSE);
3291 else
3292 (void) show_import(config, B_TRUE);
3293 }
3294 } else if (import->poolname != NULL) {
3295 char *name;
3296
3297 /*
3298 * We are searching for a pool based on name.
3299 */
3300 verify(nvlist_lookup_string(config,
3301 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
3302
3303 if (strcmp(name, import->poolname) == 0) {
3304 if (found_config != NULL) {
3305 (void) fprintf(stderr, gettext(
3306 "cannot import '%s': more than "
3307 "one matching pool\n"),
3308 import->poolname);
3309 (void) fprintf(stderr, gettext(
3310 "import by numeric ID instead\n"));
3311 err = B_TRUE;
3312 }
3313 found_config = config;
3314 }
3315 } else {
3316 uint64_t guid;
3317
3318 /*
3319 * Search for a pool by guid.
3320 */
3321 verify(nvlist_lookup_uint64(config,
3322 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
3323
3324 if (guid == import->guid)
3325 found_config = config;
3326 }
3327 }
3328
3329 /*
3330 * If we were searching for a specific pool, verify that we found a
3331 * pool, and then do the import.
3332 */
3333 if (pool_specified && err == 0) {
3334 if (found_config == NULL) {
3335 (void) fprintf(stderr, gettext("cannot import '%s': "
3336 "no such pool available\n"), orig_name);
3337 err = B_TRUE;
3338 } else {
3339 err |= do_import(found_config, new_name,
3340 mntopts, props, flags);
3341 }
3342 }
3343
3344 /*
3345 * If we were just looking for pools, report an error if none were
3346 * found.
3347 */
3348 if (!pool_specified && first)
3349 (void) fprintf(stderr,
3350 gettext("no pools available to import\n"));
3351 return (err);
3352 }
3353
3354 typedef struct target_exists_args {
3355 const char *poolname;
3356 uint64_t poolguid;
3357 } target_exists_args_t;
3358
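/*
 * zpool_iter() callback: return 1 if the imported pool matches the name or
 * guid given in the target_exists_args_t passed as 'data', 0 otherwise. The
 * pool handle is always closed before returning.
 */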
3359 static int
3360 name_or_guid_exists(zpool_handle_t *zhp, void *data)
3361 {
3362 target_exists_args_t *args = data;
3363 nvlist_t *config = zpool_get_config(zhp, NULL);
3364 int found = 0;
3365
3366 if (config == NULL)
3367 return (0);
3368
3369 if (args->poolname != NULL) {
3370 char *pool_name;
3371
3372 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3373 &pool_name) == 0);
3374 if (strcmp(pool_name, args->poolname) == 0)
3375 found = 1;
3376 } else {
3377 uint64_t pool_guid;
3378
3379 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3380 &pool_guid) == 0);
3381 if (pool_guid == args->poolguid)
3382 found = 1;
3383 }
3384 zpool_close(zhp);
3385
3386 return (found);
3387 }
3388 /*
3389 * zpool checkpoint <pool>
3390 * checkpoint --discard <pool>
3391 *
3392 * -d Discard the checkpoint from a checkpointed
3393 * --discard pool.
3394 *
3395 * -w Wait for discarding a checkpoint to complete.
3396 * --wait
3397 *
3398 * Checkpoints the specified pool, by taking a "snapshot" of its
3399 * current state. A pool can only have one checkpoint at a time.
3400 */
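/*
 * For example (the pool name 'tank' is illustrative):
 *
 *	# zpool checkpoint tank
 *	# zpool checkpoint -d tank
 *	# zpool checkpoint -d -w tank
 */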
3401 int
3402 zpool_do_checkpoint(int argc, char **argv)
3403 {
3404 boolean_t discard, wait;
3405 char *pool;
3406 zpool_handle_t *zhp;
3407 int c, err;
3408
3409 struct option long_options[] = {
3410 {"discard", no_argument, NULL, 'd'},
3411 {"wait", no_argument, NULL, 'w'},
3412 {0, 0, 0, 0}
3413 };
3414
3415 discard = B_FALSE;
3416 wait = B_FALSE;
3417 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
3418 switch (c) {
3419 case 'd':
3420 discard = B_TRUE;
3421 break;
3422 case 'w':
3423 wait = B_TRUE;
3424 break;
3425 case '?':
3426 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3427 optopt);
3428 usage(B_FALSE);
3429 }
3430 }
3431
3432 if (wait && !discard) {
3433 (void) fprintf(stderr, gettext("--wait only valid when "
3434 "--discard also specified\n"));
3435 usage(B_FALSE);
3436 }
3437
3438 argc -= optind;
3439 argv += optind;
3440
3441 if (argc < 1) {
3442 (void) fprintf(stderr, gettext("missing pool argument\n"));
3443 usage(B_FALSE);
3444 }
3445
3446 if (argc > 1) {
3447 (void) fprintf(stderr, gettext("too many arguments\n"));
3448 usage(B_FALSE);
3449 }
3450
3451 pool = argv[0];
3452
3453 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
3454 /* As a special case, check for use of '/' in the name */
3455 if (strchr(pool, '/') != NULL)
3456 (void) fprintf(stderr, gettext("'zpool checkpoint' "
3457 "doesn't work on datasets. To save the state "
3458 "of a dataset from a specific point in time "
3459 "please use 'zfs snapshot'\n"));
3460 return (1);
3461 }
3462
3463 if (discard) {
3464 err = (zpool_discard_checkpoint(zhp) != 0);
3465 if (err == 0 && wait)
3466 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
3467 } else {
3468 err = (zpool_checkpoint(zhp) != 0);
3469 }
3470
3471 zpool_close(zhp);
3472
3473 return (err);
3474 }
3475
3476 #define CHECKPOINT_OPT 1024
3477
3478 /*
3479 * zpool import [-d dir] [-D]
3480 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3481 * [-d dir | -c cachefile | -s] [-f] -a
3482 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3483 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
3484 * [newpool]
3485 *
3486 * -c Read pool information from a cachefile instead of searching
3487 * devices. If importing from a cachefile config fails, then
3488 * fallback to searching for devices only in the directories that
3489 * exist in the cachefile.
3490 *
3491 * -d Scan in a specific directory, other than /dev/. More than
3492 * one directory can be specified using multiple '-d' options.
3493 *
3494 * -D Scan for previously destroyed pools, or import all (-a) or only
3495 * the specified destroyed pools.
3496 *
3497 * -R Temporarily import the pool, with all mountpoints relative to
3498 * the given root. The pool will remain exported when the machine
3499 * is rebooted.
3500 *
3501 * -V Import even in the presence of faulted vdevs. This is an
3502 * intentionally undocumented option for testing purposes, and
3503 * treats the pool configuration as complete, leaving any bad
3504 * vdevs in the FAULTED state. In other words, it does verbatim
3505 * import.
3506 *
3507 * -f Force import, even if it appears that the pool is active.
3508 *
3509 * -F Attempt rewind if necessary.
3510 *
3511 * -n See if rewind would work, but don't actually rewind.
3512 *
3513 * -N Import the pool but don't mount datasets.
3514 *
3515 * -T Specify a starting txg to use for import. This is an
3516 * intentionally undocumented option for testing purposes.
3517 *
3518 * -a Import all pools found.
3519 *
3520 * -l Load encryption keys while importing.
3521 *
3522 * -o Set property=value and/or temporary mount options (without '=').
3523 *
3524 * -s Scan using the default search path, the libblkid cache will
3525 * not be consulted.
3526 *
3527 * --rewind-to-checkpoint
3528 * Import the pool and revert back to the checkpoint.
3529 *
3530 * The import command scans for pools to import, and imports pools based on
3531 * pool name or GUID. The pool can also be renamed as part of the import process.
3532 */
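/*
 * For example (pool and directory names are illustrative):
 *
 *	# zpool import				(list importable pools)
 *	# zpool import -a			(import every pool found)
 *	# zpool import -d /dev/disk/by-id tank
 *	# zpool import tank newtank		(import under a new name)
 */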
3533 int
3534 zpool_do_import(int argc, char **argv)
3535 {
3536 char **searchdirs = NULL;
3537 char *env, *envdup = NULL;
3538 int nsearch = 0;
3539 int c;
3540 int err = 0;
3541 nvlist_t *pools = NULL;
3542 boolean_t do_all = B_FALSE;
3543 boolean_t do_destroyed = B_FALSE;
3544 char *mntopts = NULL;
3545 uint64_t searchguid = 0;
3546 char *searchname = NULL;
3547 char *propval;
3548 nvlist_t *policy = NULL;
3549 nvlist_t *props = NULL;
3550 int flags = ZFS_IMPORT_NORMAL;
3551 uint32_t rewind_policy = ZPOOL_NO_REWIND;
3552 boolean_t dryrun = B_FALSE;
3553 boolean_t do_rewind = B_FALSE;
3554 boolean_t xtreme_rewind = B_FALSE;
3555 boolean_t do_scan = B_FALSE;
3556 boolean_t pool_exists = B_FALSE;
3557 boolean_t pool_specified = B_FALSE;
3558 uint64_t txg = -1ULL;
3559 char *cachefile = NULL;
3560 importargs_t idata = { 0 };
3561 char *endptr;
3562
3563 struct option long_options[] = {
3564 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
3565 {0, 0, 0, 0}
3566 };
3567
3568 /* check options */
3569 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
3570 long_options, NULL)) != -1) {
3571 switch (c) {
3572 case 'a':
3573 do_all = B_TRUE;
3574 break;
3575 case 'c':
3576 cachefile = optarg;
3577 break;
3578 case 'd':
3579 searchdirs = safe_realloc(searchdirs,
3580 (nsearch + 1) * sizeof (char *));
3581 searchdirs[nsearch++] = optarg;
3582 break;
3583 case 'D':
3584 do_destroyed = B_TRUE;
3585 break;
3586 case 'f':
3587 flags |= ZFS_IMPORT_ANY_HOST;
3588 break;
3589 case 'F':
3590 do_rewind = B_TRUE;
3591 break;
3592 case 'l':
3593 flags |= ZFS_IMPORT_LOAD_KEYS;
3594 break;
3595 case 'm':
3596 flags |= ZFS_IMPORT_MISSING_LOG;
3597 break;
3598 case 'n':
3599 dryrun = B_TRUE;
3600 break;
3601 case 'N':
3602 flags |= ZFS_IMPORT_ONLY;
3603 break;
3604 case 'o':
3605 if ((propval = strchr(optarg, '=')) != NULL) {
3606 *propval = '\0';
3607 propval++;
3608 if (add_prop_list(optarg, propval,
3609 &props, B_TRUE))
3610 goto error;
3611 } else {
3612 mntopts = optarg;
3613 }
3614 break;
3615 case 'R':
3616 if (add_prop_list(zpool_prop_to_name(
3617 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
3618 goto error;
3619 if (add_prop_list_default(zpool_prop_to_name(
3620 ZPOOL_PROP_CACHEFILE), "none", &props))
3621 goto error;
3622 break;
3623 case 's':
3624 do_scan = B_TRUE;
3625 break;
3626 case 't':
3627 flags |= ZFS_IMPORT_TEMP_NAME;
3628 if (add_prop_list_default(zpool_prop_to_name(
3629 ZPOOL_PROP_CACHEFILE), "none", &props))
3630 goto error;
3631 break;
3632
3633 case 'T':
3634 errno = 0;
3635 txg = strtoull(optarg, &endptr, 0);
3636 if (errno != 0 || *endptr != '\0') {
3637 (void) fprintf(stderr,
3638 gettext("invalid txg value\n"));
3639 usage(B_FALSE);
3640 }
3641 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
3642 break;
3643 case 'V':
3644 flags |= ZFS_IMPORT_VERBATIM;
3645 break;
3646 case 'X':
3647 xtreme_rewind = B_TRUE;
3648 break;
3649 case CHECKPOINT_OPT:
3650 flags |= ZFS_IMPORT_CHECKPOINT;
3651 break;
3652 case ':':
3653 (void) fprintf(stderr, gettext("missing argument for "
3654 "'%c' option\n"), optopt);
3655 usage(B_FALSE);
3656 break;
3657 case '?':
3658 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3659 optopt);
3660 usage(B_FALSE);
3661 }
3662 }
3663
3664 argc -= optind;
3665 argv += optind;
3666
3667 if (cachefile && nsearch != 0) {
3668 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
3669 usage(B_FALSE);
3670 }
3671
3672 if (cachefile && do_scan) {
3673 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
3674 usage(B_FALSE);
3675 }
3676
3677 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
3678 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
3679 usage(B_FALSE);
3680 }
3681
3682 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
3683 (void) fprintf(stderr, gettext("-l is only meaningful during "
3684 "an import\n"));
3685 usage(B_FALSE);
3686 }
3687
3688 if ((dryrun || xtreme_rewind) && !do_rewind) {
3689 (void) fprintf(stderr,
3690 gettext("-n or -X only meaningful with -F\n"));
3691 usage(B_FALSE);
3692 }
3693 if (dryrun)
3694 rewind_policy = ZPOOL_TRY_REWIND;
3695 else if (do_rewind)
3696 rewind_policy = ZPOOL_DO_REWIND;
3697 if (xtreme_rewind)
3698 rewind_policy |= ZPOOL_EXTREME_REWIND;
3699
3700 /* In the future, we can capture further policy and include it here */
3701 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
3702 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
3703 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
3704 rewind_policy) != 0)
3705 goto error;
3706
3707 /* check argument count */
3708 if (do_all) {
3709 if (argc != 0) {
3710 (void) fprintf(stderr, gettext("too many arguments\n"));
3711 usage(B_FALSE);
3712 }
3713 } else {
3714 if (argc > 2) {
3715 (void) fprintf(stderr, gettext("too many arguments\n"));
3716 usage(B_FALSE);
3717 }
3718 }
3719
3720 /*
3721 * Check for the effective uid. We do this explicitly here because
3722 * otherwise any attempt to discover pools will silently fail.
3723 */
3724 if (argc == 0 && geteuid() != 0) {
3725 (void) fprintf(stderr, gettext("cannot "
3726 "discover pools: permission denied\n"));
3727 if (searchdirs != NULL)
3728 free(searchdirs);
3729
3730 nvlist_free(props);
3731 nvlist_free(policy);
3732 return (1);
3733 }
3734
3735 /*
3736 * Depending on the arguments given, we do one of the following:
3737 *
3738 * <none> Iterate through all pools and display information about
3739 * each one.
3740 *
3741 * -a Iterate through all pools and try to import each one.
3742 *
3743 * <id> Find the pool that corresponds to the given GUID/pool
3744 * name and import that one.
3745 *
3746 * -D The above options apply only to destroyed pools.
3747 */
3748 if (argc != 0) {
3749 char *endptr;
3750
3751 errno = 0;
3752 searchguid = strtoull(argv[0], &endptr, 10);
3753 if (errno != 0 || *endptr != '\0') {
3754 searchname = argv[0];
3755 searchguid = 0;
3756 }
3757 pool_specified = B_TRUE;
3758
3759 /*
3760 * User specified a name or guid. Ensure it's unique.
3761 */
3762 target_exists_args_t search = {searchname, searchguid};
3763 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
3764 }
3765
3766 /*
3767 * Check the environment for the preferred search path.
3768 */
3769 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
3770 char *dir, *tmp = NULL;
3771
3772 envdup = strdup(env);
3773
3774 for (dir = strtok_r(envdup, ":", &tmp);
3775 dir != NULL;
3776 dir = strtok_r(NULL, ":", &tmp)) {
3777 searchdirs = safe_realloc(searchdirs,
3778 (nsearch + 1) * sizeof (char *));
3779 searchdirs[nsearch++] = dir;
3780 }
3781 }
3782
3783 idata.path = searchdirs;
3784 idata.paths = nsearch;
3785 idata.poolname = searchname;
3786 idata.guid = searchguid;
3787 idata.cachefile = cachefile;
3788 idata.scan = do_scan;
3789 idata.policy = policy;
3790
3791 pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
3792
3793 if (pools != NULL && pool_exists &&
3794 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
3795 (void) fprintf(stderr, gettext("cannot import '%s': "
3796 "a pool with that name already exists\n"),
3797 argv[0]);
3798 (void) fprintf(stderr, gettext("use the form '%s "
3799 "<pool | id> <newpool>' to give it a new name\n"),
3800 "zpool import");
3801 err = 1;
3802 } else if (pools == NULL && pool_exists) {
3803 (void) fprintf(stderr, gettext("cannot import '%s': "
3804 "a pool with that name is already created/imported,\n"),
3805 argv[0]);
3806 (void) fprintf(stderr, gettext("and no additional pools "
3807 "with that name were found\n"));
3808 err = 1;
3809 } else if (pools == NULL) {
3810 if (argc != 0) {
3811 (void) fprintf(stderr, gettext("cannot import '%s': "
3812 "no such pool available\n"), argv[0]);
3813 }
3814 err = 1;
3815 }
3816
3817 if (err == 1) {
3818 free(searchdirs);
3819 free(envdup);
3820 nvlist_free(policy);
3821 nvlist_free(pools);
3822 nvlist_free(props);
3823 return (1);
3824 }
3825
3826 err = import_pools(pools, props, mntopts, flags,
3827 argc >= 1 ? argv[0] : NULL,
3828 argc >= 2 ? argv[1] : NULL,
3829 do_destroyed, pool_specified, do_all, &idata);
3830
3831 /*
3832 * If we're using the cachefile and we failed to import, then
3833 * fallback to scanning the directory for pools that match
3834 * those in the cachefile.
3835 */
3836 if (err != 0 && cachefile != NULL) {
3837 (void) printf(gettext("cachefile import failed, retrying\n"));
3838
3839 /*
3840 * We use the scan flag to gather the directories that exist
3841 * in the cachefile. If we need to fallback to searching for
3842 * the pool config, we will only search devices in these
3843 * directories.
3844 */
3845 idata.scan = B_TRUE;
3846 nvlist_free(pools);
3847 pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
3848
3849 err = import_pools(pools, props, mntopts, flags,
3850 argc >= 1 ? argv[0] : NULL,
3851 argc >= 2 ? argv[1] : NULL,
3852 do_destroyed, pool_specified, do_all, &idata);
3853 }
3854
3855 error:
3856 nvlist_free(props);
3857 nvlist_free(pools);
3858 nvlist_free(policy);
3859 free(searchdirs);
3860 free(envdup);
3861
3862 return (err ? 1 : 0);
3863 }
3864
3865 /*
3866 * zpool sync [-f] [pool] ...
3867 *
3868 * -f (undocumented) Force an uberblock (and config, including the zpool
3869 * cache file) update.
3870 *
3871 * Sync the specified pool(s).
3872 * Without arguments "zpool sync" will sync all pools.
3873 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
3874 *
3875 */
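/*
 * For example (pool names are illustrative):
 *
 *	# zpool sync			(sync all imported pools)
 *	# zpool sync tank dozer
 */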
3876 static int
3877 zpool_do_sync(int argc, char **argv)
3878 {
3879 int ret;
3880 boolean_t force = B_FALSE;
3881
3882 /* check options */
3883 while ((ret = getopt(argc, argv, "f")) != -1) {
3884 switch (ret) {
3885 case 'f':
3886 force = B_TRUE;
3887 break;
3888 case '?':
3889 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3890 optopt);
3891 usage(B_FALSE);
3892 }
3893 }
3894
3895 argc -= optind;
3896 argv += optind;
3897
3898 /* if argc == 0 we will execute zpool_sync_one on all pools */
3899 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
3900 B_FALSE, zpool_sync_one, &force);
3901
3902 return (ret);
3903 }
3904
3905 typedef struct iostat_cbdata {
3906 uint64_t cb_flags;
3907 int cb_namewidth;
3908 int cb_iteration;
3909 boolean_t cb_verbose;
3910 boolean_t cb_literal;
3911 boolean_t cb_scripted;
3912 zpool_list_t *cb_list;
3913 vdev_cmd_data_list_t *vcdl;
3914 vdev_cbdata_t cb_vdevs;
3915 } iostat_cbdata_t;
3916
3917 /* iostat labels */
3918 typedef struct name_and_columns {
3919 const char *name; /* Column name */
3920 unsigned int columns; /* Center name to this number of columns */
3921 } name_and_columns_t;
3922
3923 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
3924
3925 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
3926 {
3927 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
3928 {NULL}},
3929 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
3930 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
3931 {NULL}},
3932 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
3933 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
3934 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
3935 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
3936 {"asyncq_wait", 2}, {NULL}},
3937 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
3938 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
3939 {"trim", 2}, {"rebuild", 2}, {NULL}},
3940 };
3941
3942 /* Shorthand - if "columns" field not set, default to 1 column */
3943 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
3944 {
3945 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
3946 {"write"}, {NULL}},
3947 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
3948 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
3949 {NULL}},
3950 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
3951 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
3952 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
3953 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
3954 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
3955 {NULL}},
3956 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
3957 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
3958 {"ind"}, {"agg"}, {NULL}},
3959 };
3960
3961 static const char *histo_to_title[] = {
3962 [IOS_L_HISTO] = "latency",
3963 [IOS_RQ_HISTO] = "req_size",
3964 };
3965
3966 /*
3967 * Return the number of labels in a null-terminated name_and_columns_t
3968 * array.
3969 *
3970 */
3971 static unsigned int
3972 label_array_len(const name_and_columns_t *labels)
3973 {
3974 int i = 0;
3975
3976 while (labels[i].name)
3977 i++;
3978
3979 return (i);
3980 }
3981
3982 /*
3983 * Return the number of strings in a null-terminated string array.
3984 * For example:
3985 *
3986 * const char foo[] = {"bar", "baz", NULL}
3987  * const char *foo[] = {"bar", "baz", NULL};
3988 * returns 2
3989 */
3990 static uint64_t
3991 str_array_len(const char *array[])
3992 {
3993 uint64_t i = 0;
3994 while (array[i])
3995 i++;
3996
3997 return (i);
3998 }
3999
4000
4001 /*
4002 * Return a default column width for default/latency/queue columns. This does
4003 * not include histograms, which have their columns autosized.
4004 */
4005 static unsigned int
4006 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4007 {
4008 unsigned long column_width = 5; /* Normal niceprint */
4009 static unsigned long widths[] = {
4010 /*
4011 * Choose some sane default column sizes for printing the
4012 * raw numbers.
4013 */
4014 [IOS_DEFAULT] = 15, /* 1PB capacity */
4015 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4016 [IOS_QUEUES] = 6, /* 1M queue entries */
4017 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4018 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4019 };
4020
4021 if (cb->cb_literal)
4022 column_width = widths[type];
4023
4024 return (column_width);
4025 }
4026
4027 /*
4028  * Print the column labels, i.e.:
4029 *
4030 * capacity operations bandwidth
4031 * alloc free read write read write ...
4032 *
4033 * If force_column_width is set, use it for the column width. If not set, use
4034 * the default column width.
4035 */
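/*
 * Centering arithmetic, as a worked example (hypothetical numbers): with
 * column_width = 5 and a label spanning two columns, such as "operations"
 * (strlen = 10), the code below computes rw_column_width = 5 * 2 + 2 = 12 and
 * text_start = 12 / 2 - 10 / 2 = 1, so the label is printed one space into
 * its column pair.
 */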
4036 static void
4037 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4038 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4039 {
4040 int i, idx, s;
4041 int text_start, rw_column_width, spaces_to_end;
4042 uint64_t flags = cb->cb_flags;
4043 uint64_t f;
4044 unsigned int column_width = force_column_width;
4045
4046 /* For each bit set in flags */
4047 for (f = flags; f; f &= ~(1ULL << idx)) {
4048 idx = lowbit64(f) - 1;
4049 if (!force_column_width)
4050 column_width = default_column_width(cb, idx);
4051 /* Print our top labels centered over "read write" label. */
4052 for (i = 0; i < label_array_len(labels[idx]); i++) {
4053 const char *name = labels[idx][i].name;
4054 /*
4055 * We treat labels[][].columns == 0 as shorthand
4056 * for one column. It makes writing out the label
4057 * tables more concise.
4058 */
4059 unsigned int columns = MAX(1, labels[idx][i].columns);
4060 unsigned int slen = strlen(name);
4061
4062 rw_column_width = (column_width * columns) +
4063 (2 * (columns - 1));
4064
4065 text_start = (int)((rw_column_width) / columns -
4066 slen / columns);
4067 if (text_start < 0)
4068 text_start = 0;
4069
4070 printf(" "); /* Two spaces between columns */
4071
4072 /* Space from beginning of column to label */
4073 for (s = 0; s < text_start; s++)
4074 printf(" ");
4075
4076 printf("%s", name);
4077
4078 /* Print space after label to end of column */
4079 spaces_to_end = rw_column_width - text_start - slen;
4080 if (spaces_to_end < 0)
4081 spaces_to_end = 0;
4082
4083 for (s = 0; s < spaces_to_end; s++)
4084 printf(" ");
4085 }
4086 }
4087 }
4088
4089
4090 /*
4091 * print_cmd_columns - Print custom column titles from -c
4092 *
4093 * If the user specified the "zpool status|iostat -c" then print their custom
4094 * column titles in the header. For example, print_cmd_columns() would print
4095 * the " col1 col2" part of this:
4096 *
4097 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4098 * ...
4099 * capacity operations bandwidth
4100 * pool alloc free read write read write col1 col2
4101 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4102 * mypool 269K 1008M 0 0 107 946
4103 * mirror 269K 1008M 0 0 107 946
4104 * sdb - - 0 0 102 473 val1 val2
4105 * sdc - - 0 0 5 473 val1 val2
4106 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4107 */
4108 static void
4109 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4110 {
4111 int i, j;
4112 vdev_cmd_data_t *data = &vcdl->data[0];
4113
4114 if (vcdl->count == 0 || data == NULL)
4115 return;
4116
4117 /*
4118 * Each vdev cmd should have the same column names unless the user did
4119 * something weird with their cmd. Just take the column names from the
4120 * first vdev and assume it works for all of them.
4121 */
4122 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4123 printf(" ");
4124 if (use_dashes) {
4125 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4126 printf("-");
4127 } else {
4128 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4129 vcdl->uniq_cols[i]);
4130 }
4131 }
4132 }
4133
4134
4135 /*
4136 * Utility function to print out a line of dashes like:
4137 *
4138 * -------------------------------- ----- ----- ----- ----- -----
4139 *
4140 * ...or a dashed named-row line like:
4141 *
4142 * logs - - - - -
4143 *
4144 * @cb: iostat data
4145 *
4146  * @force_column_width: If non-zero, use the value as the column width.
4147 * Otherwise use the default column widths.
4148 *
4149 * @name: Print a dashed named-row line starting
4150 * with @name. Otherwise, print a regular
4151 * dashed line.
4152 */
4153 static void
4154 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4155 const char *name)
4156 {
4157 int i;
4158 unsigned int namewidth;
4159 uint64_t flags = cb->cb_flags;
4160 uint64_t f;
4161 int idx;
4162 const name_and_columns_t *labels;
4163 const char *title;
4164
4165
4166 if (cb->cb_flags & IOS_ANYHISTO_M) {
4167 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4168 } else if (cb->cb_vdevs.cb_names_count) {
4169 title = "vdev";
4170 } else {
4171 title = "pool";
4172 }
4173
4174 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4175 name ? strlen(name) : 0);
4176
4177
4178 if (name) {
4179 printf("%-*s", namewidth, name);
4180 } else {
4181 for (i = 0; i < namewidth; i++)
4182 (void) printf("-");
4183 }
4184
4185 /* For each bit in flags */
4186 for (f = flags; f; f &= ~(1ULL << idx)) {
4187 unsigned int column_width;
4188 idx = lowbit64(f) - 1;
4189 if (force_column_width)
4190 column_width = force_column_width;
4191 else
4192 column_width = default_column_width(cb, idx);
4193
4194 labels = iostat_bottom_labels[idx];
4195 for (i = 0; i < label_array_len(labels); i++) {
4196 if (name)
4197 printf(" %*s-", column_width - 1, " ");
4198 else
4199 printf(" %.*s", column_width,
4200 "--------------------");
4201 }
4202 }
4203 }
4204
4205
4206 static void
4207 print_iostat_separator_impl(iostat_cbdata_t *cb,
4208 unsigned int force_column_width)
4209 {
4210 print_iostat_dashes(cb, force_column_width, NULL);
4211 }
4212
4213 static void
4214 print_iostat_separator(iostat_cbdata_t *cb)
4215 {
4216 print_iostat_separator_impl(cb, 0);
4217 }
4218
4219 static void
4220 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
4221 const char *histo_vdev_name)
4222 {
4223 unsigned int namewidth;
4224 const char *title;
4225
4226 if (cb->cb_flags & IOS_ANYHISTO_M) {
4227 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4228 } else if (cb->cb_vdevs.cb_names_count) {
4229 title = "vdev";
4230 } else {
4231 title = "pool";
4232 }
4233
4234 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4235 histo_vdev_name ? strlen(histo_vdev_name) : 0);
4236
4237 if (histo_vdev_name)
4238 printf("%-*s", namewidth, histo_vdev_name);
4239 else
4240 printf("%*s", namewidth, "");
4241
4242
4243 print_iostat_labels(cb, force_column_width, iostat_top_labels);
4244 printf("\n");
4245
4246 printf("%-*s", namewidth, title);
4247
4248 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
4249 if (cb->vcdl != NULL)
4250 print_cmd_columns(cb->vcdl, 0);
4251
4252 printf("\n");
4253
4254 print_iostat_separator_impl(cb, force_column_width);
4255
4256 if (cb->vcdl != NULL)
4257 print_cmd_columns(cb->vcdl, 1);
4258
4259 printf("\n");
4260 }
4261
4262 static void
4263 print_iostat_header(iostat_cbdata_t *cb)
4264 {
4265 print_iostat_header_impl(cb, 0, NULL);
4266 }
4267
4268
4269 /*
4270 * Display a single statistic.
4271 */
4272 static void
4273 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
4274 unsigned int column_size, boolean_t scripted)
4275 {
4276 char buf[64];
4277
4278 zfs_nicenum_format(value, buf, sizeof (buf), format);
4279
4280 if (scripted)
4281 printf("\t%s", buf);
4282 else
4283 printf(" %*s", column_size, buf);
4284 }
4285
4286 /*
4287 * Calculate the default vdev stats
4288 *
4289 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
4290 * stats into calcvs.
4291 */
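/*
 * For example (hypothetical numbers): if oldvs recorded 1000 reads and newvs
 * records 1500, calcvs ends up with 500 reads for the interval;
 * print_iostat_default() later multiplies that delta by the caller's scaling
 * factor (NANOSEC / tdelta) to turn it into a per-second rate.
 */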
4292 static void
4293 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
4294 vdev_stat_t *calcvs)
4295 {
4296 int i;
4297
4298 memcpy(calcvs, newvs, sizeof (*calcvs));
4299 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
4300 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
4301
4302 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
4303 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
4304 }
4305
4306 /*
4307 * Internal representation of the extended iostats data.
4308 *
4309 * The extended iostat stats are exported in nvlists as either uint64_t arrays
4310 * or single uint64_t's. We make both look like arrays to make them easier
4311 * to process. In order to make single uint64_t's look like arrays, we set
4312 * __data to the stat data, and then set *data = &__data with count = 1. Then,
4313 * we can just use *data and count.
4314 */
4315 struct stat_array {
4316 uint64_t *data;
4317 uint_t count; /* Number of entries in data[] */
4318 uint64_t __data; /* Only used when data is a single uint64_t */
4319 };
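
/*
 * Illustrative sketch (not part of the build) of the single-value case the
 * comment above describes:
 *
 *	struct stat_array sa;
 *	sa.__data = 42;			/- the lone uint64_t stat -/
 *	sa.data = &sa.__data;
 *	sa.count = 1;
 *	callers can then use sa.data[0] and sa.count like any other array
 */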
4320
4321 static uint64_t
4322 stat_histo_max(struct stat_array *nva, unsigned int len)
4323 {
4324 uint64_t max = 0;
4325 int i;
4326 for (i = 0; i < len; i++)
4327 max = MAX(max, array64_max(nva[i].data, nva[i].count));
4328
4329 return (max);
4330 }
4331
4332 /*
4333 * Helper function to lookup a uint64_t array or uint64_t value and store its
4334 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
4335 * it look like a one element array to make it easier to process.
4336 */
4337 static int
4338 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
4339 struct stat_array *nva)
4340 {
4341 nvpair_t *tmp;
4342 int ret;
4343
4344 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
4345 switch (nvpair_type(tmp)) {
4346 case DATA_TYPE_UINT64_ARRAY:
4347 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
4348 break;
4349 case DATA_TYPE_UINT64:
4350 ret = nvpair_value_uint64(tmp, &nva->__data);
4351 nva->data = &nva->__data;
4352 nva->count = 1;
4353 break;
4354 default:
4355 /* Not a uint64_t */
4356 ret = EINVAL;
4357 break;
4358 }
4359
4360 return (ret);
4361 }
4362
4363 /*
4364 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
4365 * subtract them, and return the results in a newly allocated stat_array.
4366 * You must free the returned array after you are done with it with
4367 * free_calc_stats().
4368 *
4369 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
4370 * values.
4371 */
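/*
 * Hypothetical usage sketch (names chosen for illustration only):
 *
 *	const char *names[] = { ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO };
 *	struct stat_array *nva =
 *	    calc_and_alloc_stats_ex(names, 1, oldnv, newnv);
 *	... read nva[0].data[0 .. nva[0].count - 1] ...
 *	free_calc_stats(nva, 1);
 */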
4372 static struct stat_array *
4373 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
4374 nvlist_t *newnv)
4375 {
4376 nvlist_t *oldnvx = NULL, *newnvx;
4377 struct stat_array *oldnva, *newnva, *calcnva;
4378 int i, j;
4379 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
4380
4381 /* Extract our extended stats nvlist from the main list */
4382 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4383 &newnvx) == 0);
4384 if (oldnv) {
4385 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4386 &oldnvx) == 0);
4387 }
4388
4389 newnva = safe_malloc(alloc_size);
4390 oldnva = safe_malloc(alloc_size);
4391 calcnva = safe_malloc(alloc_size);
4392
4393 for (j = 0; j < len; j++) {
4394 verify(nvpair64_to_stat_array(newnvx, names[j],
4395 &newnva[j]) == 0);
4396 calcnva[j].count = newnva[j].count;
4397 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
4398 calcnva[j].data = safe_malloc(alloc_size);
4399 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
4400
4401 if (oldnvx) {
4402 verify(nvpair64_to_stat_array(oldnvx, names[j],
4403 &oldnva[j]) == 0);
4404 for (i = 0; i < oldnva[j].count; i++)
4405 calcnva[j].data[i] -= oldnva[j].data[i];
4406 }
4407 }
4408 free(newnva);
4409 free(oldnva);
4410 return (calcnva);
4411 }
4412
4413 static void
4414 free_calc_stats(struct stat_array *nva, unsigned int len)
4415 {
4416 int i;
4417 for (i = 0; i < len; i++)
4418 free(nva[i].data);
4419
4420 free(nva);
4421 }
4422
4423 static void
4424 print_iostat_histo(struct stat_array *nva, unsigned int len,
4425 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
4426 double scale)
4427 {
4428 int i, j;
4429 char buf[6];
4430 uint64_t val;
4431 enum zfs_nicenum_format format;
4432 unsigned int buckets;
4433 unsigned int start_bucket;
4434
4435 if (cb->cb_literal)
4436 format = ZFS_NICENUM_RAW;
4437 else
4438 format = ZFS_NICENUM_1024;
4439
4440 /* All these histos are the same size, so just use nva[0].count */
4441 buckets = nva[0].count;
4442
4443 if (cb->cb_flags & IOS_RQ_HISTO_M) {
4444 /* Start at 512 - req size should never be lower than this */
4445 start_bucket = 9;
4446 } else {
4447 start_bucket = 0;
4448 }
4449
4450 for (j = start_bucket; j < buckets; j++) {
4451 /* Print histogram bucket label */
4452 if (cb->cb_flags & IOS_L_HISTO_M) {
4453 /* Ending range of this bucket */
4454 val = (1UL << (j + 1)) - 1;
4455 zfs_nicetime(val, buf, sizeof (buf));
4456 } else {
4457 /* Request size (starting range of bucket) */
4458 val = (1UL << j);
4459 zfs_nicenum(val, buf, sizeof (buf));
4460 }
4461
4462 if (cb->cb_scripted)
4463 printf("%llu", (u_longlong_t)val);
4464 else
4465 printf("%-*s", namewidth, buf);
4466
4467 /* Print the values on the line */
4468 for (i = 0; i < len; i++) {
4469 print_one_stat(nva[i].data[j] * scale, format,
4470 column_width, cb->cb_scripted);
4471 }
4472 printf("\n");
4473 }
4474 }
4475
4476 static void
4477 print_solid_separator(unsigned int length)
4478 {
4479 while (length--)
4480 printf("-");
4481 printf("\n");
4482 }
4483
4484 static void
4485 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
4486 nvlist_t *newnv, double scale, const char *name)
4487 {
4488 unsigned int column_width;
4489 unsigned int namewidth;
4490 unsigned int entire_width;
4491 enum iostat_type type;
4492 struct stat_array *nva;
4493 const char **names;
4494 unsigned int names_len;
4495
4496 /* What type of histo are we? */
4497 type = IOS_HISTO_IDX(cb->cb_flags);
4498
4499 /* Get NULL-terminated array of nvlist names for our histo */
4500 names = vsx_type_to_nvlist[type];
4501 names_len = str_array_len(names); /* num of names */
4502
4503 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
4504
4505 if (cb->cb_literal) {
4506 column_width = MAX(5,
4507 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
4508 } else {
4509 column_width = 5;
4510 }
4511
4512 namewidth = MAX(cb->cb_namewidth,
4513 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
4514
4515 /*
4516 * Calculate the entire line width of what we're printing. The
4517 * +2 is for the two spaces between columns:
4518 */
4519 /* read write */
4520 /* ----- ----- */
4521 /* |___| <---------- column_width */
4522 /* */
4523 /* |__________| <--- entire_width */
4524 /* */
4525 entire_width = namewidth + (column_width + 2) *
4526 label_array_len(iostat_bottom_labels[type]);
4527
4528 if (cb->cb_scripted)
4529 printf("%s\n", name);
4530 else
4531 print_iostat_header_impl(cb, column_width, name);
4532
4533 print_iostat_histo(nva, names_len, cb, column_width,
4534 namewidth, scale);
4535
4536 free_calc_stats(nva, names_len);
4537 if (!cb->cb_scripted)
4538 print_solid_separator(entire_width);
4539 }
4540
4541 /*
4542 * Calculate the average latency of a power-of-two latency histogram
4543 */
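/*
 * Worked example (hypothetical counts): 10 I/Os in the 8ns-15ns bucket
 * (i = 3, midpoint 12ns) and 10 I/Os in the 16ns-31ns bucket (i = 4,
 * midpoint 24ns) give total = 10 * 12 + 10 * 24 = 360 and count = 20,
 * so the reported average is 18ns.
 */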
4544 static uint64_t
4545 single_histo_average(uint64_t *histo, unsigned int buckets)
4546 {
4547 int i;
4548 uint64_t count = 0, total = 0;
4549
4550 for (i = 0; i < buckets; i++) {
4551 /*
4552 * Our buckets are power-of-two latency ranges. Use the
4553 * midpoint latency of each bucket to calculate the average.
4554 * For example:
4555 *
4556 * Bucket Midpoint
4557 * 8ns-15ns: 12ns
4558 * 16ns-31ns: 24ns
4559 * ...
4560 */
4561 if (histo[i] != 0) {
4562 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
4563 count += histo[i];
4564 }
4565 }
4566
4567 /* Prevent divide by zero */
4568 return (count == 0 ? 0 : total / count);
4569 }
4570
4571 static void
4572 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
4573 {
4574 const char *names[] = {
4575 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
4576 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
4577 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
4578 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
4579 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
4580 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
4581 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
4582 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
4583 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
4584 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
4585 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
4586 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
4587 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
4588 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
4589 };
4590
4591 struct stat_array *nva;
4592
4593 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
4594 enum zfs_nicenum_format format;
4595
4596 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
4597
4598 if (cb->cb_literal)
4599 format = ZFS_NICENUM_RAW;
4600 else
4601 format = ZFS_NICENUM_1024;
4602
4603 for (int i = 0; i < ARRAY_SIZE(names); i++) {
4604 uint64_t val = nva[i].data[0];
4605 print_one_stat(val, format, column_width, cb->cb_scripted);
4606 }
4607
4608 free_calc_stats(nva, ARRAY_SIZE(names));
4609 }
4610
4611 static void
4612 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
4613 nvlist_t *newnv)
4614 {
4615 int i;
4616 uint64_t val;
4617 const char *names[] = {
4618 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
4619 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
4620 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
4621 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
4622 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
4623 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
4624 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
4625 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
4626 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
4627 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
4628 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
4629 };
4630 struct stat_array *nva;
4631
4632 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
4633 enum zfs_nicenum_format format;
4634
4635 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
4636
4637 if (cb->cb_literal)
4638 format = ZFS_NICENUM_RAWTIME;
4639 else
4640 format = ZFS_NICENUM_TIME;
4641
4642 /* Print our avg latencies on the line */
4643 for (i = 0; i < ARRAY_SIZE(names); i++) {
4644 /* Compute average latency for a latency histo */
4645 val = single_histo_average(nva[i].data, nva[i].count);
4646 print_one_stat(val, format, column_width, cb->cb_scripted);
4647 }
4648 free_calc_stats(nva, ARRAY_SIZE(names));
4649 }
4650
4651 /*
4652 * Print default statistics (capacity/operations/bandwidth)
4653 */
4654 static void
4655 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
4656 {
4657 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
4658 enum zfs_nicenum_format format;
4659 char na; /* char to print for "not applicable" values */
4660
4661 if (cb->cb_literal) {
4662 format = ZFS_NICENUM_RAW;
4663 na = '0';
4664 } else {
4665 format = ZFS_NICENUM_1024;
4666 na = '-';
4667 }
4668
4669 /* only toplevel vdevs have capacity stats */
4670 if (vs->vs_space == 0) {
4671 if (cb->cb_scripted)
4672 printf("\t%c\t%c", na, na);
4673 else
4674 printf(" %*c %*c", column_width, na, column_width,
4675 na);
4676 } else {
4677 print_one_stat(vs->vs_alloc, format, column_width,
4678 cb->cb_scripted);
4679 print_one_stat(vs->vs_space - vs->vs_alloc, format,
4680 column_width, cb->cb_scripted);
4681 }
4682
4683 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
4684 format, column_width, cb->cb_scripted);
4685 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
4686 format, column_width, cb->cb_scripted);
4687 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
4688 format, column_width, cb->cb_scripted);
4689 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
4690 format, column_width, cb->cb_scripted);
4691 }
4692
4693 static const char *class_name[] = {
4694 VDEV_ALLOC_BIAS_DEDUP,
4695 VDEV_ALLOC_BIAS_SPECIAL,
4696 VDEV_ALLOC_CLASS_LOGS
4697 };
4698
4699 /*
4700 * Print out all the statistics for the given vdev. This can either be the
4701 * toplevel configuration, or called recursively. If 'name' is NULL, then this
4702 * is a verbose output, and we don't want to display the toplevel pool stats.
4703 *
4704 * Returns the number of stat lines printed.
4705 */
4706 static unsigned int
4707 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
4708 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
4709 {
4710 nvlist_t **oldchild, **newchild;
4711 uint_t c, children, oldchildren;
4712 vdev_stat_t *oldvs, *newvs, *calcvs;
4713 vdev_stat_t zerovs = { 0 };
4714 char *vname;
4715 int i;
4716 int ret = 0;
4717 uint64_t tdelta;
4718 double scale;
4719
4720 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
4721 return (ret);
4722
4723 calcvs = safe_malloc(sizeof (*calcvs));
4724
4725 if (oldnv != NULL) {
4726 verify(nvlist_lookup_uint64_array(oldnv,
4727 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
4728 } else {
4729 oldvs = &zerovs;
4730 }
4731
4732 /* Do we only want to see a specific vdev? */
4733 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
4734 /* Yes we do. Is this the vdev? */
4735 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
4736 /*
4737 * This is our vdev. Since it is the only vdev we
4738 * will be displaying, make depth = 0 so that it
4739 * doesn't get indented.
4740 */
4741 depth = 0;
4742 break;
4743 }
4744 }
4745
4746 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
4747 /* Couldn't match the name */
4748 goto children;
4749 }
4750
4751
4752 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
4753 (uint64_t **)&newvs, &c) == 0);
4754
4755 /*
4756  * Print the vdev name unless it's a histogram. Histograms
4757 * display the vdev name in the header itself.
4758 */
4759 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
4760 if (cb->cb_scripted) {
4761 printf("%s", name);
4762 } else {
4763 if (strlen(name) + depth > cb->cb_namewidth)
4764 (void) printf("%*s%s", depth, "", name);
4765 else
4766 (void) printf("%*s%s%*s", depth, "", name,
4767 (int)(cb->cb_namewidth - strlen(name) -
4768 depth), "");
4769 }
4770 }
4771
4772 /* Calculate our scaling factor */
4773 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
4774 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
4775 /*
4776 * If we specify printing histograms with no time interval, then
4777 * print the histogram numbers over the entire lifetime of the
4778 * vdev.
4779 */
4780 scale = 1;
4781 } else {
4782 if (tdelta == 0)
4783 scale = 1.0;
4784 else
4785 scale = (double)NANOSEC / tdelta;
4786 }
4787
4788 if (cb->cb_flags & IOS_DEFAULT_M) {
4789 calc_default_iostats(oldvs, newvs, calcvs);
4790 print_iostat_default(calcvs, cb, scale);
4791 }
4792 if (cb->cb_flags & IOS_LATENCY_M)
4793 print_iostat_latency(cb, oldnv, newnv);
4794 if (cb->cb_flags & IOS_QUEUES_M)
4795 print_iostat_queues(cb, newnv);
4796 if (cb->cb_flags & IOS_ANYHISTO_M) {
4797 printf("\n");
4798 print_iostat_histos(cb, oldnv, newnv, scale, name);
4799 }
4800
4801 if (cb->vcdl != NULL) {
4802 char *path;
4803 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
4804 &path) == 0) {
4805 printf(" ");
4806 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
4807 }
4808 }
4809
4810 if (!(cb->cb_flags & IOS_ANYHISTO_M))
4811 printf("\n");
4812
4813 ret++;
4814
4815 children:
4816
4817 free(calcvs);
4818
4819 if (!cb->cb_verbose)
4820 return (ret);
4821
4822 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
4823 &newchild, &children) != 0)
4824 return (ret);
4825
4826 if (oldnv) {
4827 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
4828 &oldchild, &oldchildren) != 0)
4829 return (ret);
4830
4831 children = MIN(oldchildren, children);
4832 }
4833
4834 /*
4835 * print normal top-level devices
4836 */
4837 for (c = 0; c < children; c++) {
4838 uint64_t ishole = B_FALSE, islog = B_FALSE;
4839
4840 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
4841 &ishole);
4842
4843 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
4844 &islog);
4845
4846 if (ishole || islog)
4847 continue;
4848
4849 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
4850 continue;
4851
4852 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4853 cb->cb_vdevs.cb_name_flags);
4854 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
4855 newchild[c], cb, depth + 2);
4856 free(vname);
4857 }
4858
4859 /*
4860 * print all other top-level devices
4861 */
4862 for (uint_t n = 0; n < 3; n++) {
4863 boolean_t printed = B_FALSE;
4864
4865 for (c = 0; c < children; c++) {
4866 uint64_t islog = B_FALSE;
4867 char *bias = NULL;
4868 char *type = NULL;
4869
4870 (void) nvlist_lookup_uint64(newchild[c],
4871 ZPOOL_CONFIG_IS_LOG, &islog);
4872 if (islog) {
4873 bias = VDEV_ALLOC_CLASS_LOGS;
4874 } else {
4875 (void) nvlist_lookup_string(newchild[c],
4876 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
4877 (void) nvlist_lookup_string(newchild[c],
4878 ZPOOL_CONFIG_TYPE, &type);
4879 }
4880 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
4881 continue;
4882 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
4883 continue;
4884
4885 if (!printed) {
4886 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
4887 !cb->cb_scripted &&
4888 !cb->cb_vdevs.cb_names) {
4889 print_iostat_dashes(cb, 0,
4890 class_name[n]);
4891 }
4892 printf("\n");
4893 printed = B_TRUE;
4894 }
4895
4896 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4897 cb->cb_vdevs.cb_name_flags);
4898 ret += print_vdev_stats(zhp, vname, oldnv ?
4899 oldchild[c] : NULL, newchild[c], cb, depth + 2);
4900 free(vname);
4901 }
4902 }
4903
4904 /*
4905 * Include level 2 ARC devices in iostat output
4906 */
4907 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
4908 &newchild, &children) != 0)
4909 return (ret);
4910
4911 if (oldnv) {
4912 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
4913 &oldchild, &oldchildren) != 0)
4914 return (ret);
4915
4916 children = MIN(oldchildren, children);
4917 }
4918
4919 if (children > 0) {
4920 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
4921 !cb->cb_vdevs.cb_names) {
4922 print_iostat_dashes(cb, 0, "cache");
4923 }
4924 printf("\n");
4925
4926 for (c = 0; c < children; c++) {
4927 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4928 cb->cb_vdevs.cb_name_flags);
4929 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
4930 : NULL, newchild[c], cb, depth + 2);
4931 free(vname);
4932 }
4933 }
4934
4935 return (ret);
4936 }
4937
4938 static int
4939 refresh_iostat(zpool_handle_t *zhp, void *data)
4940 {
4941 iostat_cbdata_t *cb = data;
4942 boolean_t missing;
4943
4944 /*
4945 * If the pool has disappeared, remove it from the list and continue.
4946 */
4947 if (zpool_refresh_stats(zhp, &missing) != 0)
4948 return (-1);
4949
4950 if (missing)
4951 pool_list_remove(cb->cb_list, zhp);
4952
4953 return (0);
4954 }
4955
4956 /*
4957 * Callback to print out the iostats for the given pool.
4958 */
4959 static int
4960 print_iostat(zpool_handle_t *zhp, void *data)
4961 {
4962 iostat_cbdata_t *cb = data;
4963 nvlist_t *oldconfig, *newconfig;
4964 nvlist_t *oldnvroot, *newnvroot;
4965 int ret;
4966
4967 newconfig = zpool_get_config(zhp, &oldconfig);
4968
4969 if (cb->cb_iteration == 1)
4970 oldconfig = NULL;
4971
4972 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
4973 &newnvroot) == 0);
4974
4975 if (oldconfig == NULL)
4976 oldnvroot = NULL;
4977 else
4978 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
4979 &oldnvroot) == 0);
4980
4981 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
4982 cb, 0);
4983 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
4984 !cb->cb_scripted && cb->cb_verbose &&
4985 !cb->cb_vdevs.cb_names_count) {
4986 print_iostat_separator(cb);
4987 if (cb->vcdl != NULL) {
4988 print_cmd_columns(cb->vcdl, 1);
4989 }
4990 printf("\n");
4991 }
4992
4993 return (ret);
4994 }
4995
4996 static int
4997 get_columns(void)
4998 {
4999 struct winsize ws;
5000 int columns = 80;
5001 int error;
5002
5003 if (isatty(STDOUT_FILENO)) {
5004 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5005 if (error == 0)
5006 columns = ws.ws_col;
5007 } else {
5008 columns = 999;
5009 }
5010
5011 return (columns);
5012 }
5013
5014 /*
5015 * Return the required length of the pool/vdev name column. The minimum
5016 * allowed width and output formatting flags must be provided.
5017 */
5018 static int
5019 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5020 {
5021 nvlist_t *config, *nvroot;
5022 int width = min_width;
5023
5024 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5025 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5026 &nvroot) == 0);
5027 unsigned int poolname_len = strlen(zpool_get_name(zhp));
5028 if (verbose == B_FALSE) {
5029 width = MAX(poolname_len, min_width);
5030 } else {
5031 width = MAX(poolname_len,
5032 max_width(zhp, nvroot, 0, min_width, flags));
5033 }
5034 }
5035
5036 return (width);
5037 }
5038
5039 /*
5040  * Parse the input string and extract the 'interval' and 'count' values, if present.
5041 */
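/*
 * For example (illustrative): with the trailing arguments "tank 2 5" this
 * returns interval = 2.0 and count = 5 and drops both numbers from argc;
 * with just "tank 2" it returns interval = 2.0 and count = 0 (repeat forever).
 */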
5042 static void
5043 get_interval_count(int *argcp, char **argv, float *iv,
5044 unsigned long *cnt)
5045 {
5046 float interval = 0;
5047 unsigned long count = 0;
5048 int argc = *argcp;
5049
5050 /*
5051 * Determine if the last argument is an integer or a pool name
5052 */
5053 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5054 char *end;
5055
5056 errno = 0;
5057 interval = strtof(argv[argc - 1], &end);
5058
5059 if (*end == '\0' && errno == 0) {
5060 if (interval == 0) {
5061 (void) fprintf(stderr, gettext(
5062 "interval cannot be zero\n"));
5063 usage(B_FALSE);
5064 }
5065 /*
5066 * Ignore the last parameter
5067 */
5068 argc--;
5069 } else {
5070 /*
5071 * If this is not a valid number, just plow on. The
5072 * user will get a more informative error message later
5073 * on.
5074 */
5075 interval = 0;
5076 }
5077 }
5078
5079 /*
5080 * If the last argument is also an integer, then we have both a count
5081 * and an interval.
5082 */
5083 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5084 char *end;
5085
5086 errno = 0;
5087 count = interval;
5088 interval = strtof(argv[argc - 1], &end);
5089
5090 if (*end == '\0' && errno == 0) {
5091 if (interval == 0) {
5092 (void) fprintf(stderr, gettext(
5093 "interval cannot be zero\n"));
5094 usage(B_FALSE);
5095 }
5096
5097 /*
5098 * Ignore the last parameter
5099 */
5100 argc--;
5101 } else {
5102 interval = 0;
5103 }
5104 }
5105
5106 *iv = interval;
5107 *cnt = count;
5108 *argcp = argc;
5109 }
5110
5111 static void
5112 get_timestamp_arg(char c)
5113 {
5114 if (c == 'u')
5115 timestamp_fmt = UDATE;
5116 else if (c == 'd')
5117 timestamp_fmt = DDATE;
5118 else
5119 usage(B_FALSE);
5120 }
5121
5122 /*
5123  * Return stat flags that are supported on all pools by both the module and
5124 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5125 * It will get ANDed down until only the flags that are supported on all pools
5126 * remain.
5127 */
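/*
 * For example (hypothetical): if one pool's module reports
 * IOS_DEFAULT_M | IOS_LATENCY_M and another reports only IOS_DEFAULT_M,
 * starting the mask at all 0xFFs and ANDing in each callback leaves just
 * IOS_DEFAULT_M set.
 */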
5128 static int
5129 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5130 {
5131 uint64_t *mask = data;
5132 nvlist_t *config, *nvroot, *nvx;
5133 uint64_t flags = 0;
5134 int i, j;
5135
5136 config = zpool_get_config(zhp, NULL);
5137 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5138 &nvroot) == 0);
5139
5140  /* Default stats are always supported, but for completeness... */
5141 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5142 flags |= IOS_DEFAULT_M;
5143
5144 /* Get our extended stats nvlist from the main list */
5145 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5146 &nvx) != 0) {
5147 /*
5148 * No extended stats; they're probably running an older
5149 * module. No big deal, we support that too.
5150 */
5151 goto end;
5152 }
5153
5154 /* For each extended stat, make sure all its nvpairs are supported */
5155 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5156 if (!vsx_type_to_nvlist[j][0])
5157 continue;
5158
5159 /* Start off by assuming the flag is supported, then check */
5160 flags |= (1ULL << j);
5161 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5162 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5163 /* flag isn't supported */
5164 flags = flags & ~(1ULL << j);
5165 break;
5166 }
5167 }
5168 }
5169 end:
5170 *mask = *mask & flags;
5171 return (0);
5172 }
5173
5174 /*
5175 * Return a bitmask of stats that are supported on all pools by both the module
5176 * and zpool iostat.
5177 */
5178 static uint64_t
5179 get_stat_flags(zpool_list_t *list)
5180 {
5181 uint64_t mask = -1;
5182
5183 /*
5184 * get_stat_flags_cb() will lop off bits from "mask" until only the
5185 * flags that are supported on all pools remain.
5186 */
5187 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
5188 return (mask);
5189 }
5190
5191 /*
5192 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
5193 */
5194 static int
5195 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
5196 {
5197 vdev_cbdata_t *cb = cb_data;
5198 char *name = NULL;
5199 int ret = 1; /* assume match */
5200 zpool_handle_t *zhp = zhp_data;
5201
5202 name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
5203
5204 if (strcmp(name, cb->cb_names[0])) {
5205 free(name);
5206 name = zpool_vdev_name(g_zfs, zhp, nv, VDEV_NAME_GUID);
5207 ret = (strcmp(name, cb->cb_names[0]) == 0);
5208 }
5209 free(name);
5210
5211 return (ret);
5212 }
5213
5214 /*
5215 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
5216 */
5217 static int
5218 is_vdev(zpool_handle_t *zhp, void *cb_data)
5219 {
5220 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
5221 }
5222
5223 /*
5224 * Check if vdevs are in a pool
5225 *
5226 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
5227 * return 0. If pool_name is NULL, then search all pools.
5228 */
5229 static int
5230 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
5231 vdev_cbdata_t *cb)
5232 {
5233 char **tmp_name;
5234 int ret = 0;
5235 int i;
5236 int pool_count = 0;
5237
5238 if ((argc == 0) || !*argv)
5239 return (0);
5240
5241 if (pool_name)
5242 pool_count = 1;
5243
5244 /* Temporarily hijack cb_names for a second... */
5245 tmp_name = cb->cb_names;
5246
5247  /* Go through our list of prospective vdev names */
5248 for (i = 0; i < argc; i++) {
5249 cb->cb_names = argv + i;
5250
5251 /* Is this name a vdev in our pools? */
5252 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
5253 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
5254 if (!ret) {
5255 /* No match */
5256 break;
5257 }
5258 }
5259
5260 cb->cb_names = tmp_name;
5261
5262 return (ret);
5263 }
5264
5265 static int
5266 is_pool_cb(zpool_handle_t *zhp, void *data)
5267 {
5268 char *name = data;
5269 if (strcmp(name, zpool_get_name(zhp)) == 0)
5270 return (1);
5271
5272 return (0);
5273 }
5274
5275 /*
5276 * Do we have a pool named *name? If so, return 1, otherwise 0.
5277 */
5278 static int
5279 is_pool(char *name)
5280 {
5281 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
5282 is_pool_cb, name));
5283 }
5284
5285 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
5286 static int
5287 are_all_pools(int argc, char **argv)
5288 {
5289 if ((argc == 0) || !*argv)
5290 return (0);
5291
5292 while (--argc >= 0)
5293 if (!is_pool(argv[argc]))
5294 return (0);
5295
5296 return (1);
5297 }
5298
5299 /*
5300 * Helper function to print out vdev/pool names we can't resolve. Used for an
5301 * error message.
5302 */
5303 static void
5304 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
5305 vdev_cbdata_t *cb)
5306 {
5307 int i;
5308 char *name;
5309 char *str;
5310 for (i = 0; i < argc; i++) {
5311 name = argv[i];
5312
5313 if (is_pool(name))
5314 str = gettext("pool");
5315 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
5316 str = gettext("vdev in this pool");
5317 else if (are_vdevs_in_pool(1, &name, NULL, cb))
5318 str = gettext("vdev in another pool");
5319 else
5320 str = gettext("unknown");
5321
5322 fprintf(stderr, "\t%s (%s)\n", name, str);
5323 }
5324 }
5325
5326 /*
5327 * Same as get_interval_count(), but with additional checks to not misinterpret
5328 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
5329 * cb.cb_vdevs.cb_name_flags.
5330 */
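/*
 * For example (hypothetical guid): in
 * "zpool iostat -g tank 7099822035380064029 5" the large number resolves to
 * a vdev guid in the pool, so only the trailing "5" is considered a candidate
 * interval/count value.
 */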
5331 static void
5332 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
5333 unsigned long *count, iostat_cbdata_t *cb)
5334 {
5335 char **tmpargv = argv;
5336 int argc_for_interval = 0;
5337
5338 /* Is the last arg an interval value? Or a guid? */
5339 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
5340 &cb->cb_vdevs)) {
5341 /*
5342 * The last arg is not a guid, so it's probably an
5343 * interval value.
5344 */
5345 argc_for_interval++;
5346
5347 if (*argc >= 2 &&
5348 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
5349 &cb->cb_vdevs)) {
5350 /*
5351 * The 2nd to last arg is not a guid, so it's probably
5352 * an interval value.
5353 */
5354 argc_for_interval++;
5355 }
5356 }
5357
5358 /* Point to our list of possible intervals */
5359 tmpargv = &argv[*argc - argc_for_interval];
5360
5361 *argc = *argc - argc_for_interval;
5362 get_interval_count(&argc_for_interval, tmpargv,
5363 interval, count);
5364 }
5365
5366 /*
5367 * Floating point sleep(). Allows you to pass in a floating point value for
5368 * seconds.
5369 */
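/*
 * For example, fsleep(2.5) requests a sleep of tv_sec = 2 and
 * tv_nsec = 500000000 (half a second).
 */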
5370 static void
5371 fsleep(float sec)
5372 {
5373 struct timespec req;
5374 req.tv_sec = floor(sec);
5375 req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
5376 nanosleep(&req, NULL);
5377 }
5378
5379 /*
5380 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
5381 * if we were unable to determine its size.
5382 */
5383 static int
5384 terminal_height(void)
5385 {
5386 struct winsize win;
5387
5388 if (isatty(STDOUT_FILENO) == 0)
5389 return (-1);
5390
5391 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
5392 return (win.ws_row);
5393
5394 return (-1);
5395 }
5396
5397 /*
5398 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
5399 * print the result.
5400 *
5401 * name: Short name of the script ('iostat').
5402  * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
5403 */
5404 static void
5405 print_zpool_script_help(char *name, char *path)
5406 {
5407 char *argv[] = {path, "-h", NULL};
5408 char **lines = NULL;
5409 int lines_cnt = 0;
5410 int rc;
5411
5412 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
5413 &lines_cnt);
5414 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
5415 if (lines != NULL)
5416 libzfs_free_str_array(lines, lines_cnt);
5417 return;
5418 }
5419
5420 for (int i = 0; i < lines_cnt; i++)
5421 if (!is_blank_str(lines[i]))
5422 printf(" %-14s %s\n", name, lines[i]);
5423
5424 libzfs_free_str_array(lines, lines_cnt);
5425 }
5426
5427 /*
5428  * Go through the zpool status/iostat -c scripts in the user's path, run their
5429 * help option (-h), and print out the results.
5430 */
5431 static void
5432 print_zpool_dir_scripts(char *dirpath)
5433 {
5434 DIR *dir;
5435 struct dirent *ent;
5436 char fullpath[MAXPATHLEN];
5437 struct stat dir_stat;
5438
5439 if ((dir = opendir(dirpath)) != NULL) {
5440 /* print all the files and directories within directory */
5441 while ((ent = readdir(dir)) != NULL) {
5442 (void) snprintf(fullpath, sizeof (fullpath), "%s/%s", dirpath, ent->d_name);
5443
5444 /* Print the scripts */
5445 if (stat(fullpath, &dir_stat) == 0)
5446 if (dir_stat.st_mode & S_IXUSR &&
5447 S_ISREG(dir_stat.st_mode))
5448 print_zpool_script_help(ent->d_name,
5449 fullpath);
5450 }
5451 closedir(dir);
5452 }
5453 }
5454
5455 /*
5456 * Print out help text for all zpool status/iostat -c scripts.
5457 */
5458 static void
5459 print_zpool_script_list(char *subcommand)
5460 {
5461 char *dir, *sp, *tmp;
5462
5463 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
5464
5465 sp = zpool_get_cmd_search_path();
5466 if (sp == NULL)
5467 return;
5468
5469 for (dir = strtok_r(sp, ":", &tmp);
5470 dir != NULL;
5471 dir = strtok_r(NULL, ":", &tmp))
5472 print_zpool_dir_scripts(dir);
5473
5474 free(sp);
5475 }
5476
5477 /*
5478 * Set the minimum pool/vdev name column width. The width must be at least 10,
5479 * but may be as large as the column width - 42 so it still fits on one line.
5480  * but may be as large as the terminal width - 42 so it still fits on one line.
5481 */
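/*
 * For example (hypothetical terminal): on a 120-column terminal the stats
 * occupy 42 columns, so names are padded to at most 120 - 42 = 78 characters;
 * on a very narrow (or unknown-width) terminal the width is still clamped to
 * a minimum of 10.
 */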
5482 static int
5483 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
5484 {
5485 iostat_cbdata_t *cb = data;
5486 int width, available_width;
5487
5488 /*
5489 * get_namewidth() returns the maximum width of any name in that column
5490 * for any pool/vdev/device line that will be output.
5491 */
5492 width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_vdevs.cb_name_flags,
5493 cb->cb_verbose);
5494
5495 /*
5496 * The width we are calculating is the width of the header and also the
5497 * padding width for names that are less than maximum width. The stats
5498 * take up 42 characters, so the width available for names is:
5499 */
5500 available_width = get_columns() - 42;
5501
5502 /*
5503 * If the maximum width fits on a screen, then great! Make everything
5504 * line up by justifying all lines to the same width. If that max
5505 * width is larger than what's available, the name plus stats won't fit
5506 * on one line, and justifying to that width would cause every line to
5507 * wrap on the screen. We only want lines with long names to wrap.
5508 * Limit the padding to what won't wrap.
5509 */
5510 if (width > available_width)
5511 width = available_width;
5512
5513 /*
5514 * And regardless of whatever the screen width is (get_columns can
5515 * return 0 if the width is not known or less than 42 for a narrow
5516 * terminal) have the width be a minimum of 10.
5517 */
5518 if (width < 10)
5519 width = 10;
5520
5521 /* Save the calculated width */
5522 cb->cb_namewidth = width;
5523
5524 return (0);
5525 }
5526
5527 /*
5528  * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
5529 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
5530 * [interval [count]]
5531 *
5532 * -c CMD For each vdev, run command CMD
5533 * -g Display guid for individual vdev name.
5534 * -L Follow links when resolving vdev path name.
5535 * -P Display full path for vdev name.
5536 * -v Display statistics for individual vdevs
5537 * -h Display help
5538 * -p Display values in parsable (exact) format.
5539 * -H Scripted mode. Don't display headers, and separate properties
5540 * by a single tab.
5541 * -l Display average latency
5542 * -q Display queue depths
5543 * -w Display latency histograms
5544 * -r Display request size histogram
5545 * -T Display a timestamp in date(1) or Unix format
5546 * -n Only print headers once
5547 *
5548 * This command can be tricky because we want to be able to deal with pool
5549 * creation/destruction as well as vdev configuration changes. The bulk of this
5550 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
5551 * on pool_list_update() to detect the addition of new pools. Configuration
5552 * changes are all handled within libzfs.
5553 */
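/*
 * Illustrative invocations (not an exhaustive list):
 *
 *	zpool iostat -v tank 2		per-vdev stats every 2 seconds
 *	zpool iostat -w 5 3		latency histograms, 3 samples, 5s apart
 *	zpool iostat -c iostat		run the "iostat" zpool.d script per vdev
 */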
5554 int
5555 zpool_do_iostat(int argc, char **argv)
5556 {
5557 int c;
5558 int ret;
5559 int npools;
5560 float interval = 0;
5561 unsigned long count = 0;
5562 int winheight = 24;
5563 zpool_list_t *list;
5564 boolean_t verbose = B_FALSE;
5565 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
5566 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
5567 boolean_t omit_since_boot = B_FALSE;
5568 boolean_t guid = B_FALSE;
5569 boolean_t follow_links = B_FALSE;
5570 boolean_t full_name = B_FALSE;
5571 boolean_t headers_once = B_FALSE;
5572 iostat_cbdata_t cb = { 0 };
5573 char *cmd = NULL;
5574
5575 /* Used for printing error message */
5576 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
5577 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
5578
5579 uint64_t unsupported_flags;
5580
5581 /* check options */
5582 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
5583 switch (c) {
5584 case 'c':
5585 if (cmd != NULL) {
5586 fprintf(stderr,
5587 gettext("Can't set -c flag twice\n"));
5588 exit(1);
5589 }
5590
5591 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
5592 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
5593 fprintf(stderr, gettext(
5594 "Can't run -c, disabled by "
5595 "ZPOOL_SCRIPTS_ENABLED.\n"));
5596 exit(1);
5597 }
5598
5599 if ((getuid() <= 0 || geteuid() <= 0) &&
5600 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
5601 fprintf(stderr, gettext(
5602 "Can't run -c with root privileges "
5603 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
5604 exit(1);
5605 }
5606 cmd = optarg;
5607 verbose = B_TRUE;
5608 break;
5609 case 'g':
5610 guid = B_TRUE;
5611 break;
5612 case 'L':
5613 follow_links = B_TRUE;
5614 break;
5615 case 'P':
5616 full_name = B_TRUE;
5617 break;
5618 case 'T':
5619 get_timestamp_arg(*optarg);
5620 break;
5621 case 'v':
5622 verbose = B_TRUE;
5623 break;
5624 case 'p':
5625 parsable = B_TRUE;
5626 break;
5627 case 'l':
5628 latency = B_TRUE;
5629 break;
5630 case 'q':
5631 queues = B_TRUE;
5632 break;
5633 case 'H':
5634 scripted = B_TRUE;
5635 break;
5636 case 'w':
5637 l_histo = B_TRUE;
5638 break;
5639 case 'r':
5640 rq_histo = B_TRUE;
5641 break;
5642 case 'y':
5643 omit_since_boot = B_TRUE;
5644 break;
5645 case 'n':
5646 headers_once = B_TRUE;
5647 break;
5648 case 'h':
5649 usage(B_FALSE);
5650 break;
5651 case '?':
5652 if (optopt == 'c') {
5653 print_zpool_script_list("iostat");
5654 exit(0);
5655 } else {
5656 fprintf(stderr,
5657 gettext("invalid option '%c'\n"), optopt);
5658 }
5659 usage(B_FALSE);
5660 }
5661 }
5662
5663 argc -= optind;
5664 argv += optind;
5665
5666 cb.cb_literal = parsable;
5667 cb.cb_scripted = scripted;
5668
5669 if (guid)
5670 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
5671 if (follow_links)
5672 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
5673 if (full_name)
5674 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
5675 cb.cb_iteration = 0;
5676 cb.cb_namewidth = 0;
5677 cb.cb_verbose = verbose;
5678
5679 /* Get our interval and count values (if any) */
5680 if (guid) {
5681 get_interval_count_filter_guids(&argc, argv, &interval,
5682 &count, &cb);
5683 } else {
5684 get_interval_count(&argc, argv, &interval, &count);
5685 }
5686
5687 if (argc == 0) {
5688 /* No args, so just print the defaults. */
5689 } else if (are_all_pools(argc, argv)) {
5690 /* All the args are pool names */
5691 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
5692 /* All the args are vdevs */
5693 cb.cb_vdevs.cb_names = argv;
5694 cb.cb_vdevs.cb_names_count = argc;
5695 argc = 0; /* No pools to process */
5696 } else if (are_all_pools(1, argv)) {
5697 /* The first arg is a pool name */
5698 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
5699 &cb.cb_vdevs)) {
5700 /* ...and the rest are vdev names */
5701 cb.cb_vdevs.cb_names = argv + 1;
5702 cb.cb_vdevs.cb_names_count = argc - 1;
5703 argc = 1; /* One pool to process */
5704 } else {
5705 fprintf(stderr, gettext("Expected either a list of "));
5706 fprintf(stderr, gettext("pools, or list of vdevs in"));
5707 fprintf(stderr, " \"%s\", ", argv[0]);
5708 fprintf(stderr, gettext("but got:\n"));
5709 error_list_unresolved_vdevs(argc - 1, argv + 1,
5710 argv[0], &cb.cb_vdevs);
5711 fprintf(stderr, "\n");
5712 usage(B_FALSE);
5713 return (1);
5714 }
5715 } else {
5716 /*
5717 * The args don't make sense. The first arg isn't a pool name,
5718 * nor are all the args vdevs.
5719 */
5720 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
5721 fprintf(stderr, "\n");
5722 return (1);
5723 }
5724
5725 if (cb.cb_vdevs.cb_names_count != 0) {
5726 /*
5727 * If user specified vdevs, it implies verbose.
5728 */
5729 cb.cb_verbose = B_TRUE;
5730 }
5731
5732 /*
5733 * Construct the list of all interesting pools.
5734 */
5735 ret = 0;
5736 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
5737 &ret)) == NULL)
5738 return (1);
5739
5740 if (pool_list_count(list) == 0 && argc != 0) {
5741 pool_list_free(list);
5742 return (1);
5743 }
5744
5745 if (pool_list_count(list) == 0 && interval == 0) {
5746 pool_list_free(list);
5747 (void) fprintf(stderr, gettext("no pools available\n"));
5748 return (1);
5749 }
5750
5751 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
5752 pool_list_free(list);
5753 (void) fprintf(stderr,
5754 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
5755 usage(B_FALSE);
5756 return (1);
5757 }
5758
5759 if (l_histo && rq_histo) {
5760 pool_list_free(list);
5761 (void) fprintf(stderr,
5762 gettext("Only one of [-r|-w] can be passed at a time\n"));
5763 usage(B_FALSE);
5764 return (1);
5765 }
5766
5767 /*
5768 * Enter the main iostat loop.
5769 */
5770 cb.cb_list = list;
5771
5772 if (l_histo) {
5773  * Histogram tables look out of place when you try to display
5774 * Histograms tables look out of place when you try to display
5775 * them with the other stats, so make a rule that you can only
5776 * print histograms by themselves.
5777 */
5778 cb.cb_flags = IOS_L_HISTO_M;
5779 } else if (rq_histo) {
5780 cb.cb_flags = IOS_RQ_HISTO_M;
5781 } else {
5782 cb.cb_flags = IOS_DEFAULT_M;
5783 if (latency)
5784 cb.cb_flags |= IOS_LATENCY_M;
5785 if (queues)
5786 cb.cb_flags |= IOS_QUEUES_M;
5787 }
5788
5789 /*
5790 * See if the module supports all the stats we want to display.
5791 */
5792 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
5793 if (unsupported_flags) {
5794 uint64_t f;
5795 int idx;
5796 fprintf(stderr,
5797 gettext("The loaded zfs module doesn't support:"));
5798
5799 /* for each bit set in unsupported_flags */
5800 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
5801 idx = lowbit64(f) - 1;
5802 fprintf(stderr, " -%c", flag_to_arg[idx]);
5803 }
5804
5805 fprintf(stderr, ". Try running a newer module.\n");
5806 pool_list_free(list);
5807
5808 return (1);
5809 }
5810
5811 for (;;) {
5812 if ((npools = pool_list_count(list)) == 0)
5813 (void) fprintf(stderr, gettext("no pools available\n"));
5814 else {
5815 /*
5816 * If this is the first iteration and -y was supplied
5817 * we skip any printing.
5818 */
5819 boolean_t skip = (omit_since_boot &&
5820 cb.cb_iteration == 0);
5821
5822 /*
5823 * Refresh all statistics. This is done as an
5824 * explicit step before calculating the maximum name
5825  * width, so that any configuration changes are
5826 * properly accounted for.
5827 */
5828 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
5829 &cb);
5830
5831 /*
5832 * Iterate over all pools to determine the maximum width
5833 * for the pool / device name column across all pools.
5834 */
5835 cb.cb_namewidth = 0;
5836 (void) pool_list_iter(list, B_FALSE,
5837 get_namewidth_iostat, &cb);
5838
5839 if (timestamp_fmt != NODATE)
5840 print_timestamp(timestamp_fmt);
5841
5842 if (cmd != NULL && cb.cb_verbose &&
5843 !(cb.cb_flags & IOS_ANYHISTO_M)) {
5844 cb.vcdl = all_pools_for_each_vdev_run(argc,
5845 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
5846 cb.cb_vdevs.cb_names_count,
5847 cb.cb_vdevs.cb_name_flags);
5848 } else {
5849 cb.vcdl = NULL;
5850 }
5851
5852
5853 /*
5854 * Check terminal size so we can print headers
5855 * even when terminal window has its height
5856 * changed.
5857 */
5858 winheight = terminal_height();
5859 /*
5860 * Are we connected to TTY? If not, headers_once
5861 * should be true, to avoid breaking scripts.
5862 */
5863 if (winheight < 0)
5864 headers_once = B_TRUE;
5865
5866 /*
5867  * or exactly one of skip and verbose is set, print the header.
5868 * or either skip or verbose mode, print the header.
5869 *
5870 * The histogram code explicitly prints its header on
5871 * every vdev, so skip this for histograms.
5872 */
5873 if (((++cb.cb_iteration == 1 && !skip) ||
5874 (skip != verbose) ||
5875 (!headers_once &&
5876 (cb.cb_iteration % winheight) == 0)) &&
5877 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
5878 !cb.cb_scripted)
5879 print_iostat_header(&cb);
5880
5881 if (skip) {
5882 (void) fsleep(interval);
5883 continue;
5884 }
5885
5886 pool_list_iter(list, B_FALSE, print_iostat, &cb);
5887
5888 /*
5889 * If there's more than one pool, and we're not in
5890 * verbose mode (which prints a separator for us),
5891 * then print a separator.
5892 *
5893 * In addition, if we're printing specific vdevs then
5894 * we also want an ending separator.
5895 */
5896 if (((npools > 1 && !verbose &&
5897 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
5898 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
5899 cb.cb_vdevs.cb_names_count)) &&
5900 !cb.cb_scripted) {
5901 print_iostat_separator(&cb);
5902 if (cb.vcdl != NULL)
5903 print_cmd_columns(cb.vcdl, 1);
5904 printf("\n");
5905 }
5906
5907 if (cb.vcdl != NULL)
5908 free_vdev_cmd_data_list(cb.vcdl);
5909
5910 }
5911
5912 /*
5913 * Flush the output so that redirection to a file isn't buffered
5914 * indefinitely.
5915 */
5916 (void) fflush(stdout);
5917
5918 if (interval == 0)
5919 break;
5920
5921 if (count != 0 && --count == 0)
5922 break;
5923
5924 (void) fsleep(interval);
5925 }
5926
5927 pool_list_free(list);
5928
5929 return (ret);
5930 }
5931
5932 typedef struct list_cbdata {
5933 boolean_t cb_verbose;
5934 int cb_name_flags;
5935 int cb_namewidth;
5936 boolean_t cb_scripted;
5937 zprop_list_t *cb_proplist;
5938 boolean_t cb_literal;
5939 } list_cbdata_t;
5940
5941
5942 /*
5943 * Given a list of columns to display, output appropriate headers for each one.
5944 */
5945 static void
5946 print_header(list_cbdata_t *cb)
5947 {
5948 zprop_list_t *pl = cb->cb_proplist;
5949 char headerbuf[ZPOOL_MAXPROPLEN];
5950 const char *header;
5951 boolean_t first = B_TRUE;
5952 boolean_t right_justify;
5953 size_t width = 0;
5954
5955 for (; pl != NULL; pl = pl->pl_next) {
5956 width = pl->pl_width;
5957 if (first && cb->cb_verbose) {
5958 /*
5959 * Reset the width to accommodate the verbose listing
5960 * of devices.
5961 */
5962 width = cb->cb_namewidth;
5963 }
5964
5965 if (!first)
5966 (void) printf(" ");
5967 else
5968 first = B_FALSE;
5969
5970 right_justify = B_FALSE;
5971 if (pl->pl_prop != ZPROP_INVAL) {
5972 header = zpool_prop_column_name(pl->pl_prop);
5973 right_justify = zpool_prop_align_right(pl->pl_prop);
5974 } else {
5975 int i;
5976
5977 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
5978 headerbuf[i] = toupper(pl->pl_user_prop[i]);
5979 headerbuf[i] = '\0';
5980 header = headerbuf;
5981 }
5982
5983 if (pl->pl_next == NULL && !right_justify)
5984 (void) printf("%s", header);
5985 else if (right_justify)
5986 (void) printf("%*s", (int)width, header);
5987 else
5988 (void) printf("%-*s", (int)width, header);
5989 }
5990
5991 (void) printf("\n");
5992 }
5993
5994 /*
5995 * Given a pool and a list of properties, print out all the properties according
5996 * to the described layout. Used by zpool_do_list().
5997 */
5998 static void
5999 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6000 {
6001 zprop_list_t *pl = cb->cb_proplist;
6002 boolean_t first = B_TRUE;
6003 char property[ZPOOL_MAXPROPLEN];
6004 char *propstr;
6005 boolean_t right_justify;
6006 size_t width;
6007
6008 for (; pl != NULL; pl = pl->pl_next) {
6009
6010 width = pl->pl_width;
6011 if (first && cb->cb_verbose) {
6012 /*
6013 * Reset the width to accommodate the verbose listing
6014 * of devices.
6015 */
6016 width = cb->cb_namewidth;
6017 }
6018
6019 if (!first) {
6020 if (cb->cb_scripted)
6021 (void) printf("\t");
6022 else
6023 (void) printf(" ");
6024 } else {
6025 first = B_FALSE;
6026 }
6027
6028 right_justify = B_FALSE;
6029 if (pl->pl_prop != ZPROP_INVAL) {
6030 if (zpool_get_prop(zhp, pl->pl_prop, property,
6031 sizeof (property), NULL, cb->cb_literal) != 0)
6032 propstr = "-";
6033 else
6034 propstr = property;
6035
6036 right_justify = zpool_prop_align_right(pl->pl_prop);
6037 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6038 zpool_prop_unsupported(pl->pl_user_prop)) &&
6039 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6040 sizeof (property)) == 0) {
6041 propstr = property;
6042 } else {
6043 propstr = "-";
6044 }
6045
6046
6047 /*
6048 * If this is being called in scripted mode, or if this is the
6049 * last column and it is left-justified, don't include a width
6050 * format specifier.
6051 */
6052 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
6053 (void) printf("%s", propstr);
6054 else if (right_justify)
6055 (void) printf("%*s", (int)width, propstr);
6056 else
6057 (void) printf("%-*s", (int)width, propstr);
6058 }
6059
6060 (void) printf("\n");
6061 }
6062
6063 static void
6064 print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
6065 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
6066 {
6067 char propval[64];
6068 boolean_t fixed;
6069 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6070
6071 switch (prop) {
6072 case ZPOOL_PROP_EXPANDSZ:
6073 case ZPOOL_PROP_CHECKPOINT:
6074 case ZPOOL_PROP_DEDUPRATIO:
6075 if (value == 0)
6076 (void) strlcpy(propval, "-", sizeof (propval));
6077 else
6078 zfs_nicenum_format(value, propval, sizeof (propval),
6079 format);
6080 break;
6081 case ZPOOL_PROP_FRAGMENTATION:
6082 if (value == ZFS_FRAG_INVALID) {
6083 (void) strlcpy(propval, "-", sizeof (propval));
6084 } else if (format == ZFS_NICENUM_RAW) {
6085 (void) snprintf(propval, sizeof (propval), "%llu",
6086 (unsigned long long)value);
6087 } else {
6088 (void) snprintf(propval, sizeof (propval), "%llu%%",
6089 (unsigned long long)value);
6090 }
6091 break;
6092 case ZPOOL_PROP_CAPACITY:
6093 /* capacity value is in parts-per-10,000 (aka permyriad) */
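/*
 * e.g. (illustrative): a raw value of 2500 prints as "25.0%", or as
 * "25" in parsable (-p) mode.
 */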
6094 if (format == ZFS_NICENUM_RAW)
6095 (void) snprintf(propval, sizeof (propval), "%llu",
6096 (unsigned long long)value / 100);
6097 else
6098 (void) snprintf(propval, sizeof (propval),
6099 value < 1000 ? "%1.2f%%" : value < 10000 ?
6100 "%2.1f%%" : "%3.0f%%", value / 100.0);
6101 break;
6102 case ZPOOL_PROP_HEALTH:
6103 width = 8;
6104 (void) strlcpy(propval, str, sizeof (propval));
6105 break;
6106 default:
6107 zfs_nicenum_format(value, propval, sizeof (propval), format);
6108 }
6109
6110 if (!valid)
6111 (void) strlcpy(propval, "-", sizeof (propval));
6112
6113 if (scripted)
6114 (void) printf("\t%s", propval);
6115 else
6116 (void) printf(" %*s", (int)width, propval);
6117 }
6118
6119 /*
6120  * Print the static default line for each vdev;
6121  * not compatible with the '-o' <proplist> option.
6122 */
6123 static void
6124 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6125 list_cbdata_t *cb, int depth, boolean_t isspare)
6126 {
6127 nvlist_t **child;
6128 vdev_stat_t *vs;
6129 uint_t c, children;
6130 char *vname;
6131 boolean_t scripted = cb->cb_scripted;
6132 uint64_t islog = B_FALSE;
6133 char *dashes = "%-*s - - - - "
6134 "- - - - -\n";
6135
6136 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6137 (uint64_t **)&vs, &c) == 0);
6138
6139 if (name != NULL) {
6140 boolean_t toplevel = (vs->vs_space != 0);
6141 uint64_t cap;
6142 enum zfs_nicenum_format format;
6143 const char *state;
6144
6145 if (cb->cb_literal)
6146 format = ZFS_NICENUM_RAW;
6147 else
6148 format = ZFS_NICENUM_1024;
6149
6150 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
6151 return;
6152
6153 if (scripted)
6154 (void) printf("\t%s", name);
6155 else if (strlen(name) + depth > cb->cb_namewidth)
6156 (void) printf("%*s%s", depth, "", name);
6157 else
6158 (void) printf("%*s%s%*s", depth, "", name,
6159 (int)(cb->cb_namewidth - strlen(name) - depth), "");
6160
6161 /*
6162 * Print the properties for the individual vdevs. Some
6163 * properties are only applicable to toplevel vdevs. The
6164  * 'toplevel' boolean value is passed to print_one_column()
6165  * to indicate whether the value is valid.
6166 */
6167 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, scripted,
6168 toplevel, format);
6169 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
6170 scripted, toplevel, format);
6171 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
6172 NULL, scripted, toplevel, format);
6173 print_one_column(ZPOOL_PROP_CHECKPOINT,
6174 vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
6175 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
6176 scripted, B_TRUE, format);
6177 print_one_column(ZPOOL_PROP_FRAGMENTATION,
6178 vs->vs_fragmentation, NULL, scripted,
6179 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
6180 format);
6181 cap = (vs->vs_space == 0) ? 0 :
6182 (vs->vs_alloc * 10000 / vs->vs_space);
6183 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
6184 scripted, toplevel, format);
6185 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
6186 scripted, toplevel, format);
6187 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
6188 if (isspare) {
6189 if (vs->vs_aux == VDEV_AUX_SPARED)
6190 state = "INUSE";
6191 else if (vs->vs_state == VDEV_STATE_HEALTHY)
6192 state = "AVAIL";
6193 }
6194 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
6195 B_TRUE, format);
6196 (void) printf("\n");
6197 }
6198
6199 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
6200 &child, &children) != 0)
6201 return;
6202
6203 /* list the normal vdevs first */
6204 for (c = 0; c < children; c++) {
6205 uint64_t ishole = B_FALSE;
6206
6207 if (nvlist_lookup_uint64(child[c],
6208 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
6209 continue;
6210
6211 if (nvlist_lookup_uint64(child[c],
6212 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
6213 continue;
6214
6215 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
6216 continue;
6217
6218 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6219 cb->cb_name_flags);
6220 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
6221 free(vname);
6222 }
6223
6224 /* list the classes: 'logs', 'dedup', and 'special' */
6225 for (uint_t n = 0; n < 3; n++) {
6226 boolean_t printed = B_FALSE;
6227
6228 for (c = 0; c < children; c++) {
6229 char *bias = NULL;
6230 char *type = NULL;
6231
6232 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
6233 &islog) == 0 && islog) {
6234 bias = VDEV_ALLOC_CLASS_LOGS;
6235 } else {
6236 (void) nvlist_lookup_string(child[c],
6237 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
6238 (void) nvlist_lookup_string(child[c],
6239 ZPOOL_CONFIG_TYPE, &type);
6240 }
6241 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
6242 continue;
6243 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
6244 continue;
6245
6246 if (!printed) {
6247 /* LINTED E_SEC_PRINTF_VAR_FMT */
6248 (void) printf(dashes, cb->cb_namewidth,
6249 class_name[n]);
6250 printed = B_TRUE;
6251 }
6252 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6253 cb->cb_name_flags);
6254 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6255 B_FALSE);
6256 free(vname);
6257 }
6258 }
6259
6260 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
6261 &child, &children) == 0 && children > 0) {
6262 /* LINTED E_SEC_PRINTF_VAR_FMT */
6263 (void) printf(dashes, cb->cb_namewidth, "cache");
6264 for (c = 0; c < children; c++) {
6265 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6266 cb->cb_name_flags);
6267 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6268 B_FALSE);
6269 free(vname);
6270 }
6271 }
6272
6273 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
6274 &children) == 0 && children > 0) {
6275 /* LINTED E_SEC_PRINTF_VAR_FMT */
6276 (void) printf(dashes, cb->cb_namewidth, "spare");
6277 for (c = 0; c < children; c++) {
6278 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6279 cb->cb_name_flags);
6280 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6281 B_TRUE);
6282 free(vname);
6283 }
6284 }
6285 }
6286
6287 /*
6288 * Generic callback function to list a pool.
6289 */
6290 static int
6291 list_callback(zpool_handle_t *zhp, void *data)
6292 {
6293 list_cbdata_t *cbp = data;
6294
6295 print_pool(zhp, cbp);
6296
6297 if (cbp->cb_verbose) {
6298 nvlist_t *config, *nvroot;
6299
6300 config = zpool_get_config(zhp, NULL);
6301 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6302 &nvroot) == 0);
6303 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
6304 }
6305
6306 return (0);
6307 }
6308
6309 /*
6310 * Set the minimum pool/vdev name column width. The width must be at least 9,
6311 * but may be as large as needed.
6312 */
6313 static int
6314 get_namewidth_list(zpool_handle_t *zhp, void *data)
6315 {
6316 list_cbdata_t *cb = data;
6317 int width;
6318
6319 width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags,
6320 cb->cb_verbose);
6321
6322 if (width < 9)
6323 width = 9;
6324
6325 cb->cb_namewidth = width;
6326
6327 return (0);
6328 }
6329
6330 /*
6331  * zpool list [-gHLpPv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
6332 *
6333 * -g Display guid for individual vdev name.
6334 * -H Scripted mode. Don't display headers, and separate properties
6335 * by a single tab.
6336 * -L Follow links when resolving vdev path name.
6337 * -o List of properties to display. Defaults to
6338 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
6339 * "dedupratio,health,altroot"
6340 * -p Display values in parsable (exact) format.
6341 * -P Display full path for vdev name.
6342 * -T Display a timestamp in date(1) or Unix format
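 * -v	Verbose statistics.  Reports usage statistics for individual vdevs
 *	within the pool, in addition to the pool-wide statistics.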
6343 *
6344 * List all pools in the system, whether or not they're healthy. Output space
6345  * statistics for each one, as well as a health status summary.
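 *
 * Example (hypothetical pool name):
 *	zpool list -o name,size,allocated,capacity,health tank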
6346 */
6347 int
6348 zpool_do_list(int argc, char **argv)
6349 {
6350 int c;
6351 int ret = 0;
6352 list_cbdata_t cb = { 0 };
6353 static char default_props[] =
6354 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6355 "capacity,dedupratio,health,altroot";
6356 char *props = default_props;
6357 float interval = 0;
6358 unsigned long count = 0;
6359 zpool_list_t *list;
6360 boolean_t first = B_TRUE;
6361 current_prop_type = ZFS_TYPE_POOL;
6362
6363 /* check options */
6364 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
6365 switch (c) {
6366 case 'g':
6367 cb.cb_name_flags |= VDEV_NAME_GUID;
6368 break;
6369 case 'H':
6370 cb.cb_scripted = B_TRUE;
6371 break;
6372 case 'L':
6373 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6374 break;
6375 case 'o':
6376 props = optarg;
6377 break;
6378 case 'P':
6379 cb.cb_name_flags |= VDEV_NAME_PATH;
6380 break;
6381 case 'p':
6382 cb.cb_literal = B_TRUE;
6383 break;
6384 case 'T':
6385 get_timestamp_arg(*optarg);
6386 break;
6387 case 'v':
6388 cb.cb_verbose = B_TRUE;
6389 cb.cb_namewidth = 8; /* 8 until precalc is avail */
6390 break;
6391 case ':':
6392 (void) fprintf(stderr, gettext("missing argument for "
6393 "'%c' option\n"), optopt);
6394 usage(B_FALSE);
6395 break;
6396 case '?':
6397 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6398 optopt);
6399 usage(B_FALSE);
6400 }
6401 }
6402
6403 argc -= optind;
6404 argv += optind;
6405
6406 get_interval_count(&argc, argv, &interval, &count);
6407
6408 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
6409 usage(B_FALSE);
6410
6411 for (;;) {
6412 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
6413 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
6414 return (1);
6415
6416 if (pool_list_count(list) == 0)
6417 break;
6418
6419 cb.cb_namewidth = 0;
6420 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
6421
6422 if (timestamp_fmt != NODATE)
6423 print_timestamp(timestamp_fmt);
6424
6425 if (!cb.cb_scripted && (first || cb.cb_verbose)) {
6426 print_header(&cb);
6427 first = B_FALSE;
6428 }
6429 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
6430
6431 if (interval == 0)
6432 break;
6433
6434 if (count != 0 && --count == 0)
6435 break;
6436
6437 pool_list_free(list);
6438 (void) fsleep(interval);
6439 }
6440
6441 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
6442 (void) printf(gettext("no pools available\n"));
6443 ret = 0;
6444 }
6445
6446 pool_list_free(list);
6447 zprop_free_list(cb.cb_proplist);
6448 return (ret);
6449 }
6450
6451 static int
6452 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
6453 {
6454 boolean_t force = B_FALSE;
6455 boolean_t rebuild = B_FALSE;
6456 boolean_t wait = B_FALSE;
6457 int c;
6458 nvlist_t *nvroot;
6459 char *poolname, *old_disk, *new_disk;
6460 zpool_handle_t *zhp;
6461 nvlist_t *props = NULL;
6462 char *propval;
6463 int ret;
6464
6465 /* check options */
6466 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
6467 switch (c) {
6468 case 'f':
6469 force = B_TRUE;
6470 break;
6471 case 'o':
6472 if ((propval = strchr(optarg, '=')) == NULL) {
6473 (void) fprintf(stderr, gettext("missing "
6474 "'=' for -o option\n"));
6475 usage(B_FALSE);
6476 }
6477 *propval = '\0';
6478 propval++;
6479
6480 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
6481 (add_prop_list(optarg, propval, &props, B_TRUE)))
6482 usage(B_FALSE);
6483 break;
6484 case 's':
6485 rebuild = B_TRUE;
6486 break;
6487 case 'w':
6488 wait = B_TRUE;
6489 break;
6490 case '?':
6491 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6492 optopt);
6493 usage(B_FALSE);
6494 }
6495 }
6496
6497 argc -= optind;
6498 argv += optind;
6499
6500 /* get pool name and check number of arguments */
6501 if (argc < 1) {
6502 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6503 usage(B_FALSE);
6504 }
6505
6506 poolname = argv[0];
6507
6508 if (argc < 2) {
6509 (void) fprintf(stderr,
6510 gettext("missing <device> specification\n"));
6511 usage(B_FALSE);
6512 }
6513
6514 old_disk = argv[1];
6515
6516 if (argc < 3) {
6517 if (!replacing) {
6518 (void) fprintf(stderr,
6519 gettext("missing <new_device> specification\n"));
6520 usage(B_FALSE);
6521 }
6522 new_disk = old_disk;
6523 argc -= 1;
6524 argv += 1;
6525 } else {
6526 new_disk = argv[2];
6527 argc -= 2;
6528 argv += 2;
6529 }
6530
6531 if (argc > 1) {
6532 (void) fprintf(stderr, gettext("too many arguments\n"));
6533 usage(B_FALSE);
6534 }
6535
6536 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
6537 nvlist_free(props);
6538 return (1);
6539 }
6540
6541 if (zpool_get_config(zhp, NULL) == NULL) {
6542 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
6543 poolname);
6544 zpool_close(zhp);
6545 nvlist_free(props);
6546 return (1);
6547 }
6548
6549 /* Unless manually specified, use the "ashift" pool property (if set). */
6550 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
6551 int intval;
6552 zprop_source_t src;
6553 char strval[ZPOOL_MAXPROPLEN];
6554
6555 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
6556 if (src != ZPROP_SRC_DEFAULT) {
6557 (void) sprintf(strval, "%" PRId32, intval);
6558 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
6559 &props, B_TRUE) == 0);
6560 }
6561 }
6562
6563 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
6564 argc, argv);
6565 if (nvroot == NULL) {
6566 zpool_close(zhp);
6567 nvlist_free(props);
6568 return (1);
6569 }
6570
6571 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
6572 rebuild);
6573
6574 if (ret == 0 && wait)
6575 ret = zpool_wait(zhp,
6576 replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
6577
6578 nvlist_free(props);
6579 nvlist_free(nvroot);
6580 zpool_close(zhp);
6581
6582 return (ret);
6583 }
6584
6585 /*
6586 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
6587 *
6588 * -f Force attach, even if <new_device> appears to be in use.
6589 * -s Use sequential instead of healing reconstruction for resilver.
6590 * -o Set property=value.
6591 * -w Wait for replacing to complete before returning
6592 *
6593 * Replace <device> with <new_device>.
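 *
 * Example (hypothetical pool and device names):
 *	zpool replace -w tank sda sdb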
6594 */
6595 int
6596 zpool_do_replace(int argc, char **argv)
6597 {
6598 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
6599 }
6600
6601 /*
6602 * zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
6603 *
6604 * -f Force attach, even if <new_device> appears to be in use.
6605 * -s Use sequential instead of healing reconstruction for resilver.
6606 * -o Set property=value.
6607 * -w Wait for resilvering to complete before returning
6608 *
6609 * Attach <new_device> to the mirror containing <device>. If <device> is not
6610 * part of a mirror, then <device> will be transformed into a mirror of
6611 * <device> and <new_device>. In either case, <new_device> will begin life
6612 * with a DTL of [0, now], and will immediately begin to resilver itself.
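 *
 * Example (hypothetical pool and device names):
 *	zpool attach tank sda sdb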
6613 */
6614 int
6615 zpool_do_attach(int argc, char **argv)
6616 {
6617 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
6618 }
6619
6620 /*
6621 * zpool detach [-f] <pool> <device>
6622 *
6623 * -f Force detach of <device>, even if DTLs argue against it
6624 * (not supported yet)
6625 *
6626 * Detach a device from a mirror. The operation will be refused if <device>
6627 * is the last device in the mirror, or if the DTLs indicate that this device
6628 * has the only valid copy of some data.
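 *
 * Example (hypothetical pool and device names):
 *	zpool detach tank sdb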
6629 */
6630 int
6631 zpool_do_detach(int argc, char **argv)
6632 {
6633 int c;
6634 char *poolname, *path;
6635 zpool_handle_t *zhp;
6636 int ret;
6637
6638 /* check options */
6639 while ((c = getopt(argc, argv, "")) != -1) {
6640 switch (c) {
6641 case '?':
6642 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6643 optopt);
6644 usage(B_FALSE);
6645 }
6646 }
6647
6648 argc -= optind;
6649 argv += optind;
6650
6651 /* get pool name and check number of arguments */
6652 if (argc < 1) {
6653 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6654 usage(B_FALSE);
6655 }
6656
6657 if (argc < 2) {
6658 (void) fprintf(stderr,
6659 gettext("missing <device> specification\n"));
6660 usage(B_FALSE);
6661 }
6662
6663 poolname = argv[0];
6664 path = argv[1];
6665
6666 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6667 return (1);
6668
6669 ret = zpool_vdev_detach(zhp, path);
6670
6671 zpool_close(zhp);
6672
6673 return (ret);
6674 }
6675
6676 /*
6677 * zpool split [-gLnP] [-o prop=val] ...
6678 * [-o mntopt] ...
6679 * [-R altroot] <pool> <newpool> [<device> ...]
6680 *
6681 * -g Display guid for individual vdev name.
6682 * -L Follow links when resolving vdev path name.
6683 * -n Do not split the pool, but display the resulting layout if
6684 * it were to be split.
6685 * -o Set property=value, or set mount options.
6686 * -P Display full path for vdev name.
6687 * -R Mount the split-off pool under an alternate root.
6688 * -l Load encryption keys while importing.
6689 *
6690 * Splits the named pool and gives it the new pool name. Devices to be split
6691 * off may be listed, provided that no more than one device is specified
6692 * per top-level vdev mirror. The newly split pool is left in an exported
6693 * state unless -R is specified.
6694 *
6695  * Restrictions: the top level of the pool must only be made up of
6696 * mirrors; all devices in the pool must be healthy; no device may be
6697 * undergoing a resilvering operation.
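 *
 * Example (hypothetical pool names): split one side of each mirror in
 * 'tank' into a new, exported pool 'tank2':
 *	zpool split tank tank2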
6698 */
6699 int
6700 zpool_do_split(int argc, char **argv)
6701 {
6702 char *srcpool, *newpool, *propval;
6703 char *mntopts = NULL;
6704 splitflags_t flags;
6705 int c, ret = 0;
6706 boolean_t loadkeys = B_FALSE;
6707 zpool_handle_t *zhp;
6708 nvlist_t *config, *props = NULL;
6709
6710 flags.dryrun = B_FALSE;
6711 flags.import = B_FALSE;
6712 flags.name_flags = 0;
6713
6714 /* check options */
6715 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
6716 switch (c) {
6717 case 'g':
6718 flags.name_flags |= VDEV_NAME_GUID;
6719 break;
6720 case 'L':
6721 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
6722 break;
6723 case 'R':
6724 flags.import = B_TRUE;
6725 if (add_prop_list(
6726 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
6727 &props, B_TRUE) != 0) {
6728 nvlist_free(props);
6729 usage(B_FALSE);
6730 }
6731 break;
6732 case 'l':
6733 loadkeys = B_TRUE;
6734 break;
6735 case 'n':
6736 flags.dryrun = B_TRUE;
6737 break;
6738 case 'o':
6739 if ((propval = strchr(optarg, '=')) != NULL) {
6740 *propval = '\0';
6741 propval++;
6742 if (add_prop_list(optarg, propval,
6743 &props, B_TRUE) != 0) {
6744 nvlist_free(props);
6745 usage(B_FALSE);
6746 }
6747 } else {
6748 mntopts = optarg;
6749 }
6750 break;
6751 case 'P':
6752 flags.name_flags |= VDEV_NAME_PATH;
6753 break;
6754 case ':':
6755 (void) fprintf(stderr, gettext("missing argument for "
6756 "'%c' option\n"), optopt);
6757 usage(B_FALSE);
6758 break;
6759 case '?':
6760 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6761 optopt);
6762 usage(B_FALSE);
6763 break;
6764 }
6765 }
6766
6767 if (!flags.import && mntopts != NULL) {
6768 (void) fprintf(stderr, gettext("setting mntopts is only "
6769 "valid when importing the pool\n"));
6770 usage(B_FALSE);
6771 }
6772
6773 if (!flags.import && loadkeys) {
6774 (void) fprintf(stderr, gettext("loading keys is only "
6775 "valid when importing the pool\n"));
6776 usage(B_FALSE);
6777 }
6778
6779 argc -= optind;
6780 argv += optind;
6781
6782 if (argc < 1) {
6783 (void) fprintf(stderr, gettext("Missing pool name\n"));
6784 usage(B_FALSE);
6785 }
6786 if (argc < 2) {
6787 (void) fprintf(stderr, gettext("Missing new pool name\n"));
6788 usage(B_FALSE);
6789 }
6790
6791 srcpool = argv[0];
6792 newpool = argv[1];
6793
6794 argc -= 2;
6795 argv += 2;
6796
6797 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
6798 nvlist_free(props);
6799 return (1);
6800 }
6801
6802 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
6803 if (config == NULL) {
6804 ret = 1;
6805 } else {
6806 if (flags.dryrun) {
6807 (void) printf(gettext("would create '%s' with the "
6808 "following layout:\n\n"), newpool);
6809 print_vdev_tree(NULL, newpool, config, 0, "",
6810 flags.name_flags);
6811 print_vdev_tree(NULL, "dedup", config, 0,
6812 VDEV_ALLOC_BIAS_DEDUP, 0);
6813 print_vdev_tree(NULL, "special", config, 0,
6814 VDEV_ALLOC_BIAS_SPECIAL, 0);
6815 }
6816 }
6817
6818 zpool_close(zhp);
6819
6820 if (ret != 0 || flags.dryrun || !flags.import) {
6821 nvlist_free(config);
6822 nvlist_free(props);
6823 return (ret);
6824 }
6825
6826 /*
6827 * The split was successful. Now we need to open the new
6828 * pool and import it.
6829 */
6830 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
6831 nvlist_free(config);
6832 nvlist_free(props);
6833 return (1);
6834 }
6835
6836 if (loadkeys) {
6837 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
6838 if (ret != 0)
6839 ret = 1;
6840 }
6841
6842 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
6843 zpool_enable_datasets(zhp, mntopts, 0) != 0) {
6844 ret = 1;
6845 (void) fprintf(stderr, gettext("Split was successful, but "
6846 "the datasets could not all be mounted\n"));
6847 (void) fprintf(stderr, gettext("Try doing '%s' with a "
6848 "different altroot\n"), "zpool import");
6849 }
6850 zpool_close(zhp);
6851 nvlist_free(config);
6852 nvlist_free(props);
6853
6854 return (ret);
6855 }
6856
6857
6858
6859 /*
6860  * zpool online [-e] <pool> <device> ...
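 *
 * -e	Expand the device to use all available space.
 *
 * Example (hypothetical pool and device names):
 *	zpool online -e tank sda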
6861 */
6862 int
6863 zpool_do_online(int argc, char **argv)
6864 {
6865 int c, i;
6866 char *poolname;
6867 zpool_handle_t *zhp;
6868 int ret = 0;
6869 vdev_state_t newstate;
6870 int flags = 0;
6871
6872 /* check options */
6873 while ((c = getopt(argc, argv, "e")) != -1) {
6874 switch (c) {
6875 case 'e':
6876 flags |= ZFS_ONLINE_EXPAND;
6877 break;
6878 case '?':
6879 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6880 optopt);
6881 usage(B_FALSE);
6882 }
6883 }
6884
6885 argc -= optind;
6886 argv += optind;
6887
6888 /* get pool name and check number of arguments */
6889 if (argc < 1) {
6890 (void) fprintf(stderr, gettext("missing pool name\n"));
6891 usage(B_FALSE);
6892 }
6893 if (argc < 2) {
6894 (void) fprintf(stderr, gettext("missing device name\n"));
6895 usage(B_FALSE);
6896 }
6897
6898 poolname = argv[0];
6899
6900 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6901 return (1);
6902
6903 for (i = 1; i < argc; i++) {
6904 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
6905 if (newstate != VDEV_STATE_HEALTHY) {
6906 (void) printf(gettext("warning: device '%s' "
6907 "onlined, but remains in faulted state\n"),
6908 argv[i]);
6909 if (newstate == VDEV_STATE_FAULTED)
6910 (void) printf(gettext("use 'zpool "
6911 "clear' to restore a faulted "
6912 "device\n"));
6913 else
6914 (void) printf(gettext("use 'zpool "
6915 "replace' to replace devices "
6916 "that are no longer present\n"));
6917 }
6918 } else {
6919 ret = 1;
6920 }
6921 }
6922
6923 zpool_close(zhp);
6924
6925 return (ret);
6926 }
6927
6928 /*
6929 * zpool offline [-ft] <pool> <device> ...
6930 *
6931 * -f Force the device into a faulted state.
6932 *
6933 * -t Only take the device off-line temporarily. The offline/faulted
6934 * state will not be persistent across reboots.
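 *
 * Example (hypothetical pool and device names): take a device offline
 * until the next reboot:
 *	zpool offline -t tank sda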
6935 */
6936 int
6937 zpool_do_offline(int argc, char **argv)
6938 {
6939 int c, i;
6940 char *poolname;
6941 zpool_handle_t *zhp;
6942 int ret = 0;
6943 boolean_t istmp = B_FALSE;
6944 boolean_t fault = B_FALSE;
6945
6946 /* check options */
6947 while ((c = getopt(argc, argv, "ft")) != -1) {
6948 switch (c) {
6949 case 'f':
6950 fault = B_TRUE;
6951 break;
6952 case 't':
6953 istmp = B_TRUE;
6954 break;
6955 case '?':
6956 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6957 optopt);
6958 usage(B_FALSE);
6959 }
6960 }
6961
6962 argc -= optind;
6963 argv += optind;
6964
6965 /* get pool name and check number of arguments */
6966 if (argc < 1) {
6967 (void) fprintf(stderr, gettext("missing pool name\n"));
6968 usage(B_FALSE);
6969 }
6970 if (argc < 2) {
6971 (void) fprintf(stderr, gettext("missing device name\n"));
6972 usage(B_FALSE);
6973 }
6974
6975 poolname = argv[0];
6976
6977 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6978 return (1);
6979
6980 for (i = 1; i < argc; i++) {
6981 if (fault) {
6982 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
6983 vdev_aux_t aux;
6984 if (istmp == B_FALSE) {
6985 /* Force the fault to persist across imports */
6986 aux = VDEV_AUX_EXTERNAL_PERSIST;
6987 } else {
6988 aux = VDEV_AUX_EXTERNAL;
6989 }
6990
6991 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
6992 ret = 1;
6993 } else {
6994 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
6995 ret = 1;
6996 }
6997 }
6998
6999 zpool_close(zhp);
7000
7001 return (ret);
7002 }
7003
7004 /*
7005 * zpool clear <pool> [device]
7006 *
7007 * Clear all errors associated with a pool or a particular device.
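 *
 * The -F, -n and -X options (not shown above) select a rewind policy;
 * see the rewind_policy handling below (-F rewind, -n dry-run rewind,
 * -X extreme rewind).
 *
 * Example (hypothetical pool and device names):
 *	zpool clear tank sda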
7008 */
7009 int
7010 zpool_do_clear(int argc, char **argv)
7011 {
7012 int c;
7013 int ret = 0;
7014 boolean_t dryrun = B_FALSE;
7015 boolean_t do_rewind = B_FALSE;
7016 boolean_t xtreme_rewind = B_FALSE;
7017 uint32_t rewind_policy = ZPOOL_NO_REWIND;
7018 nvlist_t *policy = NULL;
7019 zpool_handle_t *zhp;
7020 char *pool, *device;
7021
7022 /* check options */
7023 while ((c = getopt(argc, argv, "FnX")) != -1) {
7024 switch (c) {
7025 case 'F':
7026 do_rewind = B_TRUE;
7027 break;
7028 case 'n':
7029 dryrun = B_TRUE;
7030 break;
7031 case 'X':
7032 xtreme_rewind = B_TRUE;
7033 break;
7034 case '?':
7035 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7036 optopt);
7037 usage(B_FALSE);
7038 }
7039 }
7040
7041 argc -= optind;
7042 argv += optind;
7043
7044 if (argc < 1) {
7045 (void) fprintf(stderr, gettext("missing pool name\n"));
7046 usage(B_FALSE);
7047 }
7048
7049 if (argc > 2) {
7050 (void) fprintf(stderr, gettext("too many arguments\n"));
7051 usage(B_FALSE);
7052 }
7053
7054 if ((dryrun || xtreme_rewind) && !do_rewind) {
7055 (void) fprintf(stderr,
7056 gettext("-n or -X only meaningful with -F\n"));
7057 usage(B_FALSE);
7058 }
7059 if (dryrun)
7060 rewind_policy = ZPOOL_TRY_REWIND;
7061 else if (do_rewind)
7062 rewind_policy = ZPOOL_DO_REWIND;
7063 if (xtreme_rewind)
7064 rewind_policy |= ZPOOL_EXTREME_REWIND;
7065
7066 /* In future, further rewind policy choices can be passed along here */
7067 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
7068 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
7069 rewind_policy) != 0) {
7070 return (1);
7071 }
7072
7073 pool = argv[0];
7074 device = argc == 2 ? argv[1] : NULL;
7075
7076 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
7077 nvlist_free(policy);
7078 return (1);
7079 }
7080
7081 if (zpool_clear(zhp, device, policy) != 0)
7082 ret = 1;
7083
7084 zpool_close(zhp);
7085
7086 nvlist_free(policy);
7087
7088 return (ret);
7089 }
7090
7091 /*
7092 * zpool reguid <pool>
7093 */
7094 int
7095 zpool_do_reguid(int argc, char **argv)
7096 {
7097 int c;
7098 char *poolname;
7099 zpool_handle_t *zhp;
7100 int ret = 0;
7101
7102 /* check options */
7103 while ((c = getopt(argc, argv, "")) != -1) {
7104 switch (c) {
7105 case '?':
7106 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7107 optopt);
7108 usage(B_FALSE);
7109 }
7110 }
7111
7112 argc -= optind;
7113 argv += optind;
7114
7115 /* get pool name and check number of arguments */
7116 if (argc < 1) {
7117 (void) fprintf(stderr, gettext("missing pool name\n"));
7118 usage(B_FALSE);
7119 }
7120
7121 if (argc > 1) {
7122 (void) fprintf(stderr, gettext("too many arguments\n"));
7123 usage(B_FALSE);
7124 }
7125
7126 poolname = argv[0];
7127 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7128 return (1);
7129
7130 ret = zpool_reguid(zhp);
7131
7132 zpool_close(zhp);
7133 return (ret);
7134 }
7135
7136
7137 /*
7138  * zpool reopen [-n] <pool>
7139 *
7140 * Reopen the pool so that the kernel can update the sizes of all vdevs.
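 *
 * -n	Do not restart an in-progress scrub operation.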
7141 */
7142 int
7143 zpool_do_reopen(int argc, char **argv)
7144 {
7145 int c;
7146 int ret = 0;
7147 boolean_t scrub_restart = B_TRUE;
7148
7149 /* check options */
7150 while ((c = getopt(argc, argv, "n")) != -1) {
7151 switch (c) {
7152 case 'n':
7153 scrub_restart = B_FALSE;
7154 break;
7155 case '?':
7156 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7157 optopt);
7158 usage(B_FALSE);
7159 }
7160 }
7161
7162 argc -= optind;
7163 argv += optind;
7164
7165 /* if argc == 0 we will execute zpool_reopen_one on all pools */
7166 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7167 B_FALSE, zpool_reopen_one, &scrub_restart);
7168
7169 return (ret);
7170 }
7171
7172 typedef struct scrub_cbdata {
7173 int cb_type;
7174 pool_scrub_cmd_t cb_scrub_cmd;
7175 } scrub_cbdata_t;
7176
7177 static boolean_t
7178 zpool_has_checkpoint(zpool_handle_t *zhp)
7179 {
7180 nvlist_t *config, *nvroot;
7181
7182 config = zpool_get_config(zhp, NULL);
7183
7184 if (config != NULL) {
7185 pool_checkpoint_stat_t *pcs = NULL;
7186 uint_t c;
7187
7188 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7189 (void) nvlist_lookup_uint64_array(nvroot,
7190 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7191
7192 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7193 return (B_FALSE);
7194
7195 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
7196 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7197 return (B_TRUE);
7198 }
7199
7200 return (B_FALSE);
7201 }
7202
7203 static int
7204 scrub_callback(zpool_handle_t *zhp, void *data)
7205 {
7206 scrub_cbdata_t *cb = data;
7207 int err;
7208
7209 /*
7210 * Ignore faulted pools.
7211 */
7212 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
7213 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
7214 "currently unavailable\n"), zpool_get_name(zhp));
7215 return (1);
7216 }
7217
7218 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
7219
7220 if (err == 0 && zpool_has_checkpoint(zhp) &&
7221 cb->cb_type == POOL_SCAN_SCRUB) {
7222 (void) printf(gettext("warning: will not scrub state that "
7223 "belongs to the checkpoint of pool '%s'\n"),
7224 zpool_get_name(zhp));
7225 }
7226
7227 return (err != 0);
7228 }
7229
7230 static int
7231 wait_callback(zpool_handle_t *zhp, void *data)
7232 {
7233 zpool_wait_activity_t *act = data;
7234 return (zpool_wait(zhp, *act));
7235 }
7236
7237 /*
7238 * zpool scrub [-s | -p] [-w] <pool> ...
7239 *
7240 * -s Stop. Stops any in-progress scrub.
7241 * -p Pause. Pause in-progress scrub.
7242 * -w Wait. Blocks until scrub has completed.
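 *
 * Example (hypothetical pool name): start a scrub and block until it
 * completes:
 *	zpool scrub -w tank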
7243 */
7244 int
7245 zpool_do_scrub(int argc, char **argv)
7246 {
7247 int c;
7248 scrub_cbdata_t cb;
7249 boolean_t wait = B_FALSE;
7250 int error;
7251
7252 cb.cb_type = POOL_SCAN_SCRUB;
7253 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7254
7255 /* check options */
7256 while ((c = getopt(argc, argv, "spw")) != -1) {
7257 switch (c) {
7258 case 's':
7259 cb.cb_type = POOL_SCAN_NONE;
7260 break;
7261 case 'p':
7262 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
7263 break;
7264 case 'w':
7265 wait = B_TRUE;
7266 break;
7267 case '?':
7268 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7269 optopt);
7270 usage(B_FALSE);
7271 }
7272 }
7273
7274 if (cb.cb_type == POOL_SCAN_NONE &&
7275 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
7276 (void) fprintf(stderr, gettext("invalid option combination: "
7277 "-s and -p are mutually exclusive\n"));
7278 usage(B_FALSE);
7279 }
7280
7281 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
7282 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
7283 (void) fprintf(stderr, gettext("invalid option combination: "
7284 "-w cannot be used with -p or -s\n"));
7285 usage(B_FALSE);
7286 }
7287
7288 argc -= optind;
7289 argv += optind;
7290
7291 if (argc < 1) {
7292 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7293 usage(B_FALSE);
7294 }
7295
7296 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7297 B_FALSE, scrub_callback, &cb);
7298
7299 if (wait && !error) {
7300 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
7301 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7302 B_FALSE, wait_callback, &act);
7303 }
7304
7305 return (error);
7306 }
7307
7308 /*
7309 * zpool resilver <pool> ...
7310 *
7311  * Restarts any in-progress resilver.
7312 */
7313 int
7314 zpool_do_resilver(int argc, char **argv)
7315 {
7316 int c;
7317 scrub_cbdata_t cb;
7318
7319 cb.cb_type = POOL_SCAN_RESILVER;
7320 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7321
7322 /* check options */
7323 while ((c = getopt(argc, argv, "")) != -1) {
7324 switch (c) {
7325 case '?':
7326 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7327 optopt);
7328 usage(B_FALSE);
7329 }
7330 }
7331
7332 argc -= optind;
7333 argv += optind;
7334
7335 if (argc < 1) {
7336 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7337 usage(B_FALSE);
7338 }
7339
7340 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7341 B_FALSE, scrub_callback, &cb));
7342 }
7343
7344 /*
7345  * zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
7346 *
7347 * -c Cancel. Ends any in-progress trim.
7348 * -d Secure trim. Requires kernel and device support.
7349 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
7350 * adding a multiplier suffix such as 'k' or 'm'.
7351 * -s Suspend. TRIM can then be restarted with no flags.
7352 * -w Wait. Blocks until trimming has completed.
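 *
 * Example (hypothetical pool and device names): securely TRIM a single
 * device at a rate of 100M/s:
 *	zpool trim -d -r 100M tank sda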
7353 */
7354 int
7355 zpool_do_trim(int argc, char **argv)
7356 {
7357 struct option long_options[] = {
7358 {"cancel", no_argument, NULL, 'c'},
7359 {"secure", no_argument, NULL, 'd'},
7360 {"rate", required_argument, NULL, 'r'},
7361 {"suspend", no_argument, NULL, 's'},
7362 {"wait", no_argument, NULL, 'w'},
7363 {0, 0, 0, 0}
7364 };
7365
7366 pool_trim_func_t cmd_type = POOL_TRIM_START;
7367 uint64_t rate = 0;
7368 boolean_t secure = B_FALSE;
7369 boolean_t wait = B_FALSE;
7370
7371 int c;
7372 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
7373 != -1) {
7374 switch (c) {
7375 case 'c':
7376 if (cmd_type != POOL_TRIM_START &&
7377 cmd_type != POOL_TRIM_CANCEL) {
7378 (void) fprintf(stderr, gettext("-c cannot be "
7379 "combined with other options\n"));
7380 usage(B_FALSE);
7381 }
7382 cmd_type = POOL_TRIM_CANCEL;
7383 break;
7384 case 'd':
7385 if (cmd_type != POOL_TRIM_START) {
7386 (void) fprintf(stderr, gettext("-d cannot be "
7387 "combined with the -c or -s options\n"));
7388 usage(B_FALSE);
7389 }
7390 secure = B_TRUE;
7391 break;
7392 case 'r':
7393 if (cmd_type != POOL_TRIM_START) {
7394 (void) fprintf(stderr, gettext("-r cannot be "
7395 "combined with the -c or -s options\n"));
7396 usage(B_FALSE);
7397 }
7398 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
7399 (void) fprintf(stderr, "%s: %s\n",
7400 gettext("invalid value for rate"),
7401 libzfs_error_description(g_zfs));
7402 usage(B_FALSE);
7403 }
7404 break;
7405 case 's':
7406 if (cmd_type != POOL_TRIM_START &&
7407 cmd_type != POOL_TRIM_SUSPEND) {
7408 (void) fprintf(stderr, gettext("-s cannot be "
7409 "combined with other options\n"));
7410 usage(B_FALSE);
7411 }
7412 cmd_type = POOL_TRIM_SUSPEND;
7413 break;
7414 case 'w':
7415 wait = B_TRUE;
7416 break;
7417 case '?':
7418 if (optopt != 0) {
7419 (void) fprintf(stderr,
7420 gettext("invalid option '%c'\n"), optopt);
7421 } else {
7422 (void) fprintf(stderr,
7423 gettext("invalid option '%s'\n"),
7424 argv[optind - 1]);
7425 }
7426 usage(B_FALSE);
7427 }
7428 }
7429
7430 argc -= optind;
7431 argv += optind;
7432
7433 if (argc < 1) {
7434 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7435 usage(B_FALSE);
7436 return (-1);
7437 }
7438
7439 if (wait && (cmd_type != POOL_TRIM_START)) {
7440 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
7441 "-s\n"));
7442 usage(B_FALSE);
7443 }
7444
7445 char *poolname = argv[0];
7446 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
7447 if (zhp == NULL)
7448 return (-1);
7449
7450 trimflags_t trim_flags = {
7451 .secure = secure,
7452 .rate = rate,
7453 .wait = wait,
7454 };
7455
7456 nvlist_t *vdevs = fnvlist_alloc();
7457 if (argc == 1) {
7458 /* no individual leaf vdevs specified, so add them all */
7459 nvlist_t *config = zpool_get_config(zhp, NULL);
7460 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
7461 ZPOOL_CONFIG_VDEV_TREE);
7462 zpool_collect_leaves(zhp, nvroot, vdevs);
7463 trim_flags.fullpool = B_TRUE;
7464 } else {
7465 trim_flags.fullpool = B_FALSE;
7466 for (int i = 1; i < argc; i++) {
7467 fnvlist_add_boolean(vdevs, argv[i]);
7468 }
7469 }
7470
7471 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
7472
7473 fnvlist_free(vdevs);
7474 zpool_close(zhp);
7475
7476 return (error);
7477 }
7478
7479 /*
7480  * Converts a total number of seconds to a human-readable string broken
7481  * down into days/hours/minutes/seconds.
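 *
 * Example (illustrative): 90061 seconds formats as "1 days 01:01:01".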
7482 */
7483 static void
7484 secs_to_dhms(uint64_t total, char *buf)
7485 {
7486 uint64_t days = total / 60 / 60 / 24;
7487 uint64_t hours = (total / 60 / 60) % 24;
7488 uint64_t mins = (total / 60) % 60;
7489 uint64_t secs = (total % 60);
7490
7491 if (days > 0) {
7492 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
7493 (u_longlong_t)days, (u_longlong_t)hours,
7494 (u_longlong_t)mins, (u_longlong_t)secs);
7495 } else {
7496 (void) sprintf(buf, "%02llu:%02llu:%02llu",
7497 (u_longlong_t)hours, (u_longlong_t)mins,
7498 (u_longlong_t)secs);
7499 }
7500 }
7501
7502 /*
7503 * Print out detailed scrub status.
7504 */
7505 static void
7506 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
7507 {
7508 time_t start, end, pause;
7509 uint64_t pass_scanned, scanned, pass_issued, issued, total;
7510 uint64_t elapsed, scan_rate, issue_rate;
7511 double fraction_done;
7512 char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
7513 char srate_buf[7], irate_buf[7], time_buf[32];
7514
7515 printf(" ");
7516 printf_color(ANSI_BOLD, gettext("scan:"));
7517 printf(" ");
7518
7519 /* If there's never been a scan, there's not much to say. */
7520 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
7521 ps->pss_func >= POOL_SCAN_FUNCS) {
7522 (void) printf(gettext("none requested\n"));
7523 return;
7524 }
7525
7526 start = ps->pss_start_time;
7527 end = ps->pss_end_time;
7528 pause = ps->pss_pass_scrub_pause;
7529
7530 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
7531
7532 assert(ps->pss_func == POOL_SCAN_SCRUB ||
7533 ps->pss_func == POOL_SCAN_RESILVER);
7534
7535 /* Scan is finished or canceled. */
7536 if (ps->pss_state == DSS_FINISHED) {
7537 secs_to_dhms(end - start, time_buf);
7538
7539 if (ps->pss_func == POOL_SCAN_SCRUB) {
7540 (void) printf(gettext("scrub repaired %s "
7541 "in %s with %llu errors on %s"), processed_buf,
7542 time_buf, (u_longlong_t)ps->pss_errors,
7543 ctime(&end));
7544 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7545 (void) printf(gettext("resilvered %s "
7546 "in %s with %llu errors on %s"), processed_buf,
7547 time_buf, (u_longlong_t)ps->pss_errors,
7548 ctime(&end));
7549 }
7550 return;
7551 } else if (ps->pss_state == DSS_CANCELED) {
7552 if (ps->pss_func == POOL_SCAN_SCRUB) {
7553 (void) printf(gettext("scrub canceled on %s"),
7554 ctime(&end));
7555 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7556 (void) printf(gettext("resilver canceled on %s"),
7557 ctime(&end));
7558 }
7559 return;
7560 }
7561
7562 assert(ps->pss_state == DSS_SCANNING);
7563
7564 /* Scan is in progress. Resilvers can't be paused. */
7565 if (ps->pss_func == POOL_SCAN_SCRUB) {
7566 if (pause == 0) {
7567 (void) printf(gettext("scrub in progress since %s"),
7568 ctime(&start));
7569 } else {
7570 (void) printf(gettext("scrub paused since %s"),
7571 ctime(&pause));
7572 (void) printf(gettext("\tscrub started on %s"),
7573 ctime(&start));
7574 }
7575 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7576 (void) printf(gettext("resilver in progress since %s"),
7577 ctime(&start));
7578 }
7579
7580 scanned = ps->pss_examined;
7581 pass_scanned = ps->pss_pass_exam;
7582 issued = ps->pss_issued;
7583 pass_issued = ps->pss_pass_issued;
7584 total = ps->pss_to_examine;
7585
7586 /* we are only done with a block once we have issued the IO for it */
7587 fraction_done = (double)issued / total;
7588
7589 /* elapsed time for this pass, rounding up to 1 if it's 0 */
7590 elapsed = time(NULL) - ps->pss_pass_start;
7591 elapsed -= ps->pss_pass_scrub_spent_paused;
7592 elapsed = (elapsed != 0) ? elapsed : 1;
7593
7594 scan_rate = pass_scanned / elapsed;
7595 issue_rate = pass_issued / elapsed;
7596 uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ?
7597 ((total - issued) / issue_rate) : UINT64_MAX;
7598 secs_to_dhms(total_secs_left, time_buf);
7599
7600 /* format all of the numbers we will be reporting */
7601 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
7602 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
7603 zfs_nicebytes(total, total_buf, sizeof (total_buf));
7604 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
7605 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
7606
7607 /* do not print estimated time if we have a paused scrub */
7608 if (pause == 0) {
7609 (void) printf(gettext("\t%s scanned at %s/s, "
7610 "%s issued at %s/s, %s total\n"),
7611 scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
7612 } else {
7613 (void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
7614 scanned_buf, issued_buf, total_buf);
7615 }
7616
7617 if (ps->pss_func == POOL_SCAN_RESILVER) {
7618 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
7619 processed_buf, 100 * fraction_done);
7620 } else if (ps->pss_func == POOL_SCAN_SCRUB) {
7621 (void) printf(gettext("\t%s repaired, %.2f%% done"),
7622 processed_buf, 100 * fraction_done);
7623 }
7624
7625 if (pause == 0) {
7626 if (total_secs_left != UINT64_MAX &&
7627 issue_rate >= 10 * 1024 * 1024) {
7628 (void) printf(gettext(", %s to go\n"), time_buf);
7629 } else {
7630 (void) printf(gettext(", no estimated "
7631 "completion time\n"));
7632 }
7633 } else {
7634 (void) printf(gettext("\n"));
7635 }
7636 }
7637
7638 static void
7639 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name)
7640 {
7641 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
7642 return;
7643
7644 printf(" ");
7645 printf_color(ANSI_BOLD, gettext("scan:"));
7646 printf(" ");
7647
7648 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
7649 uint64_t bytes_issued = vrs->vrs_bytes_issued;
7650 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
7651 uint64_t bytes_est = vrs->vrs_bytes_est;
7652 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
7653 (vrs->vrs_pass_time_ms + 1)) * 1000;
7654 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
7655 (vrs->vrs_pass_time_ms + 1)) * 1000;
7656 double scan_pct = MIN((double)bytes_scanned * 100 /
7657 (bytes_est + 1), 100);
7658
7659 /* Format all of the numbers we will be reporting */
7660 char bytes_scanned_buf[7], bytes_issued_buf[7];
7661 char bytes_rebuilt_buf[7], bytes_est_buf[7];
7662 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
7663 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
7664 sizeof (bytes_scanned_buf));
7665 zfs_nicebytes(bytes_issued, bytes_issued_buf,
7666 sizeof (bytes_issued_buf));
7667 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
7668 sizeof (bytes_rebuilt_buf));
7669 zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf));
7670 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
7671 zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf));
7672
7673 time_t start = vrs->vrs_start_time;
7674 time_t end = vrs->vrs_end_time;
7675
7676 /* Rebuild is finished or canceled. */
7677 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
7678 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
7679 (void) printf(gettext("resilvered (%s) %s in %s "
7680 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
7681 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
7682 return;
7683 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
7684 (void) printf(gettext("resilver (%s) canceled on %s"),
7685 vdev_name, ctime(&end));
7686 return;
7687 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7688 (void) printf(gettext("resilver (%s) in progress since %s"),
7689 vdev_name, ctime(&start));
7690 }
7691
7692 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
7693
7694 secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) /
7695 MAX(scan_rate, 1), time_buf);
7696
7697 (void) printf(gettext("\t%s scanned at %s/s, %s issued at %s/s, "
7698 "%s total\n"), bytes_scanned_buf, scan_rate_buf,
7699 bytes_issued_buf, issue_rate_buf, bytes_est_buf);
7700 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
7701 bytes_rebuilt_buf, scan_pct);
7702
7703 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7704 if (scan_rate >= 10 * 1024 * 1024) {
7705 (void) printf(gettext(", %s to go\n"), time_buf);
7706 } else {
7707 (void) printf(gettext(", no estimated "
7708 "completion time\n"));
7709 }
7710 } else {
7711 (void) printf(gettext("\n"));
7712 }
7713 }
7714
7715 /*
7716 * Print rebuild status for top-level vdevs.
7717 */
7718 static void
7719 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
7720 {
7721 nvlist_t **child;
7722 uint_t children;
7723
7724 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7725 &child, &children) != 0)
7726 children = 0;
7727
7728 for (uint_t c = 0; c < children; c++) {
7729 vdev_rebuild_stat_t *vrs;
7730 uint_t i;
7731
7732 if (nvlist_lookup_uint64_array(child[c],
7733 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
7734 char *name = zpool_vdev_name(g_zfs, zhp,
7735 child[c], VDEV_NAME_TYPE_ID);
7736 print_rebuild_status_impl(vrs, name);
7737 free(name);
7738 }
7739 }
7740 }
7741
7742 /*
7743 * As we don't scrub checkpointed blocks, we want to warn the user that we
7744 * skipped scanning some blocks if a checkpoint exists or existed at any
7745  * time during the scan. If a sequential (rebuild) rather than a healing
7746  * reconstruction was performed, the blocks were reconstructed; however,
7747  * their checksums have not been verified, so we still print the warning.
7748 */
7749 static void
7750 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
7751 {
7752 if (ps == NULL || pcs == NULL)
7753 return;
7754
7755 if (pcs->pcs_state == CS_NONE ||
7756 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
7757 return;
7758
7759 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
7760
7761 if (ps->pss_state == DSS_NONE)
7762 return;
7763
7764 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
7765 ps->pss_end_time < pcs->pcs_start_time)
7766 return;
7767
7768 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
7769 (void) printf(gettext(" scan warning: skipped blocks "
7770 "that are only referenced by the checkpoint.\n"));
7771 } else {
7772 assert(ps->pss_state == DSS_SCANNING);
7773 (void) printf(gettext(" scan warning: skipping blocks "
7774 "that are only referenced by the checkpoint.\n"));
7775 }
7776 }
7777
7778 /*
7779 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
7780 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
7781 * the last completed (or cancelled) rebuild.
7782 */
7783 static boolean_t
7784 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
7785 {
7786 nvlist_t **child;
7787 uint_t children;
7788 boolean_t rebuilding = B_FALSE;
7789 uint64_t end_time = 0;
7790
7791 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7792 &child, &children) != 0)
7793 children = 0;
7794
7795 for (uint_t c = 0; c < children; c++) {
7796 vdev_rebuild_stat_t *vrs;
7797 uint_t i;
7798
7799 if (nvlist_lookup_uint64_array(child[c],
7800 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
7801
7802 if (vrs->vrs_end_time > end_time)
7803 end_time = vrs->vrs_end_time;
7804
7805 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7806 rebuilding = B_TRUE;
7807 end_time = 0;
7808 break;
7809 }
7810 }
7811 }
7812
7813 if (rebuild_end_time != NULL)
7814 *rebuild_end_time = end_time;
7815
7816 return (rebuilding);
7817 }
7818
7819 /*
7820 * Print the scan status.
7821 */
7822 static void
7823 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
7824 {
7825 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
7826 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
7827 boolean_t active_resilver = B_FALSE;
7828 pool_checkpoint_stat_t *pcs = NULL;
7829 pool_scan_stat_t *ps = NULL;
7830 uint_t c;
7831
7832 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
7833 (uint64_t **)&ps, &c) == 0) {
7834 if (ps->pss_func == POOL_SCAN_RESILVER) {
7835 resilver_end_time = ps->pss_end_time;
7836 active_resilver = (ps->pss_state == DSS_SCANNING);
7837 }
7838
7839 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
7840 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
7841 }
7842
7843 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
7844 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
7845
7846 /* Always print the scrub status when available. */
7847 if (have_scrub)
7848 print_scan_scrub_resilver_status(ps);
7849
7850 /*
7851 * When there is an active resilver or rebuild print its status.
7852 * Otherwise print the status of the last resilver or rebuild.
7853 */
7854 if (active_resilver || (!active_rebuild && have_resilver &&
7855 resilver_end_time && resilver_end_time > rebuild_end_time)) {
7856 print_scan_scrub_resilver_status(ps);
7857 } else if (active_rebuild || (!active_resilver && have_rebuild &&
7858 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
7859 print_rebuild_status(zhp, nvroot);
7860 }
7861
7862 (void) nvlist_lookup_uint64_array(nvroot,
7863 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7864 print_checkpoint_scan_warning(ps, pcs);
7865 }
7866
7867 /*
7868 * Print out detailed removal status.
7869 */
7870 static void
7871 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
7872 {
7873 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
7874 time_t start, end;
7875 nvlist_t *config, *nvroot;
7876 nvlist_t **child;
7877 uint_t children;
7878 char *vdev_name;
7879
7880 if (prs == NULL || prs->prs_state == DSS_NONE)
7881 return;
7882
7883 /*
7884 * Determine name of vdev.
7885 */
7886 config = zpool_get_config(zhp, NULL);
7887 nvroot = fnvlist_lookup_nvlist(config,
7888 ZPOOL_CONFIG_VDEV_TREE);
7889 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7890 &child, &children) == 0);
7891 assert(prs->prs_removing_vdev < children);
7892 vdev_name = zpool_vdev_name(g_zfs, zhp,
7893 child[prs->prs_removing_vdev], B_TRUE);
7894
7895 printf_color(ANSI_BOLD, gettext("remove: "));
7896
7897 start = prs->prs_start_time;
7898 end = prs->prs_end_time;
7899 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
7900
7901 /*
7902 * Removal is finished or canceled.
7903 */
7904 if (prs->prs_state == DSS_FINISHED) {
7905 uint64_t minutes_taken = (end - start) / 60;
7906
7907 (void) printf(gettext("Removal of vdev %llu copied %s "
7908 "in %lluh%um, completed on %s"),
7909 (longlong_t)prs->prs_removing_vdev,
7910 copied_buf,
7911 (u_longlong_t)(minutes_taken / 60),
7912 (uint_t)(minutes_taken % 60),
7913 ctime((time_t *)&end));
7914 } else if (prs->prs_state == DSS_CANCELED) {
7915 (void) printf(gettext("Removal of %s canceled on %s"),
7916 vdev_name, ctime(&end));
7917 } else {
7918 uint64_t copied, total, elapsed, mins_left, hours_left;
7919 double fraction_done;
7920 uint_t rate;
7921
7922 assert(prs->prs_state == DSS_SCANNING);
7923
7924 /*
7925 * Removal is in progress.
7926 */
7927 (void) printf(gettext(
7928 "Evacuation of %s in progress since %s"),
7929 vdev_name, ctime(&start));
7930
7931 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
7932 total = prs->prs_to_copy;
7933 fraction_done = (double)copied / total;
7934
7935 /* elapsed time for this pass */
7936 elapsed = time(NULL) - prs->prs_start_time;
7937 elapsed = elapsed > 0 ? elapsed : 1;
7938 rate = copied / elapsed;
7939 rate = rate > 0 ? rate : 1;
7940 mins_left = ((total - copied) / rate) / 60;
7941 hours_left = mins_left / 60;
7942
7943 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
7944 zfs_nicenum(total, total_buf, sizeof (total_buf));
7945 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
7946
7947 /*
7948 * do not print estimated time if hours_left is more than
7949 * 30 days
7950 */
7951 (void) printf(gettext(
7952 "\t%s copied out of %s at %s/s, %.2f%% done"),
7953 examined_buf, total_buf, rate_buf, 100 * fraction_done);
7954 if (hours_left < (30 * 24)) {
7955 (void) printf(gettext(", %lluh%um to go\n"),
7956 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
7957 } else {
7958 (void) printf(gettext(
7959 ", (copy is slow, no estimated time)\n"));
7960 }
7961 }
7962 free(vdev_name);
7963
7964 if (prs->prs_mapping_memory > 0) {
7965 char mem_buf[7];
7966 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
7967 (void) printf(gettext(
7968 "\t%s memory used for removed device mappings\n"),
7969 mem_buf);
7970 }
7971 }
7972
7973 static void
7974 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
7975 {
7976 time_t start;
7977 char space_buf[7];
7978
7979 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7980 return;
7981
7982 (void) printf(gettext("checkpoint: "));
7983
7984 start = pcs->pcs_start_time;
7985 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
7986
7987 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
7988 char *date = ctime(&start);
7989
7990 /*
7991 * ctime() adds a newline at the end of the generated
7992 * string, thus the weird format specifier and the
7993 * strlen() call used to chop it off from the output.
7994 */
7995 (void) printf(gettext("created %.*s, consumes %s\n"),
7996 (int)(strlen(date) - 1), date, space_buf);
7997 return;
7998 }
7999
8000 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8001
8002 (void) printf(gettext("discarding, %s remaining.\n"),
8003 space_buf);
8004 }
8005
8006 static void
8007 print_error_log(zpool_handle_t *zhp)
8008 {
8009 nvlist_t *nverrlist = NULL;
8010 nvpair_t *elem;
8011 char *pathname;
8012 size_t len = MAXPATHLEN * 2;
8013
8014 if (zpool_get_errlog(zhp, &nverrlist) != 0)
8015 return;
8016
8017 (void) printf("errors: Permanent errors have been "
8018 "detected in the following files:\n\n");
8019
8020 pathname = safe_malloc(len);
8021 elem = NULL;
8022 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
8023 nvlist_t *nv;
8024 uint64_t dsobj, obj;
8025
8026 verify(nvpair_value_nvlist(elem, &nv) == 0);
8027 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
8028 &dsobj) == 0);
8029 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
8030 &obj) == 0);
8031 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
8032 (void) printf("%7s %s\n", "", pathname);
8033 }
8034 free(pathname);
8035 nvlist_free(nverrlist);
8036 }
8037
8038 static void
8039 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
8040 uint_t nspares)
8041 {
8042 uint_t i;
8043 char *name;
8044
8045 if (nspares == 0)
8046 return;
8047
8048 (void) printf(gettext("\tspares\n"));
8049
8050 for (i = 0; i < nspares; i++) {
8051 name = zpool_vdev_name(g_zfs, zhp, spares[i],
8052 cb->cb_name_flags);
8053 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
8054 free(name);
8055 }
8056 }
8057
8058 static void
8059 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
8060 uint_t nl2cache)
8061 {
8062 uint_t i;
8063 char *name;
8064
8065 if (nl2cache == 0)
8066 return;
8067
8068 (void) printf(gettext("\tcache\n"));
8069
8070 for (i = 0; i < nl2cache; i++) {
8071 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
8072 cb->cb_name_flags);
8073 print_status_config(zhp, cb, name, l2cache[i], 2,
8074 B_FALSE, NULL);
8075 free(name);
8076 }
8077 }
8078
8079 static void
8080 print_dedup_stats(nvlist_t *config)
8081 {
8082 ddt_histogram_t *ddh;
8083 ddt_stat_t *dds;
8084 ddt_object_t *ddo;
8085 uint_t c;
8086 char dspace[6], mspace[6];
8087
8088 /*
8089 * If the pool was faulted then we may not have been able to
8090 * obtain the config. Otherwise, if we have anything in the dedup
8091 * table continue processing the stats.
8092 */
8093 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
8094 (uint64_t **)&ddo, &c) != 0)
8095 return;
8096
8097 (void) printf("\n");
8098 (void) printf(gettext(" dedup: "));
8099 if (ddo->ddo_count == 0) {
8100 (void) printf(gettext("no DDT entries\n"));
8101 return;
8102 }
8103
8104 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
8105 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
8106 (void) printf("DDT entries %llu, size %s on disk, %s in core\n",
8107 (u_longlong_t)ddo->ddo_count,
8108 dspace,
8109 mspace);
8110
8111 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
8112 (uint64_t **)&dds, &c) == 0);
8113 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
8114 (uint64_t **)&ddh, &c) == 0);
8115 zpool_dump_ddt(dds, ddh);
8116 }
8117
8118 /*
8119 * Display a summary of pool status, such as:
8120 *
8121 * pool: tank
8122 * status: DEGRADED
8123 * reason: One or more devices ...
8124 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
8125 * config:
8126 * mirror DEGRADED
8127 * c1t0d0 OK
8128 * c2t0d0 UNAVAIL
8129 *
8130 * When given the '-v' option, we print out the complete config and the
8131 * complete error logs as well.
8132 */
8133 static int
8134 status_callback(zpool_handle_t *zhp, void *data)
8135 {
8136 status_cbdata_t *cbp = data;
8137 nvlist_t *config, *nvroot;
8138 char *msgid;
8139 zpool_status_t reason;
8140 zpool_errata_t errata;
8141 const char *health;
8142 uint_t c;
8143 vdev_stat_t *vs;
8144
8145 config = zpool_get_config(zhp, NULL);
8146 reason = zpool_get_status(zhp, &msgid, &errata);
8147
8148 cbp->cb_count++;
8149
8150 /*
8151 * If we were given 'zpool status -x', only report those pools with
8152 * problems.
8153 */
8154 if (cbp->cb_explain &&
8155 (reason == ZPOOL_STATUS_OK ||
8156 reason == ZPOOL_STATUS_VERSION_OLDER ||
8157 reason == ZPOOL_STATUS_FEAT_DISABLED ||
8158 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
8159 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
8160 if (!cbp->cb_allpools) {
8161 (void) printf(gettext("pool '%s' is healthy\n"),
8162 zpool_get_name(zhp));
8163 if (cbp->cb_first)
8164 cbp->cb_first = B_FALSE;
8165 }
8166 return (0);
8167 }
8168
8169 if (cbp->cb_first)
8170 cbp->cb_first = B_FALSE;
8171 else
8172 (void) printf("\n");
8173
8174 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8175 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
8176 (uint64_t **)&vs, &c) == 0);
8177
8178 health = zpool_get_state_str(zhp);
8179
8180 printf(" ");
8181 printf_color(ANSI_BOLD, gettext("pool:"));
8182 printf(" %s\n", zpool_get_name(zhp));
8183 printf(" ");
8184 printf_color(ANSI_BOLD, gettext("state: "));
8185
8186 printf_color(health_str_to_color(health), "%s", health);
8187
8188 printf("\n");
8189
8190 switch (reason) {
8191 case ZPOOL_STATUS_MISSING_DEV_R:
8192 printf_color(ANSI_BOLD, gettext("status: "));
8193 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8194 "not be opened. Sufficient replicas exist for\n\tthe pool "
8195 "to continue functioning in a degraded state.\n"));
8196 printf_color(ANSI_BOLD, gettext("action: "));
8197 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8198 "and online it using 'zpool online'.\n"));
8199 break;
8200
8201 case ZPOOL_STATUS_MISSING_DEV_NR:
8202 printf_color(ANSI_BOLD, gettext("status: "));
8203 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8204 "not be opened. There are insufficient\n\treplicas for the"
8205 " pool to continue functioning.\n"));
8206 printf_color(ANSI_BOLD, gettext("action: "));
8207 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8208 "and online it using 'zpool online'.\n"));
8209 break;
8210
8211 case ZPOOL_STATUS_CORRUPT_LABEL_R:
8212 printf_color(ANSI_BOLD, gettext("status: "));
8213 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8214 "not be used because the label is missing or\n\tinvalid. "
8215 "Sufficient replicas exist for the pool to continue\n\t"
8216 "functioning in a degraded state.\n"));
8217 printf_color(ANSI_BOLD, gettext("action: "));
8218 printf_color(ANSI_YELLOW, gettext("Replace the device using "
8219 "'zpool replace'.\n"));
8220 break;
8221
8222 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
8223 printf_color(ANSI_BOLD, gettext("status: "));
8224 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8225 "not be used because the label is missing \n\tor invalid. "
8226 "There are insufficient replicas for the pool to "
8227 "continue\n\tfunctioning.\n"));
8228 zpool_explain_recover(zpool_get_handle(zhp),
8229 zpool_get_name(zhp), reason, config);
8230 break;
8231
8232 case ZPOOL_STATUS_FAILING_DEV:
8233 printf_color(ANSI_BOLD, gettext("status: "));
8234 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8235 "experienced an unrecoverable error. An\n\tattempt was "
8236 "made to correct the error. Applications are "
8237 "unaffected.\n"));
8238 printf_color(ANSI_BOLD, gettext("action: "));
8239 printf_color(ANSI_YELLOW, gettext("Determine if the "
8240 "device needs to be replaced, and clear the errors\n\tusing"
8241 " 'zpool clear' or replace the device with 'zpool "
8242 "replace'.\n"));
8243 break;
8244
8245 case ZPOOL_STATUS_OFFLINE_DEV:
8246 printf_color(ANSI_BOLD, gettext("status: "));
8247 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8248 "been taken offline by the administrator.\n\tSufficient "
8249 "replicas exist for the pool to continue functioning in "
8250 "a\n\tdegraded state.\n"));
8251 printf_color(ANSI_BOLD, gettext("action: "));
8252 printf_color(ANSI_YELLOW, gettext("Online the device "
8253 "using 'zpool online' or replace the device with\n\t'zpool "
8254 "replace'.\n"));
8255 break;
8256
8257 case ZPOOL_STATUS_REMOVED_DEV:
8258 printf_color(ANSI_BOLD, gettext("status: "));
8259 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8260 "been removed by the administrator.\n\tSufficient "
8261 "replicas exist for the pool to continue functioning in "
8262 "a\n\tdegraded state.\n"));
8263 printf_color(ANSI_BOLD, gettext("action: "));
8264 printf_color(ANSI_YELLOW, gettext("Online the device "
8265 "using zpool online' or replace the device with\n\t'zpool "
8266 "replace'.\n"));
8267 break;
8268
8269 case ZPOOL_STATUS_RESILVERING:
8270 case ZPOOL_STATUS_REBUILDING:
8271 printf_color(ANSI_BOLD, gettext("status: "));
8272 printf_color(ANSI_YELLOW, gettext("One or more devices is "
8273 "currently being resilvered. The pool will\n\tcontinue "
8274 "to function, possibly in a degraded state.\n"));
8275 printf_color(ANSI_BOLD, gettext("action: "));
8276 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
8277 "complete.\n"));
8278 break;
8279
8280 case ZPOOL_STATUS_REBUILD_SCRUB:
8281 printf_color(ANSI_BOLD, gettext("status: "));
8282 printf_color(ANSI_YELLOW, gettext("One or more devices have "
8283 "been sequentially resilvered, scrubbing\n\tthe pool "
8284 "is recommended.\n"));
8285 printf_color(ANSI_BOLD, gettext("action: "));
8286 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
8287 "verify all data checksums.\n"));
8288 break;
8289
8290 case ZPOOL_STATUS_CORRUPT_DATA:
8291 printf_color(ANSI_BOLD, gettext("status: "));
8292 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8293 "experienced an error resulting in data\n\tcorruption. "
8294 "Applications may be affected.\n"));
8295 printf_color(ANSI_BOLD, gettext("action: "));
8296 printf_color(ANSI_YELLOW, gettext("Restore the file in question"
8297 " if possible. Otherwise restore the\n\tentire pool from "
8298 "backup.\n"));
8299 break;
8300
8301 case ZPOOL_STATUS_CORRUPT_POOL:
8302 printf_color(ANSI_BOLD, gettext("status: "));
8303 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
8304 "corrupted and the pool cannot be opened.\n"));
8305 zpool_explain_recover(zpool_get_handle(zhp),
8306 zpool_get_name(zhp), reason, config);
8307 break;
8308
8309 case ZPOOL_STATUS_VERSION_OLDER:
8310 printf_color(ANSI_BOLD, gettext("status: "));
8311 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
8312 "a legacy on-disk format. The pool can\n\tstill be used, "
8313 "but some features are unavailable.\n"));
8314 printf_color(ANSI_BOLD, gettext("action: "));
8315 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
8316 "'zpool upgrade'. Once this is done, the\n\tpool will no "
8317 "longer be accessible on software that does not support\n\t"
8318 "feature flags.\n"));
8319 break;
8320
8321 case ZPOOL_STATUS_VERSION_NEWER:
8322 printf_color(ANSI_BOLD, gettext("status: "));
8323 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
8324 "to a newer, incompatible on-disk version.\n\tThe pool "
8325 "cannot be accessed on this system.\n"));
8326 printf_color(ANSI_BOLD, gettext("action: "));
8327 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8328 "system running more recent software, or\n\trestore the "
8329 "pool from backup.\n"));
8330 break;
8331
8332 case ZPOOL_STATUS_FEAT_DISABLED:
8333 printf_color(ANSI_BOLD, gettext("status: "));
8334 printf_color(ANSI_YELLOW, gettext("Some supported and "
8335 "requested features are not enabled on the pool.\n\t"
8336 "The pool can still be used, but some features are "
8337 "unavailable.\n"));
8338 printf_color(ANSI_BOLD, gettext("action: "));
8339 printf_color(ANSI_YELLOW, gettext("Enable all features using "
8340 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
8341 "longer be accessible by software that does not support\n\t"
8342 "the features. See zpool-features(7) for details.\n"));
8343 break;
8344
8345 case ZPOOL_STATUS_COMPATIBILITY_ERR:
8346 printf_color(ANSI_BOLD, gettext("status: "));
8347 printf_color(ANSI_YELLOW, gettext("This pool has a "
8348 "compatibility list specified, but it could not be\n\t"
8349 "read/parsed at this time. The pool can still be used, "
8350 "but this\n\tshould be investigated.\n"));
8351 printf_color(ANSI_BOLD, gettext("action: "));
8352 printf_color(ANSI_YELLOW, gettext("Check the value of the "
8353 "'compatibility' property against the\n\t"
8354 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
8355 ZPOOL_DATA_COMPAT_D ".\n"));
8356 break;
8357
8358 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
8359 printf_color(ANSI_BOLD, gettext("status: "));
8360 printf_color(ANSI_YELLOW, gettext("One or more features "
8361 "are enabled on the pool despite not being\n\t"
8362 "requested by the 'compatibility' property.\n"));
8363 printf_color(ANSI_BOLD, gettext("action: "));
8364 printf_color(ANSI_YELLOW, gettext("Consider setting "
8365 "'compatibility' to an appropriate value, or\n\t"
8366 "adding needed features to the relevant file in\n\t"
8367 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
8368 break;
8369
8370 case ZPOOL_STATUS_UNSUP_FEAT_READ:
8371 printf_color(ANSI_BOLD, gettext("status: "));
8372 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8373 "on this system because it uses the\n\tfollowing feature(s)"
8374 " not supported on this system:\n"));
8375 zpool_print_unsup_feat(config);
8376 (void) printf("\n");
8377 printf_color(ANSI_BOLD, gettext("action: "));
8378 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8379 "system that supports the required feature(s),\n\tor "
8380 "restore the pool from backup.\n"));
8381 break;
8382
8383 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
8384 printf_color(ANSI_BOLD, gettext("status: "));
8385 printf_color(ANSI_YELLOW, gettext("The pool can only be "
8386 "accessed in read-only mode on this system. It\n\tcannot be"
8387 " accessed in read-write mode because it uses the "
8388 "following\n\tfeature(s) not supported on this system:\n"));
8389 zpool_print_unsup_feat(config);
8390 (void) printf("\n");
8391 printf_color(ANSI_BOLD, gettext("action: "));
8392 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8393 "in read-write mode. Import the pool with\n"
8394 "\t\"-o readonly=on\", access the pool from a system that "
8395 "supports the\n\trequired feature(s), or restore the "
8396 "pool from backup.\n"));
8397 break;
8398
8399 case ZPOOL_STATUS_FAULTED_DEV_R:
8400 printf_color(ANSI_BOLD, gettext("status: "));
8401 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8402 "faulted in response to persistent errors.\n\tSufficient "
8403 "replicas exist for the pool to continue functioning "
8404 "in a\n\tdegraded state.\n"));
8405 printf_color(ANSI_BOLD, gettext("action: "));
8406 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
8407 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
8408 break;
8409
8410 case ZPOOL_STATUS_FAULTED_DEV_NR:
8411 printf_color(ANSI_BOLD, gettext("status: "));
8412 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8413 "faulted in response to persistent errors. There are "
8414 "insufficient replicas for the pool to\n\tcontinue "
8415 "functioning.\n"));
8416 printf_color(ANSI_BOLD, gettext("action: "));
8417 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
8418 "pool from a backup source. Manually marking the device\n"
8419 "\trepaired using 'zpool clear' may allow some data "
8420 "to be recovered.\n"));
8421 break;
8422
8423 case ZPOOL_STATUS_IO_FAILURE_MMP:
8424 printf_color(ANSI_BOLD, gettext("status: "));
8425 printf_color(ANSI_YELLOW, gettext("The pool is suspended "
8426 "because multihost writes failed or were delayed;\n\t"
8427 "another system could import the pool undetected.\n"));
8428 printf_color(ANSI_BOLD, gettext("action: "));
8429 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
8430 " are connected, then reboot your system and\n\timport the "
8431 "pool.\n"));
8432 break;
8433
8434 case ZPOOL_STATUS_IO_FAILURE_WAIT:
8435 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
8436 printf_color(ANSI_BOLD, gettext("status: "));
8437 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8438 "faulted in response to IO failures.\n"));
8439 printf_color(ANSI_BOLD, gettext("action: "));
8440 printf_color(ANSI_YELLOW, gettext("Make sure the affected "
8441 "devices are connected, then run 'zpool clear'.\n"));
8442 break;
8443
8444 case ZPOOL_STATUS_BAD_LOG:
8445 printf_color(ANSI_BOLD, gettext("status: "));
8446 printf_color(ANSI_YELLOW, gettext("An intent log record "
8447 "could not be read.\n"
8448 "\tWaiting for administrator intervention to fix the "
8449 "faulted pool.\n"));
8450 printf_color(ANSI_BOLD, gettext("action: "));
8451 printf_color(ANSI_YELLOW, gettext("Either restore the affected "
8452 "device(s) and run 'zpool online',\n"
8453 "\tor ignore the intent log records by running "
8454 "'zpool clear'.\n"));
8455 break;
8456
8457 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
8458 (void) printf(gettext("status: One or more devices are "
8459 "configured to use a non-native block size.\n"
8460 "\tExpect reduced performance.\n"));
8461 (void) printf(gettext("action: Replace affected devices with "
8462 "devices that support the\n\tconfigured block size, or "
8463 "migrate data to a properly configured\n\tpool.\n"));
8464 break;
8465
8466 case ZPOOL_STATUS_HOSTID_MISMATCH:
8467 printf_color(ANSI_BOLD, gettext("status: "));
8468 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
8469 " and system hostid on imported pool.\n\tThis pool was "
8470 "previously imported into a system with a different "
8471 "hostid,\n\tand then was verbatim imported into this "
8472 "system.\n"));
8473 printf_color(ANSI_BOLD, gettext("action: "));
8474 printf_color(ANSI_YELLOW, gettext("Export this pool on all "
8475 "systems on which it is imported.\n"
8476 "\tThen import it to correct the mismatch.\n"));
8477 break;
8478
8479 case ZPOOL_STATUS_ERRATA:
8480 printf_color(ANSI_BOLD, gettext("status: "));
8481 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
8482 errata);
8483
8484 switch (errata) {
8485 case ZPOOL_ERRATA_NONE:
8486 break;
8487
8488 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
8489 printf_color(ANSI_BOLD, gettext("action: "));
8490 printf_color(ANSI_YELLOW, gettext("To correct the issue"
8491 " run 'zpool scrub'.\n"));
8492 break;
8493
8494 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
8495 (void) printf(gettext("\tExisting encrypted datasets "
8496 "contain an on-disk incompatibility\n\twhich "
8497 "needs to be corrected.\n"));
8498 printf_color(ANSI_BOLD, gettext("action: "));
8499 printf_color(ANSI_YELLOW, gettext("To correct the issue"
8500 " backup existing encrypted datasets to new\n\t"
8501 "encrypted datasets and destroy the old ones. "
8502 "'zfs mount -o ro' can\n\tbe used to temporarily "
8503 "mount existing encrypted datasets readonly.\n"));
8504 break;
8505
8506 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
8507 (void) printf(gettext("\tExisting encrypted snapshots "
8508 "and bookmarks contain an on-disk\n\tincompat"
8509 "ibility. This may cause on-disk corruption if "
8510 "they are used\n\twith 'zfs recv'.\n"));
8511 printf_color(ANSI_BOLD, gettext("action: "));
8512 printf_color(ANSI_YELLOW, gettext("To correct the "
8513 "issue, enable the bookmark_v2 feature. No "
8514 "additional\n\taction is needed if there are no "
8515 "encrypted snapshots or bookmarks.\n\tIf preserving"
8516 "the encrypted snapshots and bookmarks is required,"
8517 " use\n\ta non-raw send to backup and restore them."
8518 " Alternately, they may be\n\tremoved to resolve "
8519 "the incompatibility.\n"));
8520 break;
8521
8522 default:
8523 /*
8524 * All errata which allow the pool to be imported
8525 * must contain an action message.
8526 */
8527 assert(0);
8528 }
8529 break;
8530
8531 default:
8532 /*
8533 * The remaining errors can't actually be generated, yet.
8534 */
8535 assert(reason == ZPOOL_STATUS_OK);
8536 }
8537
8538 if (msgid != NULL) {
8539 printf(" ");
8540 printf_color(ANSI_BOLD, gettext("see:"));
8541 printf(gettext(
8542 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
8543 msgid);
8544 }
8545
8546 if (config != NULL) {
8547 uint64_t nerr;
8548 nvlist_t **spares, **l2cache;
8549 uint_t nspares, nl2cache;
8550 pool_checkpoint_stat_t *pcs = NULL;
8551 pool_removal_stat_t *prs = NULL;
8552
8553 print_scan_status(zhp, nvroot);
8554
8555 (void) nvlist_lookup_uint64_array(nvroot,
8556 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
8557 print_removal_status(zhp, prs);
8558
8559 (void) nvlist_lookup_uint64_array(nvroot,
8560 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8561 print_checkpoint_status(pcs);
8562
8563 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
8564 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
8565 if (cbp->cb_namewidth < 10)
8566 cbp->cb_namewidth = 10;
8567
8568 color_start(ANSI_BOLD);
8569 (void) printf(gettext("config:\n\n"));
8570 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
8571 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
8572 "CKSUM");
8573 color_end();
8574
8575 if (cbp->cb_print_slow_ios) {
8576 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
8577 }
8578
8579 if (cbp->vcdl != NULL)
8580 print_cmd_columns(cbp->vcdl, 0);
8581
8582 printf("\n");
8583
8584 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
8585 B_FALSE, NULL);
8586
8587 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
8588 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
8589 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
8590
8591 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
8592 &l2cache, &nl2cache) == 0)
8593 print_l2cache(zhp, cbp, l2cache, nl2cache);
8594
8595 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
8596 &spares, &nspares) == 0)
8597 print_spares(zhp, cbp, spares, nspares);
8598
8599 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
8600 &nerr) == 0) {
8601 nvlist_t *nverrlist = NULL;
8602
8603 /*
8604 * If the approximate error count is small, get a
8605 * precise count by fetching the entire log and
8606 * uniquifying the results.
8607 */
8608 if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
8609 zpool_get_errlog(zhp, &nverrlist) == 0) {
8610 nvpair_t *elem;
8611
8612 elem = NULL;
8613 nerr = 0;
8614 while ((elem = nvlist_next_nvpair(nverrlist,
8615 elem)) != NULL) {
8616 nerr++;
8617 }
8618 }
8619 nvlist_free(nverrlist);
8620
8621 (void) printf("\n");
8622
8623 if (nerr == 0)
8624 (void) printf(gettext("errors: No known data "
8625 "errors\n"));
8626 else if (!cbp->cb_verbose)
8627 (void) printf(gettext("errors: %llu data "
8628 "errors, use '-v' for a list\n"),
8629 (u_longlong_t)nerr);
8630 else
8631 print_error_log(zhp);
8632 }
8633
8634 if (cbp->cb_dedup_stats)
8635 print_dedup_stats(config);
8636 } else {
8637 (void) printf(gettext("config: The configuration cannot be "
8638 "determined.\n"));
8639 }
8640
8641 return (0);
8642 }
8643
8644 /*
8645 * zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
8646 * [interval [count]]
8647 *
8648 * -c CMD For each vdev, run command CMD
8649 * -i Display vdev initialization status.
8650 * -g Display guid for individual vdev name.
8651 * -L Follow links when resolving vdev path name.
8652 * -p Display values in parsable (exact) format.
8653 * -P Display full path for vdev name.
8654 * -s Display slow IOs column.
8655 * -v Display complete error logs
8656 * -x Display only pools with potential problems
8657 * -D Display dedup status (undocumented)
8658 * -t Display vdev TRIM status.
8659 * -T Display a timestamp in date(1) or Unix format
8660 *
8661 * Describes the health status of all pools or some subset.
8662 */
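/*
 * Example invocations (illustrative; 'tank' is a placeholder pool name):
 *
 *	zpool status			summarize the health of all pools
 *	zpool status -x			show only pools with potential problems
 *	zpool status -v tank		include the complete error logs for 'tank'
 *	zpool status tank 5		redisplay the status of 'tank' every 5 seconds
 */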
8663 int
8664 zpool_do_status(int argc, char **argv)
8665 {
8666 int c;
8667 int ret;
8668 float interval = 0;
8669 unsigned long count = 0;
8670 status_cbdata_t cb = { 0 };
8671 char *cmd = NULL;
8672
8673 /* check options */
8674 while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
8675 switch (c) {
8676 case 'c':
8677 if (cmd != NULL) {
8678 fprintf(stderr,
8679 gettext("Can't set -c flag twice\n"));
8680 exit(1);
8681 }
8682
8683 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
8684 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
8685 fprintf(stderr, gettext(
8686 "Can't run -c, disabled by "
8687 "ZPOOL_SCRIPTS_ENABLED.\n"));
8688 exit(1);
8689 }
8690
8691 if ((getuid() <= 0 || geteuid() <= 0) &&
8692 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
8693 fprintf(stderr, gettext(
8694 "Can't run -c with root privileges "
8695 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
8696 exit(1);
8697 }
8698 cmd = optarg;
8699 break;
8700 case 'i':
8701 cb.cb_print_vdev_init = B_TRUE;
8702 break;
8703 case 'g':
8704 cb.cb_name_flags |= VDEV_NAME_GUID;
8705 break;
8706 case 'L':
8707 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
8708 break;
8709 case 'p':
8710 cb.cb_literal = B_TRUE;
8711 break;
8712 case 'P':
8713 cb.cb_name_flags |= VDEV_NAME_PATH;
8714 break;
8715 case 's':
8716 cb.cb_print_slow_ios = B_TRUE;
8717 break;
8718 case 'v':
8719 cb.cb_verbose = B_TRUE;
8720 break;
8721 case 'x':
8722 cb.cb_explain = B_TRUE;
8723 break;
8724 case 'D':
8725 cb.cb_dedup_stats = B_TRUE;
8726 break;
8727 case 't':
8728 cb.cb_print_vdev_trim = B_TRUE;
8729 break;
8730 case 'T':
8731 get_timestamp_arg(*optarg);
8732 break;
8733 case '?':
8734 if (optopt == 'c') {
8735 print_zpool_script_list("status");
8736 exit(0);
8737 } else {
8738 fprintf(stderr,
8739 gettext("invalid option '%c'\n"), optopt);
8740 }
8741 usage(B_FALSE);
8742 }
8743 }
8744
8745 argc -= optind;
8746 argv += optind;
8747
8748 get_interval_count(&argc, argv, &interval, &count);
8749
8750 if (argc == 0)
8751 cb.cb_allpools = B_TRUE;
8752
8753 cb.cb_first = B_TRUE;
8754 cb.cb_print_status = B_TRUE;
8755
8756 for (;;) {
8757 if (timestamp_fmt != NODATE)
8758 print_timestamp(timestamp_fmt);
8759
8760 if (cmd != NULL)
8761 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
8762 NULL, NULL, 0, 0);
8763
8764 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8765 cb.cb_literal, status_callback, &cb);
8766
8767 if (cb.vcdl != NULL)
8768 free_vdev_cmd_data_list(cb.vcdl);
8769
8770 if (argc == 0 && cb.cb_count == 0)
8771 (void) fprintf(stderr, gettext("no pools available\n"));
8772 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
8773 (void) printf(gettext("all pools are healthy\n"));
8774
8775 if (ret != 0)
8776 return (ret);
8777
8778 if (interval == 0)
8779 break;
8780
8781 if (count != 0 && --count == 0)
8782 break;
8783
8784 (void) fsleep(interval);
8785 }
8786
8787 return (0);
8788 }
8789
8790 typedef struct upgrade_cbdata {
8791 int cb_first;
8792 int cb_argc;
8793 uint64_t cb_version;
8794 char **cb_argv;
8795 } upgrade_cbdata_t;
8796
8797 static int
8798 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
8799 {
8800 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
8801 int *count = (int *)unsupp_fs;
8802
8803 if (zfs_version > ZPL_VERSION) {
8804 (void) printf(gettext("%s (v%d) is not supported by this "
8805 "implementation of ZFS.\n"),
8806 zfs_get_name(zhp), zfs_version);
8807 (*count)++;
8808 }
8809
8810 zfs_iter_filesystems(zhp, 0, check_unsupp_fs, unsupp_fs);
8811
8812 zfs_close(zhp);
8813
8814 return (0);
8815 }
8816
8817 static int
8818 upgrade_version(zpool_handle_t *zhp, uint64_t version)
8819 {
8820 int ret;
8821 nvlist_t *config;
8822 uint64_t oldversion;
8823 int unsupp_fs = 0;
8824
8825 config = zpool_get_config(zhp, NULL);
8826 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8827 &oldversion) == 0);
8828
8829 char compat[ZFS_MAXPROPLEN];
8830 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
8831 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
8832 compat[0] = '\0';
8833
8834 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
8835 assert(oldversion < version);
8836
8837 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
8838 if (ret != 0)
8839 return (ret);
8840
8841 if (unsupp_fs) {
8842 (void) fprintf(stderr, gettext("Upgrade not performed due "
8843 "to %d unsupported filesystems (max v%d).\n"),
8844 unsupp_fs, (int)ZPL_VERSION);
8845 return (1);
8846 }
8847
8848 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
8849 (void) fprintf(stderr, gettext("Upgrade not performed because "
8850 "'compatibility' property set to '"
8851 ZPOOL_COMPAT_LEGACY "'.\n"));
8852 return (1);
8853 }
8854
8855 ret = zpool_upgrade(zhp, version);
8856 if (ret != 0)
8857 return (ret);
8858
8859 if (version >= SPA_VERSION_FEATURES) {
8860 (void) printf(gettext("Successfully upgraded "
8861 "'%s' from version %llu to feature flags.\n"),
8862 zpool_get_name(zhp), (u_longlong_t)oldversion);
8863 } else {
8864 (void) printf(gettext("Successfully upgraded "
8865 "'%s' from version %llu to version %llu.\n"),
8866 zpool_get_name(zhp), (u_longlong_t)oldversion,
8867 (u_longlong_t)version);
8868 }
8869
8870 return (0);
8871 }
8872
8873 static int
8874 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
8875 {
8876 int i, ret, count;
8877 boolean_t firstff = B_TRUE;
8878 nvlist_t *enabled = zpool_get_features(zhp);
8879
8880 char compat[ZFS_MAXPROPLEN];
8881 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
8882 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
8883 compat[0] = '\0';
8884
8885 boolean_t requested_features[SPA_FEATURES];
8886 if (zpool_do_load_compat(compat, requested_features) !=
8887 ZPOOL_COMPATIBILITY_OK)
8888 return (-1);
8889
8890 count = 0;
8891 for (i = 0; i < SPA_FEATURES; i++) {
8892 const char *fname = spa_feature_table[i].fi_uname;
8893 const char *fguid = spa_feature_table[i].fi_guid;
8894
8895 if (!spa_feature_table[i].fi_zfs_mod_supported)
8896 continue;
8897
8898 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
8899 char *propname;
8900 verify(-1 != asprintf(&propname, "feature@%s", fname));
8901 ret = zpool_set_prop(zhp, propname,
8902 ZFS_FEATURE_ENABLED);
8903 if (ret != 0) {
8904 free(propname);
8905 return (ret);
8906 }
8907 count++;
8908
8909 if (firstff) {
8910 (void) printf(gettext("Enabled the "
8911 "following features on '%s':\n"),
8912 zpool_get_name(zhp));
8913 firstff = B_FALSE;
8914 }
8915 (void) printf(gettext(" %s\n"), fname);
8916 free(propname);
8917 }
8918 }
8919
8920 if (countp != NULL)
8921 *countp = count;
8922 return (0);
8923 }
8924
8925 static int
8926 upgrade_cb(zpool_handle_t *zhp, void *arg)
8927 {
8928 upgrade_cbdata_t *cbp = arg;
8929 nvlist_t *config;
8930 uint64_t version;
8931 boolean_t modified_pool = B_FALSE;
8932 int ret;
8933
8934 config = zpool_get_config(zhp, NULL);
8935 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8936 &version) == 0);
8937
8938 assert(SPA_VERSION_IS_SUPPORTED(version));
8939
8940 if (version < cbp->cb_version) {
8941 cbp->cb_first = B_FALSE;
8942 ret = upgrade_version(zhp, cbp->cb_version);
8943 if (ret != 0)
8944 return (ret);
8945 modified_pool = B_TRUE;
8946
8947 /*
8948 * If they did "zpool upgrade -a", then we could
8949 * be doing ioctls to different pools. We need
8950 * to log this history once to each pool, and bypass
8951 * the normal history logging that happens in main().
8952 */
8953 (void) zpool_log_history(g_zfs, history_str);
8954 log_history = B_FALSE;
8955 }
8956
8957 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
8958 int count;
8959 ret = upgrade_enable_all(zhp, &count);
8960 if (ret != 0)
8961 return (ret);
8962
8963 if (count > 0) {
8964 cbp->cb_first = B_FALSE;
8965 modified_pool = B_TRUE;
8966 }
8967 }
8968
8969 if (modified_pool) {
8970 (void) printf("\n");
8971 (void) after_zpool_upgrade(zhp);
8972 }
8973
8974 return (0);
8975 }
8976
8977 static int
8978 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
8979 {
8980 upgrade_cbdata_t *cbp = arg;
8981 nvlist_t *config;
8982 uint64_t version;
8983
8984 config = zpool_get_config(zhp, NULL);
8985 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8986 &version) == 0);
8987
8988 assert(SPA_VERSION_IS_SUPPORTED(version));
8989
8990 if (version < SPA_VERSION_FEATURES) {
8991 if (cbp->cb_first) {
8992 (void) printf(gettext("The following pools are "
8993 "formatted with legacy version numbers and can\n"
8994 "be upgraded to use feature flags. After "
8995 "being upgraded, these pools\nwill no "
8996 "longer be accessible by software that does not "
8997 "support feature\nflags.\n\n"
8998 "Note that setting a pool's 'compatibility' "
8999 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
9000 "inhibit upgrades.\n\n"));
9001 (void) printf(gettext("VER POOL\n"));
9002 (void) printf(gettext("--- ------------\n"));
9003 cbp->cb_first = B_FALSE;
9004 }
9005
9006 (void) printf("%2llu %s\n", (u_longlong_t)version,
9007 zpool_get_name(zhp));
9008 }
9009
9010 return (0);
9011 }
9012
9013 static int
9014 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
9015 {
9016 upgrade_cbdata_t *cbp = arg;
9017 nvlist_t *config;
9018 uint64_t version;
9019
9020 config = zpool_get_config(zhp, NULL);
9021 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9022 &version) == 0);
9023
9024 if (version >= SPA_VERSION_FEATURES) {
9025 int i;
9026 boolean_t poolfirst = B_TRUE;
9027 nvlist_t *enabled = zpool_get_features(zhp);
9028
9029 for (i = 0; i < SPA_FEATURES; i++) {
9030 const char *fguid = spa_feature_table[i].fi_guid;
9031 const char *fname = spa_feature_table[i].fi_uname;
9032
9033 if (!spa_feature_table[i].fi_zfs_mod_supported)
9034 continue;
9035
9036 if (!nvlist_exists(enabled, fguid)) {
9037 if (cbp->cb_first) {
9038 (void) printf(gettext("\nSome "
9039 "supported features are not "
9040 "enabled on the following pools. "
9041 "Once a\nfeature is enabled the "
9042 "pool may become incompatible with "
9043 "software\nthat does not support "
9044 "the feature. See "
9045 "zpool-features(7) for "
9046 "details.\n\n"
9047 "Note that the pool "
9048 "'compatibility' feature can be "
9049 "used to inhibit\nfeature "
9050 "upgrades.\n\n"));
9051 (void) printf(gettext("POOL "
9052 "FEATURE\n"));
9053 (void) printf(gettext("------"
9054 "---------\n"));
9055 cbp->cb_first = B_FALSE;
9056 }
9057
9058 if (poolfirst) {
9059 (void) printf(gettext("%s\n"),
9060 zpool_get_name(zhp));
9061 poolfirst = B_FALSE;
9062 }
9063
9064 (void) printf(gettext(" %s\n"), fname);
9065 }
9066 /*
9067 * If they did "zpool upgrade -a", then we could
9068 * be doing ioctls to different pools. We need
9069 * to log this history once to each pool, and bypass
9070 * the normal history logging that happens in main().
9071 */
9072 (void) zpool_log_history(g_zfs, history_str);
9073 log_history = B_FALSE;
9074 }
9075 }
9076
9077 return (0);
9078 }
9079
9080 static int
9081 upgrade_one(zpool_handle_t *zhp, void *data)
9082 {
9083 boolean_t modified_pool = B_FALSE;
9084 upgrade_cbdata_t *cbp = data;
9085 uint64_t cur_version;
9086 int ret;
9087
9088 if (strcmp("log", zpool_get_name(zhp)) == 0) {
9089 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
9090 "Pool 'log' must be renamed using export and import"
9091 " to upgrade.\n"));
9092 return (1);
9093 }
9094
9095 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
9096 if (cur_version > cbp->cb_version) {
9097 (void) printf(gettext("Pool '%s' is already formatted "
9098 "using more current version '%llu'.\n\n"),
9099 zpool_get_name(zhp), (u_longlong_t)cur_version);
9100 return (0);
9101 }
9102
9103 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
9104 (void) printf(gettext("Pool '%s' is already formatted "
9105 "using version %llu.\n\n"), zpool_get_name(zhp),
9106 (u_longlong_t)cbp->cb_version);
9107 return (0);
9108 }
9109
9110 if (cur_version != cbp->cb_version) {
9111 modified_pool = B_TRUE;
9112 ret = upgrade_version(zhp, cbp->cb_version);
9113 if (ret != 0)
9114 return (ret);
9115 }
9116
9117 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9118 int count = 0;
9119 ret = upgrade_enable_all(zhp, &count);
9120 if (ret != 0)
9121 return (ret);
9122
9123 if (count != 0) {
9124 modified_pool = B_TRUE;
9125 } else if (cur_version == SPA_VERSION) {
9126 (void) printf(gettext("Pool '%s' already has all "
9127 "supported and requested features enabled.\n"),
9128 zpool_get_name(zhp));
9129 }
9130 }
9131
9132 if (modified_pool) {
9133 (void) printf("\n");
9134 (void) after_zpool_upgrade(zhp);
9135 }
9136
9137 return (0);
9138 }
9139
9140 /*
9141 * zpool upgrade
9142 * zpool upgrade -v
9143 * zpool upgrade [-V version] <-a | pool ...>
9144 *
9145 * With no arguments, display downrev'd ZFS pools available for upgrade.
9146 * Individual pools can be upgraded by specifying the pool, and '-a' will
9147 * upgrade all pools.
9148 */
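/*
 * Illustrative usage (the pool name 'tank' is a placeholder):
 *
 *	zpool upgrade			list pools that can be upgraded
 *	zpool upgrade -v		list supported feature flags and legacy versions
 *	zpool upgrade -a		upgrade all pools
 *	zpool upgrade tank		upgrade only the pool 'tank'
 */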
9149 int
9150 zpool_do_upgrade(int argc, char **argv)
9151 {
9152 int c;
9153 upgrade_cbdata_t cb = { 0 };
9154 int ret = 0;
9155 boolean_t showversions = B_FALSE;
9156 boolean_t upgradeall = B_FALSE;
9157 char *end;
9158
9159
9160 /* check options */
9161 while ((c = getopt(argc, argv, ":avV:")) != -1) {
9162 switch (c) {
9163 case 'a':
9164 upgradeall = B_TRUE;
9165 break;
9166 case 'v':
9167 showversions = B_TRUE;
9168 break;
9169 case 'V':
9170 cb.cb_version = strtoll(optarg, &end, 10);
9171 if (*end != '\0' ||
9172 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
9173 (void) fprintf(stderr,
9174 gettext("invalid version '%s'\n"), optarg);
9175 usage(B_FALSE);
9176 }
9177 break;
9178 case ':':
9179 (void) fprintf(stderr, gettext("missing argument for "
9180 "'%c' option\n"), optopt);
9181 usage(B_FALSE);
9182 break;
9183 case '?':
9184 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9185 optopt);
9186 usage(B_FALSE);
9187 }
9188 }
9189
9190 cb.cb_argc = argc;
9191 cb.cb_argv = argv;
9192 argc -= optind;
9193 argv += optind;
9194
9195 if (cb.cb_version == 0) {
9196 cb.cb_version = SPA_VERSION;
9197 } else if (!upgradeall && argc == 0) {
9198 (void) fprintf(stderr, gettext("-V option is "
9199 "incompatible with other arguments\n"));
9200 usage(B_FALSE);
9201 }
9202
9203 if (showversions) {
9204 if (upgradeall || argc != 0) {
9205 (void) fprintf(stderr, gettext("-v option is "
9206 "incompatible with other arguments\n"));
9207 usage(B_FALSE);
9208 }
9209 } else if (upgradeall) {
9210 if (argc != 0) {
9211 (void) fprintf(stderr, gettext("-a option should not "
9212 "be used along with a pool name\n"));
9213 usage(B_FALSE);
9214 }
9215 }
9216
9217 (void) printf(gettext("This system supports ZFS pool feature "
9218 "flags.\n\n"));
9219 if (showversions) {
9220 int i;
9221
9222 (void) printf(gettext("The following features are "
9223 "supported:\n\n"));
9224 (void) printf(gettext("FEAT DESCRIPTION\n"));
9225 (void) printf("----------------------------------------------"
9226 "---------------\n");
9227 for (i = 0; i < SPA_FEATURES; i++) {
9228 zfeature_info_t *fi = &spa_feature_table[i];
9229 if (!fi->fi_zfs_mod_supported)
9230 continue;
9231 const char *ro =
9232 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
9233 " (read-only compatible)" : "";
9234
9235 (void) printf("%-37s%s\n", fi->fi_uname, ro);
9236 (void) printf(" %s\n", fi->fi_desc);
9237 }
9238 (void) printf("\n");
9239
9240 (void) printf(gettext("The following legacy versions are also "
9241 "supported:\n\n"));
9242 (void) printf(gettext("VER DESCRIPTION\n"));
9243 (void) printf("--- -----------------------------------------"
9244 "---------------\n");
9245 (void) printf(gettext(" 1 Initial ZFS version\n"));
9246 (void) printf(gettext(" 2 Ditto blocks "
9247 "(replicated metadata)\n"));
9248 (void) printf(gettext(" 3 Hot spares and double parity "
9249 "RAID-Z\n"));
9250 (void) printf(gettext(" 4 zpool history\n"));
9251 (void) printf(gettext(" 5 Compression using the gzip "
9252 "algorithm\n"));
9253 (void) printf(gettext(" 6 bootfs pool property\n"));
9254 (void) printf(gettext(" 7 Separate intent log devices\n"));
9255 (void) printf(gettext(" 8 Delegated administration\n"));
9256 (void) printf(gettext(" 9 refquota and refreservation "
9257 "properties\n"));
9258 (void) printf(gettext(" 10 Cache devices\n"));
9259 (void) printf(gettext(" 11 Improved scrub performance\n"));
9260 (void) printf(gettext(" 12 Snapshot properties\n"));
9261 (void) printf(gettext(" 13 snapused property\n"));
9262 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
9263 (void) printf(gettext(" 15 user/group space accounting\n"));
9264 (void) printf(gettext(" 16 stmf property support\n"));
9265 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
9266 (void) printf(gettext(" 18 Snapshot user holds\n"));
9267 (void) printf(gettext(" 19 Log device removal\n"));
9268 (void) printf(gettext(" 20 Compression using zle "
9269 "(zero-length encoding)\n"));
9270 (void) printf(gettext(" 21 Deduplication\n"));
9271 (void) printf(gettext(" 22 Received properties\n"));
9272 (void) printf(gettext(" 23 Slim ZIL\n"));
9273 (void) printf(gettext(" 24 System attributes\n"));
9274 (void) printf(gettext(" 25 Improved scrub stats\n"));
9275 (void) printf(gettext(" 26 Improved snapshot deletion "
9276 "performance\n"));
9277 (void) printf(gettext(" 27 Improved snapshot creation "
9278 "performance\n"));
9279 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
9280 (void) printf(gettext("\nFor more information on a particular "
9281 "version, including supported releases,\n"));
9282 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
9283 } else if (argc == 0 && upgradeall) {
9284 cb.cb_first = B_TRUE;
9285 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
9286 if (ret == 0 && cb.cb_first) {
9287 if (cb.cb_version == SPA_VERSION) {
9288 (void) printf(gettext("All pools are already "
9289 "formatted using feature flags.\n\n"));
9290 (void) printf(gettext("Every feature flags "
9291 "pool already has all supported and "
9292 "requested features enabled.\n"));
9293 } else {
9294 (void) printf(gettext("All pools are already "
9295 "formatted with version %llu or higher.\n"),
9296 (u_longlong_t)cb.cb_version);
9297 }
9298 }
9299 } else if (argc == 0) {
9300 cb.cb_first = B_TRUE;
9301 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
9302 assert(ret == 0);
9303
9304 if (cb.cb_first) {
9305 (void) printf(gettext("All pools are formatted "
9306 "using feature flags.\n\n"));
9307 } else {
9308 (void) printf(gettext("\nUse 'zpool upgrade -v' "
9309 "for a list of available legacy versions.\n"));
9310 }
9311
9312 cb.cb_first = B_TRUE;
9313 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
9314 assert(ret == 0);
9315
9316 if (cb.cb_first) {
9317 (void) printf(gettext("Every feature flags pool has "
9318 "all supported and requested features enabled.\n"));
9319 } else {
9320 (void) printf(gettext("\n"));
9321 }
9322 } else {
9323 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9324 B_FALSE, upgrade_one, &cb);
9325 }
9326
9327 return (ret);
9328 }
9329
9330 typedef struct hist_cbdata {
9331 boolean_t first;
9332 boolean_t longfmt;
9333 boolean_t internal;
9334 } hist_cbdata_t;
9335
9336 static void
9337 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
9338 {
9339 nvlist_t **records;
9340 uint_t numrecords;
9341 int i;
9342
9343 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
9344 &records, &numrecords) == 0);
9345 for (i = 0; i < numrecords; i++) {
9346 nvlist_t *rec = records[i];
9347 char tbuf[64] = "";
9348
9349 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
9350 time_t tsec;
9351 struct tm t;
9352
9353 tsec = fnvlist_lookup_uint64(records[i],
9354 ZPOOL_HIST_TIME);
9355 (void) localtime_r(&tsec, &t);
9356 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
9357 }
9358
9359 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
9360 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
9361 ZPOOL_HIST_ELAPSED_NS);
9362 (void) snprintf(tbuf + strlen(tbuf),
9363 sizeof (tbuf) - strlen(tbuf),
9364 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
9365 }
9366
9367 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
9368 (void) printf("%s %s", tbuf,
9369 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
9370 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
9371 int ievent =
9372 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
9373 if (!cb->internal)
9374 continue;
9375 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
9376 (void) printf("%s unrecognized record:\n",
9377 tbuf);
9378 dump_nvlist(rec, 4);
9379 continue;
9380 }
9381 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
9382 zfs_history_event_names[ievent],
9383 (longlong_t)fnvlist_lookup_uint64(
9384 rec, ZPOOL_HIST_TXG),
9385 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
9386 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
9387 if (!cb->internal)
9388 continue;
9389 (void) printf("%s [txg:%lld] %s", tbuf,
9390 (longlong_t)fnvlist_lookup_uint64(
9391 rec, ZPOOL_HIST_TXG),
9392 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
9393 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
9394 (void) printf(" %s (%llu)",
9395 fnvlist_lookup_string(rec,
9396 ZPOOL_HIST_DSNAME),
9397 (u_longlong_t)fnvlist_lookup_uint64(rec,
9398 ZPOOL_HIST_DSID));
9399 }
9400 (void) printf(" %s", fnvlist_lookup_string(rec,
9401 ZPOOL_HIST_INT_STR));
9402 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
9403 if (!cb->internal)
9404 continue;
9405 (void) printf("%s ioctl %s\n", tbuf,
9406 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
9407 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
9408 (void) printf(" input:\n");
9409 dump_nvlist(fnvlist_lookup_nvlist(rec,
9410 ZPOOL_HIST_INPUT_NVL), 8);
9411 }
9412 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
9413 (void) printf(" output:\n");
9414 dump_nvlist(fnvlist_lookup_nvlist(rec,
9415 ZPOOL_HIST_OUTPUT_NVL), 8);
9416 }
9417 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
9418 (void) printf(" output nvlist omitted; "
9419 "original size: %lldKB\n",
9420 (longlong_t)fnvlist_lookup_int64(rec,
9421 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
9422 }
9423 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
9424 (void) printf(" errno: %lld\n",
9425 (longlong_t)fnvlist_lookup_int64(rec,
9426 ZPOOL_HIST_ERRNO));
9427 }
9428 } else {
9429 if (!cb->internal)
9430 continue;
9431 (void) printf("%s unrecognized record:\n", tbuf);
9432 dump_nvlist(rec, 4);
9433 }
9434
9435 if (!cb->longfmt) {
9436 (void) printf("\n");
9437 continue;
9438 }
9439 (void) printf(" [");
9440 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
9441 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
9442 struct passwd *pwd = getpwuid(who);
9443 (void) printf("user %d ", (int)who);
9444 if (pwd != NULL)
9445 (void) printf("(%s) ", pwd->pw_name);
9446 }
9447 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
9448 (void) printf("on %s",
9449 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
9450 }
9451 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
9452 (void) printf(":%s",
9453 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
9454 }
9455
9456 (void) printf("]");
9457 (void) printf("\n");
9458 }
9459 }
9460
9461 /*
9462 * Print out the command history for a specific pool.
9463 */
9464 static int
9465 get_history_one(zpool_handle_t *zhp, void *data)
9466 {
9467 nvlist_t *nvhis;
9468 int ret;
9469 hist_cbdata_t *cb = (hist_cbdata_t *)data;
9470 uint64_t off = 0;
9471 boolean_t eof = B_FALSE;
9472
9473 cb->first = B_FALSE;
9474
9475 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
9476
9477 while (!eof) {
9478 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
9479 return (ret);
9480
9481 print_history_records(nvhis, cb);
9482 nvlist_free(nvhis);
9483 }
9484 (void) printf("\n");
9485
9486 return (ret);
9487 }
9488
9489 /*
9490 * zpool history <pool>
9491 *
9492 * Displays the history of commands that modified pools.
9493 */
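/*
 * Usage sketch ('tank' is a placeholder pool name):
 *
 *	zpool history tank		show the command history for 'tank'
 *	zpool history -l tank		long format, including user, host and zone
 *	zpool history -i tank		also show internally logged events
 */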
9494 int
9495 zpool_do_history(int argc, char **argv)
9496 {
9497 hist_cbdata_t cbdata = { 0 };
9498 int ret;
9499 int c;
9500
9501 cbdata.first = B_TRUE;
9502 /* check options */
9503 while ((c = getopt(argc, argv, "li")) != -1) {
9504 switch (c) {
9505 case 'l':
9506 cbdata.longfmt = B_TRUE;
9507 break;
9508 case 'i':
9509 cbdata.internal = B_TRUE;
9510 break;
9511 case '?':
9512 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9513 optopt);
9514 usage(B_FALSE);
9515 }
9516 }
9517 argc -= optind;
9518 argv += optind;
9519
9520 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9521 B_FALSE, get_history_one, &cbdata);
9522
9523 if (argc == 0 && cbdata.first == B_TRUE) {
9524 (void) fprintf(stderr, gettext("no pools available\n"));
9525 return (0);
9526 }
9527
9528 return (ret);
9529 }
9530
9531 typedef struct ev_opts {
9532 int verbose;
9533 int scripted;
9534 int follow;
9535 int clear;
9536 char poolname[ZFS_MAX_DATASET_NAME_LEN];
9537 } ev_opts_t;
9538
9539 static void
9540 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
9541 {
9542 char ctime_str[26], str[32], *ptr;
9543 int64_t *tv;
9544 uint_t n;
9545
9546 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
9547 memset(str, ' ', 32);
9548 (void) ctime_r((const time_t *)&tv[0], ctime_str);
9549 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
9550 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
9551 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
9552 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
9553 if (opts->scripted)
9554 (void) printf(gettext("%s\t"), str);
9555 else
9556 (void) printf(gettext("%s "), str);
9557
9558 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
9559 (void) printf(gettext("%s\n"), ptr);
9560 }
9561
9562 static void
9563 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
9564 {
9565 nvpair_t *nvp;
9566
9567 for (nvp = nvlist_next_nvpair(nvl, NULL);
9568 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
9569
9570 data_type_t type = nvpair_type(nvp);
9571 const char *name = nvpair_name(nvp);
9572
9573 boolean_t b;
9574 uint8_t i8;
9575 uint16_t i16;
9576 uint32_t i32;
9577 uint64_t i64;
9578 char *str;
9579 nvlist_t *cnv;
9580
9581 printf(gettext("%*s%s = "), depth, "", name);
9582
9583 switch (type) {
9584 case DATA_TYPE_BOOLEAN:
9585 printf(gettext("%s"), "1");
9586 break;
9587
9588 case DATA_TYPE_BOOLEAN_VALUE:
9589 (void) nvpair_value_boolean_value(nvp, &b);
9590 printf(gettext("%s"), b ? "1" : "0");
9591 break;
9592
9593 case DATA_TYPE_BYTE:
9594 (void) nvpair_value_byte(nvp, &i8);
9595 printf(gettext("0x%x"), i8);
9596 break;
9597
9598 case DATA_TYPE_INT8:
9599 (void) nvpair_value_int8(nvp, (void *)&i8);
9600 printf(gettext("0x%x"), i8);
9601 break;
9602
9603 case DATA_TYPE_UINT8:
9604 (void) nvpair_value_uint8(nvp, &i8);
9605 printf(gettext("0x%x"), i8);
9606 break;
9607
9608 case DATA_TYPE_INT16:
9609 (void) nvpair_value_int16(nvp, (void *)&i16);
9610 printf(gettext("0x%x"), i16);
9611 break;
9612
9613 case DATA_TYPE_UINT16:
9614 (void) nvpair_value_uint16(nvp, &i16);
9615 printf(gettext("0x%x"), i16);
9616 break;
9617
9618 case DATA_TYPE_INT32:
9619 (void) nvpair_value_int32(nvp, (void *)&i32);
9620 printf(gettext("0x%x"), i32);
9621 break;
9622
9623 case DATA_TYPE_UINT32:
9624 (void) nvpair_value_uint32(nvp, &i32);
9625 printf(gettext("0x%x"), i32);
9626 break;
9627
9628 case DATA_TYPE_INT64:
9629 (void) nvpair_value_int64(nvp, (void *)&i64);
9630 printf(gettext("0x%llx"), (u_longlong_t)i64);
9631 break;
9632
9633 case DATA_TYPE_UINT64:
9634 (void) nvpair_value_uint64(nvp, &i64);
9635 /*
9636 * translate vdev state values to readable
9637 * strings to aide zpool events consumers
9638 */
9639 if (strcmp(name,
9640 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
9641 strcmp(name,
9642 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
9643 printf(gettext("\"%s\" (0x%llx)"),
9644 zpool_state_to_name(i64, VDEV_AUX_NONE),
9645 (u_longlong_t)i64);
9646 } else {
9647 printf(gettext("0x%llx"), (u_longlong_t)i64);
9648 }
9649 break;
9650
9651 case DATA_TYPE_HRTIME:
9652 (void) nvpair_value_hrtime(nvp, (void *)&i64);
9653 printf(gettext("0x%llx"), (u_longlong_t)i64);
9654 break;
9655
9656 case DATA_TYPE_STRING:
9657 (void) nvpair_value_string(nvp, &str);
9658 printf(gettext("\"%s\""), str ? str : "<NULL>");
9659 break;
9660
9661 case DATA_TYPE_NVLIST:
9662 printf(gettext("(embedded nvlist)\n"));
9663 (void) nvpair_value_nvlist(nvp, &cnv);
9664 zpool_do_events_nvprint(cnv, depth + 8);
9665 printf(gettext("%*s(end %s)"), depth, "", name);
9666 break;
9667
9668 case DATA_TYPE_NVLIST_ARRAY: {
9669 nvlist_t **val;
9670 uint_t i, nelem;
9671
9672 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
9673 printf(gettext("(%d embedded nvlists)\n"), nelem);
9674 for (i = 0; i < nelem; i++) {
9675 printf(gettext("%*s%s[%d] = %s\n"),
9676 depth, "", name, i, "(embedded nvlist)");
9677 zpool_do_events_nvprint(val[i], depth + 8);
9678 printf(gettext("%*s(end %s[%i])\n"),
9679 depth, "", name, i);
9680 }
9681 printf(gettext("%*s(end %s)\n"), depth, "", name);
9682 }
9683 break;
9684
9685 case DATA_TYPE_INT8_ARRAY: {
9686 int8_t *val;
9687 uint_t i, nelem;
9688
9689 (void) nvpair_value_int8_array(nvp, &val, &nelem);
9690 for (i = 0; i < nelem; i++)
9691 printf(gettext("0x%x "), val[i]);
9692
9693 break;
9694 }
9695
9696 case DATA_TYPE_UINT8_ARRAY: {
9697 uint8_t *val;
9698 uint_t i, nelem;
9699
9700 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
9701 for (i = 0; i < nelem; i++)
9702 printf(gettext("0x%x "), val[i]);
9703
9704 break;
9705 }
9706
9707 case DATA_TYPE_INT16_ARRAY: {
9708 int16_t *val;
9709 uint_t i, nelem;
9710
9711 (void) nvpair_value_int16_array(nvp, &val, &nelem);
9712 for (i = 0; i < nelem; i++)
9713 printf(gettext("0x%x "), val[i]);
9714
9715 break;
9716 }
9717
9718 case DATA_TYPE_UINT16_ARRAY: {
9719 uint16_t *val;
9720 uint_t i, nelem;
9721
9722 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
9723 for (i = 0; i < nelem; i++)
9724 printf(gettext("0x%x "), val[i]);
9725
9726 break;
9727 }
9728
9729 case DATA_TYPE_INT32_ARRAY: {
9730 int32_t *val;
9731 uint_t i, nelem;
9732
9733 (void) nvpair_value_int32_array(nvp, &val, &nelem);
9734 for (i = 0; i < nelem; i++)
9735 printf(gettext("0x%x "), val[i]);
9736
9737 break;
9738 }
9739
9740 case DATA_TYPE_UINT32_ARRAY: {
9741 uint32_t *val;
9742 uint_t i, nelem;
9743
9744 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
9745 for (i = 0; i < nelem; i++)
9746 printf(gettext("0x%x "), val[i]);
9747
9748 break;
9749 }
9750
9751 case DATA_TYPE_INT64_ARRAY: {
9752 int64_t *val;
9753 uint_t i, nelem;
9754
9755 (void) nvpair_value_int64_array(nvp, &val, &nelem);
9756 for (i = 0; i < nelem; i++)
9757 printf(gettext("0x%llx "),
9758 (u_longlong_t)val[i]);
9759
9760 break;
9761 }
9762
9763 case DATA_TYPE_UINT64_ARRAY: {
9764 uint64_t *val;
9765 uint_t i, nelem;
9766
9767 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
9768 for (i = 0; i < nelem; i++)
9769 printf(gettext("0x%llx "),
9770 (u_longlong_t)val[i]);
9771
9772 break;
9773 }
9774
9775 case DATA_TYPE_STRING_ARRAY: {
9776 char **str;
9777 uint_t i, nelem;
9778
9779 (void) nvpair_value_string_array(nvp, &str, &nelem);
9780 for (i = 0; i < nelem; i++)
9781 printf(gettext("\"%s\" "),
9782 str[i] ? str[i] : "<NULL>");
9783
9784 break;
9785 }
9786
9787 case DATA_TYPE_BOOLEAN_ARRAY:
9788 case DATA_TYPE_BYTE_ARRAY:
9789 case DATA_TYPE_DOUBLE:
9790 case DATA_TYPE_DONTCARE:
9791 case DATA_TYPE_UNKNOWN:
9792 printf(gettext("<unknown>"));
9793 break;
9794 }
9795
9796 printf(gettext("\n"));
9797 }
9798 }
9799
9800 static int
9801 zpool_do_events_next(ev_opts_t *opts)
9802 {
9803 nvlist_t *nvl;
9804 int zevent_fd, ret, dropped;
9805 char *pool;
9806
9807 zevent_fd = open(ZFS_DEV, O_RDWR);
9808 VERIFY(zevent_fd >= 0);
9809
9810 if (!opts->scripted)
9811 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
9812
9813 while (1) {
9814 ret = zpool_events_next(g_zfs, &nvl, &dropped,
9815 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
9816 if (ret || nvl == NULL)
9817 break;
9818
9819 if (dropped > 0)
9820 (void) printf(gettext("dropped %d events\n"), dropped);
9821
9822 if (strlen(opts->poolname) > 0 &&
9823 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
9824 strcmp(opts->poolname, pool) != 0)
9825 continue;
9826
9827 zpool_do_events_short(nvl, opts);
9828
9829 if (opts->verbose) {
9830 zpool_do_events_nvprint(nvl, 8);
9831 printf(gettext("\n"));
9832 }
9833 (void) fflush(stdout);
9834
9835 nvlist_free(nvl);
9836 }
9837
9838 VERIFY(0 == close(zevent_fd));
9839
9840 return (ret);
9841 }
9842
9843 static int
9844 zpool_do_events_clear(void)
9845 {
9846 int count, ret;
9847
9848 ret = zpool_events_clear(g_zfs, &count);
9849 if (!ret)
9850 (void) printf(gettext("cleared %d events\n"), count);
9851
9852 return (ret);
9853 }
9854
9855 /*
9856 * zpool events [-vHf [pool] | -c]
9857 *
9858 * Displays event logs generated by ZFS.
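 *
 * Example invocations (the pool name 'tank' is illustrative):
 *   zpool events            # dump buffered events
 *   zpool events -v tank    # verbose events, limited to pool 'tank'
 *   zpool events -f         # follow new events as they are posted
 *   zpool events -c         # clear the event buffer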
9859 */
9860 int
9861 zpool_do_events(int argc, char **argv)
9862 {
9863 ev_opts_t opts = { 0 };
9864 int ret;
9865 int c;
9866
9867 /* check options */
9868 while ((c = getopt(argc, argv, "vHfc")) != -1) {
9869 switch (c) {
9870 case 'v':
9871 opts.verbose = 1;
9872 break;
9873 case 'H':
9874 opts.scripted = 1;
9875 break;
9876 case 'f':
9877 opts.follow = 1;
9878 break;
9879 case 'c':
9880 opts.clear = 1;
9881 break;
9882 case '?':
9883 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9884 optopt);
9885 usage(B_FALSE);
9886 }
9887 }
9888 argc -= optind;
9889 argv += optind;
9890
9891 if (argc > 1) {
9892 (void) fprintf(stderr, gettext("too many arguments\n"));
9893 usage(B_FALSE);
9894 } else if (argc == 1) {
9895 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
9896 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
9897 (void) fprintf(stderr,
9898 gettext("invalid pool name '%s'\n"), opts.poolname);
9899 usage(B_FALSE);
9900 }
9901 }
9902
9903 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
9904 opts.clear) {
9905 (void) fprintf(stderr,
9906 gettext("invalid options combined with -c\n"));
9907 usage(B_FALSE);
9908 }
9909
9910 if (opts.clear)
9911 ret = zpool_do_events_clear();
9912 else
9913 ret = zpool_do_events_next(&opts);
9914
9915 return (ret);
9916 }
9917
9918 static int
9919 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
9920 {
9921 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9922 char value[ZFS_MAXPROPLEN];
9923 zprop_source_t srctype;
9924
9925 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
9926 pl = pl->pl_next) {
9927 char *prop_name;
9928 /*
9929 * If the first property is the pool name, it is a special
9930 * placeholder that we can skip. This will also skip
9931 * over the name property when 'all' is specified.
9932 */
9933 if (pl->pl_prop == ZPOOL_PROP_NAME &&
9934 pl == cbp->cb_proplist)
9935 continue;
9936
9937 if (pl->pl_prop == ZPROP_INVAL) {
9938 prop_name = pl->pl_user_prop;
9939 } else {
9940 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
9941 }
9942 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
9943 prop_name, value, sizeof (value), &srctype,
9944 cbp->cb_literal) == 0) {
9945 zprop_print_one_property(vdevname, cbp, prop_name,
9946 value, srctype, NULL, NULL);
9947 }
9948 }
9949
9950 return (0);
9951 }
9952
9953 static int
9954 get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data)
9955 {
9956 zpool_handle_t *zhp = zhp_data;
9957 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9958 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
9959 cbp->cb_vdevs.cb_name_flags);
9960 int ret;
9961
9962 /* Adjust the column widths for the vdev properties */
9963 ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
9964
9965 return (ret);
9966 }
9967
9968 static int
9969 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
9970 {
9971 zpool_handle_t *zhp = zhp_data;
9972 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9973 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
9974 cbp->cb_vdevs.cb_name_flags);
9975 int ret;
9976
9977 /* Display the properties */
9978 ret = get_callback_vdev(zhp, vdevname, data);
9979
9980 return (ret);
9981 }
9982
9983 static int
9984 get_callback(zpool_handle_t *zhp, void *data)
9985 {
9986 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9987 char value[MAXNAMELEN];
9988 zprop_source_t srctype;
9989 zprop_list_t *pl;
9990 int vid;
9991
9992 if (cbp->cb_type == ZFS_TYPE_VDEV) {
9993 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
9994 for_each_vdev(zhp, get_callback_vdev_width_cb, data);
9995 for_each_vdev(zhp, get_callback_vdev_cb, data);
9996 } else {
9997 /* Adjust column widths for vdev properties */
9998 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
9999 vid++) {
10000 vdev_expand_proplist(zhp,
10001 cbp->cb_vdevs.cb_names[vid],
10002 &cbp->cb_proplist);
10003 }
10004 /* Display the properties */
10005 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10006 vid++) {
10007 get_callback_vdev(zhp,
10008 cbp->cb_vdevs.cb_names[vid], data);
10009 }
10010 }
10011 } else {
10012 assert(cbp->cb_type == ZFS_TYPE_POOL);
10013 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
10014 /*
10015 * Skip the special fake placeholder. This will also
10016 * skip over the name property when 'all' is specified.
10017 */
10018 if (pl->pl_prop == ZPOOL_PROP_NAME &&
10019 pl == cbp->cb_proplist)
10020 continue;
10021
10022 if (pl->pl_prop == ZPROP_INVAL &&
10023 (zpool_prop_feature(pl->pl_user_prop) ||
10024 zpool_prop_unsupported(pl->pl_user_prop))) {
10025 srctype = ZPROP_SRC_LOCAL;
10026
10027 if (zpool_prop_get_feature(zhp,
10028 pl->pl_user_prop, value,
10029 sizeof (value)) == 0) {
10030 zprop_print_one_property(
10031 zpool_get_name(zhp), cbp,
10032 pl->pl_user_prop, value, srctype,
10033 NULL, NULL);
10034 }
10035 } else {
10036 if (zpool_get_prop(zhp, pl->pl_prop, value,
10037 sizeof (value), &srctype,
10038 cbp->cb_literal) != 0)
10039 continue;
10040
10041 zprop_print_one_property(zpool_get_name(zhp),
10042 cbp, zpool_prop_to_name(pl->pl_prop),
10043 value, srctype, NULL, NULL);
10044 }
10045 }
10046 }
10047
10048 return (0);
10049 }
10050
10051 /*
10052 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
10053 *
10054 * -H Scripted mode. Don't display headers, and separate properties
10055 * by a single tab.
10056 * -o List of columns to display. Defaults to
10057 * "name,property,value,source".
10058 * -p Display values in parsable (exact) format.
10059 *
10060 * Get properties of pools in the system. Output space statistics
10061 * for each one as well as other attributes.
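 *
 * Example invocations (the pool name 'tank' is illustrative):
 *   zpool get all tank                # every pool property of 'tank'
 *   zpool get -Hp size,free tank      # parsable values, no headers
 *   zpool get all tank all-vdevs      # vdev properties for all vdevs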
10062 */
10063 int
10064 zpool_do_get(int argc, char **argv)
10065 {
10066 zprop_get_cbdata_t cb = { 0 };
10067 zprop_list_t fake_name = { 0 };
10068 int ret;
10069 int c, i;
10070 char *value;
10071 char *propstr = NULL;
10072
10073 cb.cb_first = B_TRUE;
10074
10075 /*
10076 * Set up default columns and sources.
10077 */
10078 cb.cb_sources = ZPROP_SRC_ALL;
10079 cb.cb_columns[0] = GET_COL_NAME;
10080 cb.cb_columns[1] = GET_COL_PROPERTY;
10081 cb.cb_columns[2] = GET_COL_VALUE;
10082 cb.cb_columns[3] = GET_COL_SOURCE;
10083 cb.cb_type = ZFS_TYPE_POOL;
10084 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10085 current_prop_type = cb.cb_type;
10086
10087 /* check options */
10088 while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
10089 switch (c) {
10090 case 'p':
10091 cb.cb_literal = B_TRUE;
10092 break;
10093 case 'H':
10094 cb.cb_scripted = B_TRUE;
10095 break;
10096 case 'o':
10097 bzero(&cb.cb_columns, sizeof (cb.cb_columns));
10098 i = 0;
10099 while (*optarg != '\0') {
10100 static char *col_subopts[] =
10101 { "name", "property", "value", "source",
10102 "all", NULL };
10103
10104 if (i == ZFS_GET_NCOLS) {
10105 (void) fprintf(stderr, gettext("too "
10106 "many fields given to -o "
10107 "option\n"));
10108 usage(B_FALSE);
10109 }
10110
10111 switch (getsubopt(&optarg, col_subopts,
10112 &value)) {
10113 case 0:
10114 cb.cb_columns[i++] = GET_COL_NAME;
10115 break;
10116 case 1:
10117 cb.cb_columns[i++] = GET_COL_PROPERTY;
10118 break;
10119 case 2:
10120 cb.cb_columns[i++] = GET_COL_VALUE;
10121 break;
10122 case 3:
10123 cb.cb_columns[i++] = GET_COL_SOURCE;
10124 break;
10125 case 4:
10126 if (i > 0) {
10127 (void) fprintf(stderr,
10128 gettext("\"all\" conflicts "
10129 "with specific fields "
10130 "given to -o option\n"));
10131 usage(B_FALSE);
10132 }
10133 cb.cb_columns[0] = GET_COL_NAME;
10134 cb.cb_columns[1] = GET_COL_PROPERTY;
10135 cb.cb_columns[2] = GET_COL_VALUE;
10136 cb.cb_columns[3] = GET_COL_SOURCE;
10137 i = ZFS_GET_NCOLS;
10138 break;
10139 default:
10140 (void) fprintf(stderr,
10141 gettext("invalid column name "
10142 "'%s'\n"), value);
10143 usage(B_FALSE);
10144 }
10145 }
10146 break;
10147 case '?':
10148 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10149 optopt);
10150 usage(B_FALSE);
10151 }
10152 }
10153
10154 argc -= optind;
10155 argv += optind;
10156
10157 if (argc < 1) {
10158 (void) fprintf(stderr, gettext("missing property "
10159 "argument\n"));
10160 usage(B_FALSE);
10161 }
10162
10163 /* Properties list is needed later by zprop_get_list() */
10164 propstr = argv[0];
10165
10166 argc--;
10167 argv++;
10168
10169 if (argc == 0) {
10170 /* No args, so just print the defaults. */
10171 } else if (are_all_pools(argc, argv)) {
10172 /* All the args are pool names */
10173 } else if (are_all_pools(1, argv)) {
10174 /* The first arg is a pool name */
10175 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
10176 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10177 &cb.cb_vdevs)) {
10178 /* ... and the rest are vdev names */
10179 cb.cb_vdevs.cb_names = argv + 1;
10180 cb.cb_vdevs.cb_names_count = argc - 1;
10181 cb.cb_type = ZFS_TYPE_VDEV;
10182 argc = 1; /* One pool to process */
10183 } else {
10184 fprintf(stderr, gettext("Expected a list of vdevs in"
10185 " \"%s\", but got:\n"), argv[0]);
10186 error_list_unresolved_vdevs(argc - 1, argv + 1,
10187 argv[0], &cb.cb_vdevs);
10188 fprintf(stderr, "\n");
10189 usage(B_FALSE);
10190 return (1);
10191 }
10192 } else {
10193 /*
10194 * The first arg isn't a pool name.
10195 */
10196 fprintf(stderr, gettext("missing pool name.\n"));
10197 fprintf(stderr, "\n");
10198 usage(B_FALSE);
10199 return (1);
10200 }
10201
10202 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
10203 cb.cb_type) != 0) {
10204 /* Use correct list of valid properties (pool or vdev) */
10205 current_prop_type = cb.cb_type;
10206 usage(B_FALSE);
10207 }
10208
10209 if (cb.cb_proplist != NULL) {
10210 fake_name.pl_prop = ZPOOL_PROP_NAME;
10211 fake_name.pl_width = strlen(gettext("NAME"));
10212 fake_name.pl_next = cb.cb_proplist;
10213 cb.cb_proplist = &fake_name;
10214 }
10215
10216 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
10217 cb.cb_literal, get_callback, &cb);
10218
10219 if (cb.cb_proplist == &fake_name)
10220 zprop_free_list(fake_name.pl_next);
10221 else
10222 zprop_free_list(cb.cb_proplist);
10223
10224 return (ret);
10225 }
10226
10227 typedef struct set_cbdata {
10228 char *cb_propname;
10229 char *cb_value;
10230 zfs_type_t cb_type;
10231 vdev_cbdata_t cb_vdevs;
10232 boolean_t cb_any_successful;
10233 } set_cbdata_t;
10234
10235 static int
10236 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
10237 {
10238 int error;
10239
10240 /* Check if we have out-of-bounds features */
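/*
 * Setting 'compatibility' on a pool that already has features enabled
 * outside the new set is allowed: the check below only emits a warning
 * before the property is applied.
 */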
10241 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
10242 boolean_t features[SPA_FEATURES];
10243 if (zpool_do_load_compat(cb->cb_value, features) !=
10244 ZPOOL_COMPATIBILITY_OK)
10245 return (-1);
10246
10247 nvlist_t *enabled = zpool_get_features(zhp);
10248 spa_feature_t i;
10249 for (i = 0; i < SPA_FEATURES; i++) {
10250 const char *fguid = spa_feature_table[i].fi_guid;
10251 if (nvlist_exists(enabled, fguid) && !features[i])
10252 break;
10253 }
10254 if (i < SPA_FEATURES)
10255 (void) fprintf(stderr, gettext("Warning: one or "
10256 "more features already enabled on pool '%s'\n"
10257 "are not present in this compatibility set.\n"),
10258 zpool_get_name(zhp));
10259 }
10260
10261 /* if we're setting a feature, check that it's in the compatibility set */
10262 if (zpool_prop_feature(cb->cb_propname) &&
10263 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
10264 char *fname = strchr(cb->cb_propname, '@') + 1;
10265 spa_feature_t f;
10266
10267 if (zfeature_lookup_name(fname, &f) == 0) {
10268 char compat[ZFS_MAXPROPLEN];
10269 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
10270 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
10271 compat[0] = '\0';
10272
10273 boolean_t features[SPA_FEATURES];
10274 if (zpool_do_load_compat(compat, features) !=
10275 ZPOOL_COMPATIBILITY_OK) {
10276 (void) fprintf(stderr, gettext("Error: "
10277 "cannot enable feature '%s' on pool '%s'\n"
10278 "because the pool's 'compatibility' "
10279 "property cannot be parsed.\n"),
10280 fname, zpool_get_name(zhp));
10281 return (-1);
10282 }
10283
10284 if (!features[f]) {
10285 (void) fprintf(stderr, gettext("Error: "
10286 "cannot enable feature '%s' on pool '%s'\n"
10287 "as it is not specified in this pool's "
10288 "current compatibility set.\n"
10289 "Consider setting 'compatibility' to a "
10290 "less restrictive set, or to 'off'.\n"),
10291 fname, zpool_get_name(zhp));
10292 return (-1);
10293 }
10294 }
10295 }
10296
10297 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
10298
10299 return (error);
10300 }
10301
10302 static int
10303 set_callback(zpool_handle_t *zhp, void *data)
10304 {
10305 int error;
10306 set_cbdata_t *cb = (set_cbdata_t *)data;
10307
10308 if (cb->cb_type == ZFS_TYPE_VDEV) {
10309 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
10310 cb->cb_propname, cb->cb_value);
10311 } else {
10312 assert(cb->cb_type == ZFS_TYPE_POOL);
10313 error = set_pool_callback(zhp, cb);
10314 }
10315
10316 cb->cb_any_successful = !error;
10317 return (error);
10318 }
10319
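/*
 * zpool set property=value <pool>
 * zpool set property=value <pool> <vdev>
 *
 * Set a pool property or, when a vdev is also named, a vdev property.
 * Usage lines above are inferred from the argument handling below.
 */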
10320 int
10321 zpool_do_set(int argc, char **argv)
10322 {
10323 set_cbdata_t cb = { 0 };
10324 int error;
10325
10326 current_prop_type = ZFS_TYPE_POOL;
10327 if (argc > 1 && argv[1][0] == '-') {
10328 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10329 argv[1][1]);
10330 usage(B_FALSE);
10331 }
10332
10333 if (argc < 2) {
10334 (void) fprintf(stderr, gettext("missing property=value "
10335 "argument\n"));
10336 usage(B_FALSE);
10337 }
10338
10339 if (argc < 3) {
10340 (void) fprintf(stderr, gettext("missing pool name\n"));
10341 usage(B_FALSE);
10342 }
10343
10344 if (argc > 4) {
10345 (void) fprintf(stderr, gettext("too many pool names\n"));
10346 usage(B_FALSE);
10347 }
10348
10349 cb.cb_propname = argv[1];
10350 cb.cb_type = ZFS_TYPE_POOL;
10351 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10352 cb.cb_value = strchr(cb.cb_propname, '=');
10353 if (cb.cb_value == NULL) {
10354 (void) fprintf(stderr, gettext("missing value in "
10355 "property=value argument\n"));
10356 usage(B_FALSE);
10357 }
10358
10359 *(cb.cb_value) = '\0';
10360 cb.cb_value++;
10361 argc -= 2;
10362 argv += 2;
10363
10364 if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
10365 /* Argument is a vdev */
10366 cb.cb_vdevs.cb_names = argv;
10367 cb.cb_vdevs.cb_names_count = 1;
10368 cb.cb_type = ZFS_TYPE_VDEV;
10369 argc = 0; /* No pools to process */
10370 } else if (are_all_pools(1, argv)) {
10371 /* The first arg is a pool name */
10372 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10373 &cb.cb_vdevs)) {
10374 /* 2nd argument is a vdev */
10375 cb.cb_vdevs.cb_names = argv + 1;
10376 cb.cb_vdevs.cb_names_count = 1;
10377 cb.cb_type = ZFS_TYPE_VDEV;
10378 argc = 1; /* One pool to process */
10379 } else if (argc > 1) {
10380 (void) fprintf(stderr,
10381 gettext("too many pool names\n"));
10382 usage(B_FALSE);
10383 }
10384 }
10385
10386 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
10387 B_FALSE, set_callback, &cb);
10388
10389 return (error);
10390 }
10391
10392 /* Add up the total number of bytes left to initialize/trim across all vdevs */
10393 static uint64_t
10394 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
10395 {
10396 uint64_t bytes_remaining;
10397 nvlist_t **child;
10398 uint_t c, children;
10399 vdev_stat_t *vs;
10400
10401 assert(activity == ZPOOL_WAIT_INITIALIZE ||
10402 activity == ZPOOL_WAIT_TRIM);
10403
10404 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
10405 (uint64_t **)&vs, &c) == 0);
10406
10407 if (activity == ZPOOL_WAIT_INITIALIZE &&
10408 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
10409 bytes_remaining = vs->vs_initialize_bytes_est -
10410 vs->vs_initialize_bytes_done;
10411 else if (activity == ZPOOL_WAIT_TRIM &&
10412 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
10413 bytes_remaining = vs->vs_trim_bytes_est -
10414 vs->vs_trim_bytes_done;
10415 else
10416 bytes_remaining = 0;
10417
10418 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10419 &child, &children) != 0)
10420 children = 0;
10421
10422 for (c = 0; c < children; c++)
10423 bytes_remaining += vdev_activity_remaining(child[c], activity);
10424
10425 return (bytes_remaining);
10426 }
10427
10428 /* Add up the total number of bytes left to rebuild across top-level vdevs */
10429 static uint64_t
10430 vdev_activity_top_remaining(nvlist_t *nv)
10431 {
10432 uint64_t bytes_remaining = 0;
10433 nvlist_t **child;
10434 uint_t children;
10435 int error;
10436
10437 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10438 &child, &children) != 0)
10439 children = 0;
10440
10441 for (uint_t c = 0; c < children; c++) {
10442 vdev_rebuild_stat_t *vrs;
10443 uint_t i;
10444
10445 error = nvlist_lookup_uint64_array(child[c],
10446 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
10447 if (error == 0) {
10448 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
10449 bytes_remaining += (vrs->vrs_bytes_est -
10450 vrs->vrs_bytes_rebuilt);
10451 }
10452 }
10453 }
10454
10455 return (bytes_remaining);
10456 }
10457
10458 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
10459 static boolean_t
10460 vdev_any_spare_replacing(nvlist_t *nv)
10461 {
10462 nvlist_t **child;
10463 uint_t c, children;
10464 char *vdev_type;
10465
10466 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
10467
10468 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
10469 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
10470 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
10471 return (B_TRUE);
10472 }
10473
10474 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10475 &child, &children) != 0)
10476 children = 0;
10477
10478 for (c = 0; c < children; c++) {
10479 if (vdev_any_spare_replacing(child[c]))
10480 return (B_TRUE);
10481 }
10482
10483 return (B_FALSE);
10484 }
10485
10486 typedef struct wait_data {
10487 char *wd_poolname;
10488 boolean_t wd_scripted;
10489 boolean_t wd_exact;
10490 boolean_t wd_headers_once;
10491 boolean_t wd_should_exit;
10492 /* Which activities to wait for */
10493 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
10494 float wd_interval;
10495 pthread_cond_t wd_cv;
10496 pthread_mutex_t wd_mutex;
10497 } wait_data_t;
10498
10499 /*
10500 * Print to stdout a single line containing one column for each activity we
10501 * are waiting for, specifying how many bytes of work are left for that
10502 * activity.
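 *
 * Example output (values are purely illustrative):
 *   DISCARD   FREE  INITIALIZE  REPLACE  REMOVE  RESILVER  SCRUB  TRIM
 *         0  1.13G        512M        0       0         0      0     0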
10503 */
10504 static void
10505 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
10506 {
10507 nvlist_t *config, *nvroot;
10508 uint_t c;
10509 int i;
10510 pool_checkpoint_stat_t *pcs = NULL;
10511 pool_scan_stat_t *pss = NULL;
10512 pool_removal_stat_t *prs = NULL;
10513 char *headers[] = {"DISCARD", "FREE", "INITIALIZE", "REPLACE",
10514 "REMOVE", "RESILVER", "SCRUB", "TRIM"};
10515 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
10516
10517 /* Calculate the width of each column */
10518 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10519 /*
10520 * Make sure we have enough space in the col for pretty-printed
10521 * numbers and for the column header, and then leave a couple
10522 * spaces between cols for readability.
10523 */
10524 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
10525 }
10526
10527 /* Print header if appropriate */
10528 int term_height = terminal_height();
10529 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
10530 row % (term_height-1) == 0);
10531 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
10532 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10533 if (wd->wd_enabled[i])
10534 (void) printf("%*s", col_widths[i], headers[i]);
10535 }
10536 (void) printf("\n");
10537 }
10538
10539 /* Bytes of work remaining in each activity */
10540 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
10541
10542 bytes_rem[ZPOOL_WAIT_FREE] =
10543 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
10544
10545 config = zpool_get_config(zhp, NULL);
10546 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10547
10548 (void) nvlist_lookup_uint64_array(nvroot,
10549 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10550 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
10551 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
10552
10553 (void) nvlist_lookup_uint64_array(nvroot,
10554 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
10555 if (prs != NULL && prs->prs_state == DSS_SCANNING)
10556 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
10557 prs->prs_copied;
10558
10559 (void) nvlist_lookup_uint64_array(nvroot,
10560 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
10561 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
10562 pss->pss_pass_scrub_pause == 0) {
10563 int64_t rem = pss->pss_to_examine - pss->pss_issued;
10564 if (pss->pss_func == POOL_SCAN_SCRUB)
10565 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
10566 else
10567 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
10568 } else if (check_rebuilding(nvroot, NULL)) {
10569 bytes_rem[ZPOOL_WAIT_RESILVER] =
10570 vdev_activity_top_remaining(nvroot);
10571 }
10572
10573 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
10574 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
10575 bytes_rem[ZPOOL_WAIT_TRIM] =
10576 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
10577
10578 /*
10579 * A replace finishes after resilvering finishes, so the amount of work
10580 * left for a replace is the same as for resilvering.
10581 *
10582 * It isn't quite correct to say that if we have any 'spare' or
10583 * 'replacing' vdevs and a resilver is happening, then a replace is in
10584 * progress, like we do here. When a hot spare is used, the faulted vdev
10585 * is not removed after the hot spare is resilvered, so the parent 'spare'
10586 * vdev is not removed either. So we could have a 'spare' vdev, but be
10587 * resilvering for a different reason. However, we use it as a heuristic
10588 * because we don't have access to the DTLs, which could tell us whether
10589 * or not we have really finished resilvering a hot spare.
10590 */
10591 if (vdev_any_spare_replacing(nvroot))
10592 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
10593
10594 if (timestamp_fmt != NODATE)
10595 print_timestamp(timestamp_fmt);
10596
10597 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10598 char buf[64];
10599 if (!wd->wd_enabled[i])
10600 continue;
10601
10602 if (wd->wd_exact)
10603 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
10604 bytes_rem[i]);
10605 else
10606 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
10607
10608 if (wd->wd_scripted)
10609 (void) printf(i == 0 ? "%s" : "\t%s", buf);
10610 else
10611 (void) printf(" %*s", col_widths[i] - 1, buf);
10612 }
10613 (void) printf("\n");
10614 (void) fflush(stdout);
10615 }
10616
10617 static void *
10618 wait_status_thread(void *arg)
10619 {
10620 wait_data_t *wd = (wait_data_t *)arg;
10621 zpool_handle_t *zhp;
10622
10623 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
10624 return (void *)(1);
10625
10626 for (int row = 0; ; row++) {
10627 boolean_t missing;
10628 struct timespec timeout;
10629 int ret = 0;
10630 (void) clock_gettime(CLOCK_REALTIME, &timeout);
10631
10632 if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
10633 zpool_props_refresh(zhp) != 0) {
10634 zpool_close(zhp);
10635 return (void *)(uintptr_t)(missing ? 0 : 1);
10636 }
10637
10638 print_wait_status_row(wd, zhp, row);
10639
10640 timeout.tv_sec += floor(wd->wd_interval);
10641 long nanos = timeout.tv_nsec +
10642 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
10643 if (nanos >= NANOSEC) {
10644 timeout.tv_sec++;
10645 timeout.tv_nsec = nanos - NANOSEC;
10646 } else {
10647 timeout.tv_nsec = nanos;
10648 }
10649 pthread_mutex_lock(&wd->wd_mutex);
10650 if (!wd->wd_should_exit)
10651 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
10652 &timeout);
10653 pthread_mutex_unlock(&wd->wd_mutex);
10654 if (ret == 0) {
10655 break; /* signaled by main thread */
10656 } else if (ret != ETIMEDOUT) {
10657 (void) fprintf(stderr, gettext("pthread_cond_timedwait "
10658 "failed: %s\n"), strerror(ret));
10659 zpool_close(zhp);
10660 return (void *)(uintptr_t)(1);
10661 }
10662 }
10663
10664 zpool_close(zhp);
10665 return (void *)(0);
10666 }
10667
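/*
 * zpool wait [-Hpn] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 * Block until the selected background activities (discard, free, initialize,
 * replace, remove, resilver, scrub, trim) have completed. If an interval is
 * given, also print the remaining work for each activity periodically.
 * Usage line is inferred from the option handling below.
 */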
10668 int
10669 zpool_do_wait(int argc, char **argv)
10670 {
10671 boolean_t verbose = B_FALSE;
10672 int c;
10673 char *value;
10674 int i;
10675 unsigned long count;
10676 pthread_t status_thr;
10677 int error = 0;
10678 zpool_handle_t *zhp;
10679
10680 wait_data_t wd;
10681 wd.wd_scripted = B_FALSE;
10682 wd.wd_exact = B_FALSE;
10683 wd.wd_headers_once = B_FALSE;
10684 wd.wd_should_exit = B_FALSE;
10685
10686 pthread_mutex_init(&wd.wd_mutex, NULL);
10687 pthread_cond_init(&wd.wd_cv, NULL);
10688
10689 /* By default, wait for all types of activity. */
10690 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
10691 wd.wd_enabled[i] = B_TRUE;
10692
10693 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
10694 switch (c) {
10695 case 'H':
10696 wd.wd_scripted = B_TRUE;
10697 break;
10698 case 'n':
10699 wd.wd_headers_once = B_TRUE;
10700 break;
10701 case 'p':
10702 wd.wd_exact = B_TRUE;
10703 break;
10704 case 'T':
10705 get_timestamp_arg(*optarg);
10706 break;
10707 case 't':
10708 {
10709 static char *col_subopts[] = { "discard", "free",
10710 "initialize", "replace", "remove", "resilver",
10711 "scrub", "trim", NULL };
10712
10713 /* Reset activities array */
10714 bzero(&wd.wd_enabled, sizeof (wd.wd_enabled));
10715 while (*optarg != '\0') {
10716 int activity = getsubopt(&optarg, col_subopts,
10717 &value);
10718
10719 if (activity < 0) {
10720 (void) fprintf(stderr,
10721 gettext("invalid activity '%s'\n"),
10722 value);
10723 usage(B_FALSE);
10724 }
10725
10726 wd.wd_enabled[activity] = B_TRUE;
10727 }
10728 break;
10729 }
10730 case '?':
10731 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10732 optopt);
10733 usage(B_FALSE);
10734 }
10735 }
10736
10737 argc -= optind;
10738 argv += optind;
10739
10740 get_interval_count(&argc, argv, &wd.wd_interval, &count);
10741 if (count != 0) {
10742 /* This subcmd only accepts an interval, not a count */
10743 (void) fprintf(stderr, gettext("too many arguments\n"));
10744 usage(B_FALSE);
10745 }
10746
10747 if (wd.wd_interval != 0)
10748 verbose = B_TRUE;
10749
10750 if (argc < 1) {
10751 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
10752 usage(B_FALSE);
10753 }
10754 if (argc > 1) {
10755 (void) fprintf(stderr, gettext("too many arguments\n"));
10756 usage(B_FALSE);
10757 }
10758
10759 wd.wd_poolname = argv[0];
10760
10761 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
10762 return (1);
10763
10764 if (verbose) {
10765 /*
10766 * We use a separate thread for printing status updates because
10767 * the main thread will call lzc_wait(), which blocks as long
10768 * as an activity is in progress, which can be a long time.
10769 */
10770 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
10771 != 0) {
10772 (void) fprintf(stderr, gettext("failed to create status "
10773 "thread: %s\n"), strerror(errno));
10774 zpool_close(zhp);
10775 return (1);
10776 }
10777 }
10778
10779 /*
10780 * Loop over all activities that we are supposed to wait for until none
10781 * of them are in progress. Note that this means we can end up waiting
10782 * for more activities to complete than just those that were in progress
10783 * when we began waiting; if an activity we are interested in begins
10784 * while we are waiting for another activity, we will wait for both to
10785 * complete before exiting.
10786 */
10787 for (;;) {
10788 boolean_t missing = B_FALSE;
10789 boolean_t any_waited = B_FALSE;
10790
10791 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10792 boolean_t waited;
10793
10794 if (!wd.wd_enabled[i])
10795 continue;
10796
10797 error = zpool_wait_status(zhp, i, &missing, &waited);
10798 if (error != 0 || missing)
10799 break;
10800
10801 any_waited = (any_waited || waited);
10802 }
10803
10804 if (error != 0 || missing || !any_waited)
10805 break;
10806 }
10807
10808 zpool_close(zhp);
10809
10810 if (verbose) {
10811 uintptr_t status;
10812 pthread_mutex_lock(&wd.wd_mutex);
10813 wd.wd_should_exit = B_TRUE;
10814 pthread_cond_signal(&wd.wd_cv);
10815 pthread_mutex_unlock(&wd.wd_mutex);
10816 (void) pthread_join(status_thr, (void *)&status);
10817 if (status != 0)
10818 error = status;
10819 }
10820
10821 pthread_mutex_destroy(&wd.wd_mutex);
10822 pthread_cond_destroy(&wd.wd_cv);
10823 return (error);
10824 }
10825
10826 static int
10827 find_command_idx(char *command, int *idx)
10828 {
10829 int i;
10830
10831 for (i = 0; i < NCOMMAND; i++) {
10832 if (command_table[i].name == NULL)
10833 continue;
10834
10835 if (strcmp(command, command_table[i].name) == 0) {
10836 *idx = i;
10837 return (0);
10838 }
10839 }
10840 return (1);
10841 }
10842
10843 /*
10844 * Display version message
10845 */
10846 static int
10847 zpool_do_version(int argc, char **argv)
10848 {
10849 (void) argc, (void) argv;
10850
10851 if (zfs_version_print() == -1)
10852 return (1);
10853
10854 return (0);
10855 }
10856
10857 /*
10858 * Call zpool_load_compat() and print an error message on failure
10859 */
10860 static zpool_compat_status_t
10861 zpool_do_load_compat(const char *compat, boolean_t *list)
10862 {
10863 char report[1024];
10864
10865 zpool_compat_status_t ret;
10866
10867 ret = zpool_load_compat(compat, list, report, sizeof (report));
10868 switch (ret) {
10869
10870 case ZPOOL_COMPATIBILITY_OK:
10871 break;
10872
10873 case ZPOOL_COMPATIBILITY_NOFILES:
10874 case ZPOOL_COMPATIBILITY_BADFILE:
10875 case ZPOOL_COMPATIBILITY_BADTOKEN:
10876 (void) fprintf(stderr, "Error: %s\n", report);
10877 break;
10878
10879 case ZPOOL_COMPATIBILITY_WARNTOKEN:
10880 (void) fprintf(stderr, "Warning: %s\n", report);
10881 ret = ZPOOL_COMPATIBILITY_OK;
10882 break;
10883 }
10884 return (ret);
10885 }
10886
10887 int
10888 main(int argc, char **argv)
10889 {
10890 int ret = 0;
10891 int i = 0;
10892 char *cmdname;
10893 char **newargv;
10894
10895 (void) setlocale(LC_ALL, "");
10896 (void) setlocale(LC_NUMERIC, "C");
10897 (void) textdomain(TEXT_DOMAIN);
10898 srand(time(NULL));
10899
10900 opterr = 0;
10901
10902 /*
10903 * Make sure the user has specified some command.
10904 */
10905 if (argc < 2) {
10906 (void) fprintf(stderr, gettext("missing command\n"));
10907 usage(B_FALSE);
10908 }
10909
10910 cmdname = argv[1];
10911
10912 /*
10913 * Special case '-?'
10914 */
10915 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
10916 usage(B_TRUE);
10917
10918 /*
10919 * Special case '-V|--version'
10920 */
10921 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
10922 return (zpool_do_version(argc, argv));
10923
10924 if ((g_zfs = libzfs_init()) == NULL) {
10925 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
10926 return (1);
10927 }
10928
10929 libzfs_print_on_error(g_zfs, B_TRUE);
10930
10931 zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
10932
10933 /*
10934 * Many commands modify input strings for string parsing reasons.
10935 * We create a copy to protect the original argv.
10936 */
10937 newargv = malloc((argc + 1) * sizeof (newargv[0]));
10938 for (i = 0; i < argc; i++)
10939 newargv[i] = strdup(argv[i]);
10940 newargv[argc] = NULL;
10941
10942 /*
10943 * Run the appropriate command.
10944 */
10945 if (find_command_idx(cmdname, &i) == 0) {
10946 current_command = &command_table[i];
10947 ret = command_table[i].func(argc - 1, newargv + 1);
10948 } else if (strchr(cmdname, '=')) {
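/*
 * A bare "property=value" first argument is shorthand for the
 * 'set' subcommand; route it there.
 */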
10949 verify(find_command_idx("set", &i) == 0);
10950 current_command = &command_table[i];
10951 ret = command_table[i].func(argc, newargv);
10952 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
10953 /*
10954 * 'freeze' is a vile debugging abomination, so we treat
10955 * it as such.
10956 */
10957 zfs_cmd_t zc = {"\0"};
10958
10959 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
10960 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
10961 if (ret != 0) {
10962 (void) fprintf(stderr,
10963 gettext("failed to freeze pool: %d\n"), errno);
10964 ret = 1;
10965 }
10966
10967 log_history = 0;
10968 } else {
10969 (void) fprintf(stderr, gettext("unrecognized "
10970 "command '%s'\n"), cmdname);
10971 usage(B_FALSE);
10972 ret = 1;
10973 }
10974
10975 for (i = 0; i < argc; i++)
10976 free(newargv[i]);
10977 free(newargv);
10978
10979 if (ret == 0 && log_history)
10980 (void) zpool_log_history(g_zfs, history_str);
10981
10982 libzfs_fini(g_zfs);
10983
10984 /*
10985 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
10986 * for the purposes of running ::findleaks.
10987 */
10988 if (getenv("ZFS_ABORT") != NULL) {
10989 (void) printf("dumping core by request\n");
10990 abort();
10991 }
10992
10993 return (ret);
10994 }