1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
26 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
27 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
28 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
30 * Copyright (c) 2017 Datto Inc.
31 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
32 * Copyright (c) 2017, Intel Corporation.
33 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
34 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
35 * Copyright (c) 2021, Klara Inc.
36 * Copyright [2021] Hewlett Packard Enterprise Development LP
37 */
38
39 #include <assert.h>
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <fcntl.h>
44 #include <getopt.h>
45 #include <libgen.h>
46 #include <libintl.h>
47 #include <libuutil.h>
48 #include <locale.h>
49 #include <pthread.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <time.h>
54 #include <unistd.h>
55 #include <pwd.h>
56 #include <zone.h>
57 #include <sys/wait.h>
58 #include <zfs_prop.h>
59 #include <sys/fs/zfs.h>
60 #include <sys/stat.h>
61 #include <sys/systeminfo.h>
62 #include <sys/fm/fs/zfs.h>
63 #include <sys/fm/util.h>
64 #include <sys/fm/protocol.h>
65 #include <sys/zfs_ioctl.h>
66 #include <sys/mount.h>
67 #include <sys/sysmacros.h>
68
69 #include <math.h>
70
71 #include <libzfs.h>
72 #include <libzutil.h>
73
74 #include "zpool_util.h"
75 #include "zfs_comutil.h"
76 #include "zfeature_common.h"
77
78 #include "statcommon.h"
79
80 libzfs_handle_t *g_zfs;
81
82 static int zpool_do_create(int, char **);
83 static int zpool_do_destroy(int, char **);
84
85 static int zpool_do_add(int, char **);
86 static int zpool_do_remove(int, char **);
87 static int zpool_do_labelclear(int, char **);
88
89 static int zpool_do_checkpoint(int, char **);
90
91 static int zpool_do_list(int, char **);
92 static int zpool_do_iostat(int, char **);
93 static int zpool_do_status(int, char **);
94
95 static int zpool_do_online(int, char **);
96 static int zpool_do_offline(int, char **);
97 static int zpool_do_clear(int, char **);
98 static int zpool_do_reopen(int, char **);
99
100 static int zpool_do_reguid(int, char **);
101
102 static int zpool_do_attach(int, char **);
103 static int zpool_do_detach(int, char **);
104 static int zpool_do_replace(int, char **);
105 static int zpool_do_split(int, char **);
106
107 static int zpool_do_initialize(int, char **);
108 static int zpool_do_scrub(int, char **);
109 static int zpool_do_resilver(int, char **);
110 static int zpool_do_trim(int, char **);
111
112 static int zpool_do_import(int, char **);
113 static int zpool_do_export(int, char **);
114
115 static int zpool_do_upgrade(int, char **);
116
117 static int zpool_do_history(int, char **);
118 static int zpool_do_events(int, char **);
119
120 static int zpool_do_get(int, char **);
121 static int zpool_do_set(int, char **);
122
123 static int zpool_do_sync(int, char **);
124
125 static int zpool_do_version(int, char **);
126
127 static int zpool_do_wait(int, char **);
128
129 static int zpool_do_help(int argc, char **argv);
130
131 static zpool_compat_status_t zpool_do_load_compat(
132 const char *, boolean_t *);
133
134 enum zpool_options {
135 ZPOOL_OPTION_POWER = 1024,
136 ZPOOL_OPTION_ALLOW_INUSE,
137 ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
138 ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH
139 };
140
141 /*
142 * These libumem hooks provide a reasonable set of defaults for the allocator's
143 * debugging facilities.
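 * (In DEBUG builds they are consulted by libumem as defaults for the
 * $UMEM_DEBUG and $UMEM_LOGGING environment variables -- an assumption
 * based on libumem's usual debug-hook convention.)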
144 */
145
146 #ifdef DEBUG
147 const char *
148 _umem_debug_init(void)
149 {
150 return ("default,verbose"); /* $UMEM_DEBUG setting */
151 }
152
153 const char *
154 _umem_logging_init(void)
155 {
156 return ("fail,contents"); /* $UMEM_LOGGING setting */
157 }
158 #endif
159
160 typedef enum {
161 HELP_ADD,
162 HELP_ATTACH,
163 HELP_CLEAR,
164 HELP_CREATE,
165 HELP_CHECKPOINT,
166 HELP_DESTROY,
167 HELP_DETACH,
168 HELP_EXPORT,
169 HELP_HISTORY,
170 HELP_IMPORT,
171 HELP_IOSTAT,
172 HELP_LABELCLEAR,
173 HELP_LIST,
174 HELP_OFFLINE,
175 HELP_ONLINE,
176 HELP_REPLACE,
177 HELP_REMOVE,
178 HELP_INITIALIZE,
179 HELP_SCRUB,
180 HELP_RESILVER,
181 HELP_TRIM,
182 HELP_STATUS,
183 HELP_UPGRADE,
184 HELP_EVENTS,
185 HELP_GET,
186 HELP_SET,
187 HELP_SPLIT,
188 HELP_SYNC,
189 HELP_REGUID,
190 HELP_REOPEN,
191 HELP_VERSION,
192 HELP_WAIT
193 } zpool_help_t;
194
195
196 /*
197  * Flags for stats to display with "zpool iostat"
198 */
199 enum iostat_type {
200 IOS_DEFAULT = 0,
201 IOS_LATENCY = 1,
202 IOS_QUEUES = 2,
203 IOS_L_HISTO = 3,
204 IOS_RQ_HISTO = 4,
205 IOS_COUNT, /* always last element */
206 };
207
208 /* iostat_type entries as bitmasks */
209 #define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
210 #define IOS_LATENCY_M (1ULL << IOS_LATENCY)
211 #define IOS_QUEUES_M (1ULL << IOS_QUEUES)
212 #define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
213 #define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
214
215 /* Mask of all the histo bits */
216 #define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
217
218 /*
219 * Lookup table for iostat flags to nvlist names. Basically a list
220 * of all the nvlists a flag requires. Also specifies the order in
221 * which data gets printed in zpool iostat.
222 */
223 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
224 [IOS_L_HISTO] = {
225 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
226 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
227 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
228 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
229 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
230 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
231 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
232 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
233 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
234 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
235 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
236 NULL},
237 [IOS_LATENCY] = {
238 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
239 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
240 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
241 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
242 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
243 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
244 NULL},
245 [IOS_QUEUES] = {
246 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
247 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
248 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
249 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
250 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
251 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
252 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
253 NULL},
254 [IOS_RQ_HISTO] = {
255 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
256 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
257 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
258 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
259 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
260 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
261 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
262 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
263 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
264 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
265 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
266 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
267 ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
268 ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
269 NULL},
270 };
271
272
273 /*
274 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
275 * Right now, only one histo bit is ever set at one time, so we can
276 * just do a highbit64(a)
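 * For example, if only IOS_RQ_HISTO_M (1ULL << 4) is set, highbit64()
 * returns 5, so IOS_HISTO_IDX() yields IOS_RQ_HISTO (4) -- assuming
 * highbit64() returns the 1-based index of the highest set bit.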
277 */
278 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
279
280 typedef struct zpool_command {
281 const char *name;
282 int (*func)(int, char **);
283 zpool_help_t usage;
284 } zpool_command_t;
285
286 /*
287  * Master command table.  Each zpool command has a name, associated function, and
288 * usage message. The usage messages need to be internationalized, so we have
289 * to have a function to return the usage message based on a command index.
290 *
291 * These commands are organized according to how they are displayed in the usage
292 * message. An empty command (one with a NULL name) indicates an empty line in
293 * the generic usage message.
294 */
295 static zpool_command_t command_table[] = {
296 { "version", zpool_do_version, HELP_VERSION },
297 { NULL },
298 { "create", zpool_do_create, HELP_CREATE },
299 { "destroy", zpool_do_destroy, HELP_DESTROY },
300 { NULL },
301 { "add", zpool_do_add, HELP_ADD },
302 { "remove", zpool_do_remove, HELP_REMOVE },
303 { NULL },
304 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
305 { NULL },
306 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
307 { NULL },
308 { "list", zpool_do_list, HELP_LIST },
309 { "iostat", zpool_do_iostat, HELP_IOSTAT },
310 { "status", zpool_do_status, HELP_STATUS },
311 { NULL },
312 { "online", zpool_do_online, HELP_ONLINE },
313 { "offline", zpool_do_offline, HELP_OFFLINE },
314 { "clear", zpool_do_clear, HELP_CLEAR },
315 { "reopen", zpool_do_reopen, HELP_REOPEN },
316 { NULL },
317 { "attach", zpool_do_attach, HELP_ATTACH },
318 { "detach", zpool_do_detach, HELP_DETACH },
319 { "replace", zpool_do_replace, HELP_REPLACE },
320 { "split", zpool_do_split, HELP_SPLIT },
321 { NULL },
322 { "initialize", zpool_do_initialize, HELP_INITIALIZE },
323 { "resilver", zpool_do_resilver, HELP_RESILVER },
324 { "scrub", zpool_do_scrub, HELP_SCRUB },
325 { "trim", zpool_do_trim, HELP_TRIM },
326 { NULL },
327 { "import", zpool_do_import, HELP_IMPORT },
328 { "export", zpool_do_export, HELP_EXPORT },
329 { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
330 { "reguid", zpool_do_reguid, HELP_REGUID },
331 { NULL },
332 { "history", zpool_do_history, HELP_HISTORY },
333 { "events", zpool_do_events, HELP_EVENTS },
334 { NULL },
335 { "get", zpool_do_get, HELP_GET },
336 { "set", zpool_do_set, HELP_SET },
337 { "sync", zpool_do_sync, HELP_SYNC },
338 { NULL },
339 { "wait", zpool_do_wait, HELP_WAIT },
340 };
341
342 #define NCOMMAND (ARRAY_SIZE(command_table))
343
344 #define VDEV_ALLOC_CLASS_LOGS "logs"
345
346 static zpool_command_t *current_command;
347 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
348 static char history_str[HIS_MAX_RECORD_LEN];
349 static boolean_t log_history = B_TRUE;
350 static uint_t timestamp_fmt = NODATE;
351
352 static const char *
353 get_usage(zpool_help_t idx)
354 {
355 switch (idx) {
356 case HELP_ADD:
357 return (gettext("\tadd [-afgLnP] [-o property=value] "
358 "<pool> <vdev> ...\n"));
359 case HELP_ATTACH:
360 return (gettext("\tattach [-fsw] [-o property=value] "
361 "<pool> <device> <new-device>\n"));
362 case HELP_CLEAR:
363 return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
364 case HELP_CREATE:
365 return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
366 "\t [-O file-system-property=value] ... \n"
367 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
368 case HELP_CHECKPOINT:
369 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
370 case HELP_DESTROY:
371 return (gettext("\tdestroy [-f] <pool>\n"));
372 case HELP_DETACH:
373 return (gettext("\tdetach <pool> <device>\n"));
374 case HELP_EXPORT:
375 return (gettext("\texport [-af] <pool> ...\n"));
376 case HELP_HISTORY:
377 return (gettext("\thistory [-il] [<pool>] ...\n"));
378 case HELP_IMPORT:
379 return (gettext("\timport [-d dir] [-D]\n"
380 "\timport [-o mntopts] [-o property=value] ... \n"
381 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
382 "[-R root] [-F [-n]] -a\n"
383 "\timport [-o mntopts] [-o property=value] ... \n"
384 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
385 "[-R root] [-F [-n]]\n"
386 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
387 case HELP_IOSTAT:
388 return (gettext("\tiostat [[[-c [script1,script2,...]"
389 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
390 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
391 " [[-n] interval [count]]\n"));
392 case HELP_LABELCLEAR:
393 return (gettext("\tlabelclear [-f] <vdev>\n"));
394 case HELP_LIST:
395 return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
396 "[-T d|u] [pool] ... \n"
397 "\t [interval [count]]\n"));
398 case HELP_OFFLINE:
399 return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
400 "<device> ...\n"));
401 case HELP_ONLINE:
402 return (gettext("\tonline [--power][-e] <pool> <device> "
403 "...\n"));
404 case HELP_REPLACE:
405 return (gettext("\treplace [-fsw] [-o property=value] "
406 "<pool> <device> [new-device]\n"));
407 case HELP_REMOVE:
408 return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
409 case HELP_REOPEN:
410 return (gettext("\treopen [-n] <pool>\n"));
411 case HELP_INITIALIZE:
412 return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
413 "[<device> ...]\n"));
414 case HELP_SCRUB:
415 return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
416 case HELP_RESILVER:
417 return (gettext("\tresilver <pool> ...\n"));
418 case HELP_TRIM:
419 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
420 "[<device> ...]\n"));
421 case HELP_STATUS:
422 return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
423 "[-DegiLpPstvx] [-T d|u] [pool] ...\n"
424 "\t [interval [count]]\n"));
425 case HELP_UPGRADE:
426 return (gettext("\tupgrade\n"
427 "\tupgrade -v\n"
428 "\tupgrade [-V version] <-a | pool ...>\n"));
429 case HELP_EVENTS:
430 return (gettext("\tevents [-vHf [pool] | -c]\n"));
431 case HELP_GET:
432 return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
433 "<\"all\" | property[,...]> <pool> ...\n"));
434 case HELP_SET:
435 return (gettext("\tset <property=value> <pool>\n"
436 "\tset <vdev_property=value> <pool> <vdev>\n"));
437 case HELP_SPLIT:
438 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
439 "\t [-o property=value] <pool> <newpool> "
440 "[<device> ...]\n"));
441 case HELP_REGUID:
442 return (gettext("\treguid <pool>\n"));
443 case HELP_SYNC:
444 return (gettext("\tsync [pool] ...\n"));
445 case HELP_VERSION:
446 return (gettext("\tversion\n"));
447 case HELP_WAIT:
448 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
449 "<pool> [interval]\n"));
450 default:
451 __builtin_unreachable();
452 }
453 }
454
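/*
 * Recursively walk the vdev tree rooted at 'nvroot' and add the name of
 * every leaf vdev to the 'res' nvlist, skipping indirect and hole vdevs.
 */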
455 static void
456 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
457 {
458 uint_t children = 0;
459 nvlist_t **child;
460 uint_t i;
461
462 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
463 &child, &children);
464
465 if (children == 0) {
466 char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
467 VDEV_NAME_PATH);
468
469 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
470 strcmp(path, VDEV_TYPE_HOLE) != 0)
471 fnvlist_add_boolean(res, path);
472
473 free(path);
474 return;
475 }
476
477 for (i = 0; i < children; i++) {
478 zpool_collect_leaves(zhp, child[i], res);
479 }
480 }
481
482 /*
483 * Callback routine that will print out a pool property value.
484 */
485 static int
486 print_pool_prop_cb(int prop, void *cb)
487 {
488 FILE *fp = cb;
489
490 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
491
492 if (zpool_prop_readonly(prop))
493 (void) fprintf(fp, " NO ");
494 else
495 (void) fprintf(fp, " YES ");
496
497 if (zpool_prop_values(prop) == NULL)
498 (void) fprintf(fp, "-\n");
499 else
500 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
501
502 return (ZPROP_CONT);
503 }
504
505 /*
506 * Callback routine that will print out a vdev property value.
507 */
508 static int
509 print_vdev_prop_cb(int prop, void *cb)
510 {
511 FILE *fp = cb;
512
513 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
514
515 if (vdev_prop_readonly(prop))
516 (void) fprintf(fp, " NO ");
517 else
518 (void) fprintf(fp, " YES ");
519
520 if (vdev_prop_values(prop) == NULL)
521 (void) fprintf(fp, "-\n");
522 else
523 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
524
525 return (ZPROP_CONT);
526 }
527
528 /*
529 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
530 * '/dev/disk/by-vdev/L5'.
531 */
532 static const char *
533 vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
534 {
535 nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
536 if (vdev_nv == NULL) {
537 return (NULL);
538 }
539 return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
540 }
541
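/*
 * Power-management helpers used by the --power option: thin wrappers
 * around zpool_power(), optionally waiting afterwards for the disk to
 * reappear (zpool_disk_wait()).
 */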
542 static int
543 zpool_power_on(zpool_handle_t *zhp, char *vdev)
544 {
545 return (zpool_power(zhp, vdev, B_TRUE));
546 }
547
548 static int
549 zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
550 {
551 int rc;
552
553 rc = zpool_power_on(zhp, vdev);
554 if (rc != 0)
555 return (rc);
556
557 zpool_disk_wait(vdev_name_to_path(zhp, vdev));
558
559 return (0);
560 }
561
562 static int
563 zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
564 {
565 nvlist_t *nv;
566 const char *path = NULL;
567 int rc;
568
569 /* Power up all the devices first */
570 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
571 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
572 if (path != NULL) {
573 rc = zpool_power_on(zhp, (char *)path);
574 if (rc != 0) {
575 return (rc);
576 }
577 }
578 }
579
580 /*
581 * Wait for their devices to show up. Since we powered them on
582 * at roughly the same time, they should all come online around
583 * the same time.
584 */
585 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
586 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
587 zpool_disk_wait(path);
588 }
589
590 return (0);
591 }
592
593 static int
594 zpool_power_off(zpool_handle_t *zhp, char *vdev)
595 {
596 return (zpool_power(zhp, vdev, B_FALSE));
597 }
598
599 /*
600 * Display usage message. If we're inside a command, display only the usage for
601 * that command. Otherwise, iterate over the entire command table and display
602 * a complete usage message.
603 */
604 static __attribute__((noreturn)) void
605 usage(boolean_t requested)
606 {
607 FILE *fp = requested ? stdout : stderr;
608
609 if (current_command == NULL) {
610 int i;
611
612 (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
613 (void) fprintf(fp,
614 gettext("where 'command' is one of the following:\n\n"));
615
616 for (i = 0; i < NCOMMAND; i++) {
617 if (command_table[i].name == NULL)
618 (void) fprintf(fp, "\n");
619 else
620 (void) fprintf(fp, "%s",
621 get_usage(command_table[i].usage));
622 }
623
624 (void) fprintf(fp,
625 gettext("\nFor further help on a command or topic, "
626 "run: %s\n"), "zpool help [<topic>]");
627 } else {
628 (void) fprintf(fp, gettext("usage:\n"));
629 (void) fprintf(fp, "%s", get_usage(current_command->usage));
630 }
631
632 if (current_command != NULL &&
633 current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
634 ((strcmp(current_command->name, "set") == 0) ||
635 (strcmp(current_command->name, "get") == 0) ||
636 (strcmp(current_command->name, "list") == 0))) {
637
638 (void) fprintf(fp, "%s",
639 gettext("\nthe following properties are supported:\n"));
640
641 (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
642 "PROPERTY", "EDIT", "VALUES");
643
644 /* Iterate over all properties */
645 if (current_prop_type == ZFS_TYPE_POOL) {
646 (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
647 B_TRUE, current_prop_type);
648
649 (void) fprintf(fp, "\t%-19s ", "feature@...");
650 (void) fprintf(fp, "YES "
651 "disabled | enabled | active\n");
652
653 (void) fprintf(fp, gettext("\nThe feature@ properties "
654 "must be appended with a feature name.\n"
655 "See zpool-features(7).\n"));
656 } else if (current_prop_type == ZFS_TYPE_VDEV) {
657 (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
658 B_TRUE, current_prop_type);
659 }
660 }
661
662 /*
663 * See comments at end of main().
664 */
665 if (getenv("ZFS_ABORT") != NULL) {
666 (void) printf("dumping core by request\n");
667 abort();
668 }
669
670 exit(requested ? 0 : 2);
671 }
672
673 /*
674 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
675 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
676 * if none specified.
677 *
678 * -c Cancel. Ends active initializing.
679 * -s Suspend. Initializing can then be restarted with no flags.
680 * -u Uninitialize. Clears initialization state.
681 * -w Wait. Blocks until initializing has completed.
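 *
 * Example (hypothetical pool/device names):
 *     zpool initialize -w tank sda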
682 */
683 int
684 zpool_do_initialize(int argc, char **argv)
685 {
686 int c;
687 char *poolname;
688 zpool_handle_t *zhp;
689 nvlist_t *vdevs;
690 int err = 0;
691 boolean_t wait = B_FALSE;
692
693 struct option long_options[] = {
694 {"cancel", no_argument, NULL, 'c'},
695 {"suspend", no_argument, NULL, 's'},
696 {"uninit", no_argument, NULL, 'u'},
697 {"wait", no_argument, NULL, 'w'},
698 {0, 0, 0, 0}
699 };
700
701 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
702 while ((c = getopt_long(argc, argv, "csuw", long_options,
703 NULL)) != -1) {
704 switch (c) {
705 case 'c':
706 if (cmd_type != POOL_INITIALIZE_START &&
707 cmd_type != POOL_INITIALIZE_CANCEL) {
708 (void) fprintf(stderr, gettext("-c cannot be "
709 "combined with other options\n"));
710 usage(B_FALSE);
711 }
712 cmd_type = POOL_INITIALIZE_CANCEL;
713 break;
714 case 's':
715 if (cmd_type != POOL_INITIALIZE_START &&
716 cmd_type != POOL_INITIALIZE_SUSPEND) {
717 (void) fprintf(stderr, gettext("-s cannot be "
718 "combined with other options\n"));
719 usage(B_FALSE);
720 }
721 cmd_type = POOL_INITIALIZE_SUSPEND;
722 break;
723 case 'u':
724 if (cmd_type != POOL_INITIALIZE_START &&
725 cmd_type != POOL_INITIALIZE_UNINIT) {
726 (void) fprintf(stderr, gettext("-u cannot be "
727 "combined with other options\n"));
728 usage(B_FALSE);
729 }
730 cmd_type = POOL_INITIALIZE_UNINIT;
731 break;
732 case 'w':
733 wait = B_TRUE;
734 break;
735 case '?':
736 if (optopt != 0) {
737 (void) fprintf(stderr,
738 gettext("invalid option '%c'\n"), optopt);
739 } else {
740 (void) fprintf(stderr,
741 gettext("invalid option '%s'\n"),
742 argv[optind - 1]);
743 }
744 usage(B_FALSE);
745 }
746 }
747
748 argc -= optind;
749 argv += optind;
750
751 if (argc < 1) {
752 (void) fprintf(stderr, gettext("missing pool name argument\n"));
753 usage(B_FALSE);
754 return (-1);
755 }
756
757 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
758 (void) fprintf(stderr, gettext("-w cannot be used with -c, -s, "
759 "or -u\n"));
760 usage(B_FALSE);
761 }
762
763 poolname = argv[0];
764 zhp = zpool_open(g_zfs, poolname);
765 if (zhp == NULL)
766 return (-1);
767
768 vdevs = fnvlist_alloc();
769 if (argc == 1) {
770 /* no individual leaf vdevs specified, so add them all */
771 nvlist_t *config = zpool_get_config(zhp, NULL);
772 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
773 ZPOOL_CONFIG_VDEV_TREE);
774 zpool_collect_leaves(zhp, nvroot, vdevs);
775 } else {
776 for (int i = 1; i < argc; i++) {
777 fnvlist_add_boolean(vdevs, argv[i]);
778 }
779 }
780
781 if (wait)
782 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
783 else
784 err = zpool_initialize(zhp, cmd_type, vdevs);
785
786 fnvlist_free(vdevs);
787 zpool_close(zhp);
788
789 return (err);
790 }
791
792 /*
793  * Print a pool vdev config for dry runs.
794 */
795 static void
796 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
797 const char *match, int name_flags)
798 {
799 nvlist_t **child;
800 uint_t c, children;
801 char *vname;
802 boolean_t printed = B_FALSE;
803
804 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
805 &child, &children) != 0) {
806 if (name != NULL)
807 (void) printf("\t%*s%s\n", indent, "", name);
808 return;
809 }
810
811 for (c = 0; c < children; c++) {
812 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
813 const char *class = "";
814
815 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
816 &is_hole);
817
818 if (is_hole == B_TRUE) {
819 continue;
820 }
821
822 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
823 &is_log);
824 if (is_log)
825 class = VDEV_ALLOC_BIAS_LOG;
826 (void) nvlist_lookup_string(child[c],
827 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
828 if (strcmp(match, class) != 0)
829 continue;
830
831 if (!printed && name != NULL) {
832 (void) printf("\t%*s%s\n", indent, "", name);
833 printed = B_TRUE;
834 }
835 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
836 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
837 name_flags);
838 free(vname);
839 }
840 }
841
842 /*
843 * Print the list of l2cache devices for dry runs.
844 */
845 static void
846 print_cache_list(nvlist_t *nv, int indent)
847 {
848 nvlist_t **child;
849 uint_t c, children;
850
851 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
852 &child, &children) == 0 && children > 0) {
853 (void) printf("\t%*s%s\n", indent, "", "cache");
854 } else {
855 return;
856 }
857 for (c = 0; c < children; c++) {
858 char *vname;
859
860 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
861 (void) printf("\t%*s%s\n", indent + 2, "", vname);
862 free(vname);
863 }
864 }
865
866 /*
867 * Print the list of spares for dry runs.
868 */
869 static void
870 print_spare_list(nvlist_t *nv, int indent)
871 {
872 nvlist_t **child;
873 uint_t c, children;
874
875 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
876 &child, &children) == 0 && children > 0) {
877 (void) printf("\t%*s%s\n", indent, "", "spares");
878 } else {
879 return;
880 }
881 for (c = 0; c < children; c++) {
882 char *vname;
883
884 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
885 (void) printf("\t%*s%s\n", indent + 2, "", vname);
886 free(vname);
887 }
888 }
889
890 static boolean_t
891 prop_list_contains_feature(nvlist_t *proplist)
892 {
893 nvpair_t *nvp;
894 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
895 nvp = nvlist_next_nvpair(proplist, nvp)) {
896 if (zpool_prop_feature(nvpair_name(nvp)))
897 return (B_TRUE);
898 }
899 return (B_FALSE);
900 }
901
902 /*
903 * Add a property pair (name, string-value) into a property nvlist.
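 * For example, add_prop_list("ashift", "12", &props, B_TRUE) records the
 * pool property 'ashift' in *props (illustrative values).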
904 */
905 static int
906 add_prop_list(const char *propname, const char *propval, nvlist_t **props,
907 boolean_t poolprop)
908 {
909 zpool_prop_t prop = ZPOOL_PROP_INVAL;
910 nvlist_t *proplist;
911 const char *normnm;
912 const char *strval;
913
914 if (*props == NULL &&
915 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
916 (void) fprintf(stderr,
917 gettext("internal error: out of memory\n"));
918 return (1);
919 }
920
921 proplist = *props;
922
923 if (poolprop) {
924 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
925 const char *cname =
926 zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
927
928 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
929 (!zpool_prop_feature(propname) &&
930 !zpool_prop_vdev(propname))) {
931 (void) fprintf(stderr, gettext("property '%s' is "
932 "not a valid pool or vdev property\n"), propname);
933 return (2);
934 }
935
936 /*
937 * feature@ properties and version should not be specified
938 * at the same time.
939 */
940 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
941 nvlist_exists(proplist, vname)) ||
942 (prop == ZPOOL_PROP_VERSION &&
943 prop_list_contains_feature(proplist))) {
944 (void) fprintf(stderr, gettext("'feature@' and "
945 "'version' properties cannot be specified "
946 "together\n"));
947 return (2);
948 }
949
950 /*
951 * if version is specified, only "legacy" compatibility
952 * may be requested
953 */
954 if ((prop == ZPOOL_PROP_COMPATIBILITY &&
955 strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
956 nvlist_exists(proplist, vname)) ||
957 (prop == ZPOOL_PROP_VERSION &&
958 nvlist_exists(proplist, cname) &&
959 strcmp(fnvlist_lookup_string(proplist, cname),
960 ZPOOL_COMPAT_LEGACY) != 0)) {
961 (void) fprintf(stderr, gettext("when 'version' is "
962 "specified, the 'compatibility' feature may only "
963 "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
964 return (2);
965 }
966
967 if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
968 normnm = propname;
969 else
970 normnm = zpool_prop_to_name(prop);
971 } else {
972 zfs_prop_t fsprop = zfs_name_to_prop(propname);
973
974 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
975 B_FALSE)) {
976 normnm = zfs_prop_to_name(fsprop);
977 } else if (zfs_prop_user(propname) ||
978 zfs_prop_userquota(propname)) {
979 normnm = propname;
980 } else {
981 (void) fprintf(stderr, gettext("property '%s' is "
982 "not a valid filesystem property\n"), propname);
983 return (2);
984 }
985 }
986
987 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
988 prop != ZPOOL_PROP_CACHEFILE) {
989 (void) fprintf(stderr, gettext("property '%s' "
990 "specified multiple times\n"), propname);
991 return (2);
992 }
993
994 if (nvlist_add_string(proplist, normnm, propval) != 0) {
995 (void) fprintf(stderr, gettext("internal "
996 "error: out of memory\n"));
997 return (1);
998 }
999
1000 return (0);
1001 }
1002
1003 /*
1004 * Set a default property pair (name, string-value) in a property nvlist
1005 */
1006 static int
1007 add_prop_list_default(const char *propname, const char *propval,
1008 nvlist_t **props)
1009 {
1010 const char *pval;
1011
1012 if (nvlist_lookup_string(*props, propname, &pval) == 0)
1013 return (0);
1014
1015 return (add_prop_list(propname, propval, props, B_TRUE));
1016 }
1017
1018 /*
1019 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
1020 *
1021 * -a Disable the ashift validation checks
1022 * -f Force addition of devices, even if they appear in use
1023 * -g Display guid for individual vdev name.
1024 * -L Follow links when resolving vdev path name.
1025 * -n Do not add the devices, but display the resulting layout if
1026 * they were to be added.
1027 * -o Set property=value.
1028 * -P Display full path for vdev name.
1029 *
1030 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
1031 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
1032 * libzfs.
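 *
 * Example (hypothetical names): 'zpool add -n tank mirror sdc sdd' prints
 * the layout that would result without modifying the pool.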
1033 */
1034 int
1035 zpool_do_add(int argc, char **argv)
1036 {
1037 boolean_t check_replication = B_TRUE;
1038 boolean_t check_inuse = B_TRUE;
1039 boolean_t dryrun = B_FALSE;
1040 boolean_t check_ashift = B_TRUE;
1041 boolean_t force = B_FALSE;
1042 int name_flags = 0;
1043 int c;
1044 nvlist_t *nvroot;
1045 char *poolname;
1046 int ret;
1047 zpool_handle_t *zhp;
1048 nvlist_t *config;
1049 nvlist_t *props = NULL;
1050 char *propval;
1051
1052 struct option long_options[] = {
1053 {"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
1054 {"allow-replication-mismatch", no_argument, NULL,
1055 ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
1056 {"allow-ashift-mismatch", no_argument, NULL,
1057 ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
1058 {0, 0, 0, 0}
1059 };
1060
1061 /* check options */
1062 while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
1063 != -1) {
1064 switch (c) {
1065 case 'f':
1066 force = B_TRUE;
1067 break;
1068 case 'g':
1069 name_flags |= VDEV_NAME_GUID;
1070 break;
1071 case 'L':
1072 name_flags |= VDEV_NAME_FOLLOW_LINKS;
1073 break;
1074 case 'n':
1075 dryrun = B_TRUE;
1076 break;
1077 case 'o':
1078 if ((propval = strchr(optarg, '=')) == NULL) {
1079 (void) fprintf(stderr, gettext("missing "
1080 "'=' for -o option\n"));
1081 usage(B_FALSE);
1082 }
1083 *propval = '\0';
1084 propval++;
1085
1086 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
1087 (add_prop_list(optarg, propval, &props, B_TRUE)))
1088 usage(B_FALSE);
1089 break;
1090 case 'P':
1091 name_flags |= VDEV_NAME_PATH;
1092 break;
1093 case ZPOOL_OPTION_ALLOW_INUSE:
1094 check_inuse = B_FALSE;
1095 break;
1096 case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
1097 check_replication = B_FALSE;
1098 break;
1099 case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
1100 check_ashift = B_FALSE;
1101 break;
1102 case '?':
1103 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1104 optopt);
1105 usage(B_FALSE);
1106 }
1107 }
1108
1109 argc -= optind;
1110 argv += optind;
1111
1112 /* get pool name and check number of arguments */
1113 if (argc < 1) {
1114 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1115 usage(B_FALSE);
1116 }
1117 if (argc < 2) {
1118 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1119 usage(B_FALSE);
1120 }
1121
1122 if (force) {
1123 if (!check_inuse || !check_replication || !check_ashift) {
1124 (void) fprintf(stderr, gettext("'-f' option is not "
1125 "allowed with '--allow-replication-mismatch', "
1126 "'--allow-ashift-mismatch', or "
1127 "'--allow-in-use'\n"));
1128 usage(B_FALSE);
1129 }
1130 check_inuse = B_FALSE;
1131 check_replication = B_FALSE;
1132 check_ashift = B_FALSE;
1133 }
1134
1135 poolname = argv[0];
1136
1137 argc--;
1138 argv++;
1139
1140 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1141 return (1);
1142
1143 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1144 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1145 poolname);
1146 zpool_close(zhp);
1147 return (1);
1148 }
1149
1150 /* unless manually specified, use the "ashift" pool property (if set) */
1151 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1152 int intval;
1153 zprop_source_t src;
1154 char strval[ZPOOL_MAXPROPLEN];
1155
1156 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1157 if (src != ZPROP_SRC_DEFAULT) {
1158 (void) sprintf(strval, "%" PRId32, intval);
1159 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1160 &props, B_TRUE) == 0);
1161 }
1162 }
1163
1164 /* pass off to make_root_vdev for processing */
1165 nvroot = make_root_vdev(zhp, props, !check_inuse,
1166 check_replication, B_FALSE, dryrun, argc, argv);
1167 if (nvroot == NULL) {
1168 zpool_close(zhp);
1169 return (1);
1170 }
1171
1172 if (dryrun) {
1173 nvlist_t *poolnvroot;
1174 nvlist_t **l2child, **sparechild;
1175 uint_t l2children, sparechildren, c;
1176 char *vname;
1177 boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1178
1179 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1180 &poolnvroot) == 0);
1181
1182 (void) printf(gettext("would update '%s' to the following "
1183 "configuration:\n\n"), zpool_get_name(zhp));
1184
1185 /* print original main pool and new tree */
1186 print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1187 name_flags | VDEV_NAME_TYPE_ID);
1188 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1189
1190 /* print other classes: 'dedup', 'special', and 'log' */
1191 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1192 print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1193 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1194 print_vdev_tree(zhp, NULL, nvroot, 0,
1195 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1196 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1197 print_vdev_tree(zhp, "dedup", nvroot, 0,
1198 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1199 }
1200
1201 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1202 print_vdev_tree(zhp, "special", poolnvroot, 0,
1203 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1204 print_vdev_tree(zhp, NULL, nvroot, 0,
1205 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1206 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1207 print_vdev_tree(zhp, "special", nvroot, 0,
1208 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1209 }
1210
1211 if (num_logs(poolnvroot) > 0) {
1212 print_vdev_tree(zhp, "logs", poolnvroot, 0,
1213 VDEV_ALLOC_BIAS_LOG, name_flags);
1214 print_vdev_tree(zhp, NULL, nvroot, 0,
1215 VDEV_ALLOC_BIAS_LOG, name_flags);
1216 } else if (num_logs(nvroot) > 0) {
1217 print_vdev_tree(zhp, "logs", nvroot, 0,
1218 VDEV_ALLOC_BIAS_LOG, name_flags);
1219 }
1220
1221 /* Do the same for the caches */
1222 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1223 &l2child, &l2children) == 0 && l2children) {
1224 hadcache = B_TRUE;
1225 (void) printf(gettext("\tcache\n"));
1226 for (c = 0; c < l2children; c++) {
1227 vname = zpool_vdev_name(g_zfs, NULL,
1228 l2child[c], name_flags);
1229 (void) printf("\t %s\n", vname);
1230 free(vname);
1231 }
1232 }
1233 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1234 &l2child, &l2children) == 0 && l2children) {
1235 if (!hadcache)
1236 (void) printf(gettext("\tcache\n"));
1237 for (c = 0; c < l2children; c++) {
1238 vname = zpool_vdev_name(g_zfs, NULL,
1239 l2child[c], name_flags);
1240 (void) printf("\t %s\n", vname);
1241 free(vname);
1242 }
1243 }
1244 /* And finally the spares */
1245 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1246 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1247 hadspare = B_TRUE;
1248 (void) printf(gettext("\tspares\n"));
1249 for (c = 0; c < sparechildren; c++) {
1250 vname = zpool_vdev_name(g_zfs, NULL,
1251 sparechild[c], name_flags);
1252 (void) printf("\t %s\n", vname);
1253 free(vname);
1254 }
1255 }
1256 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1257 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1258 if (!hadspare)
1259 (void) printf(gettext("\tspares\n"));
1260 for (c = 0; c < sparechildren; c++) {
1261 vname = zpool_vdev_name(g_zfs, NULL,
1262 sparechild[c], name_flags);
1263 (void) printf("\t %s\n", vname);
1264 free(vname);
1265 }
1266 }
1267
1268 ret = 0;
1269 } else {
1270 ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
1271 }
1272
1273 nvlist_free(props);
1274 nvlist_free(nvroot);
1275 zpool_close(zhp);
1276
1277 return (ret);
1278 }
1279
1280 /*
1281 * zpool remove [-npsw] <pool> <vdev> ...
1282 *
1283  * Removes the given vdevs from the pool.
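 *
 * Example (hypothetical names): 'zpool remove tank mirror-1' starts the
 * removal of top-level vdev 'mirror-1'; 'zpool remove -s tank' cancels an
 * in-progress removal.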
1284 */
1285 int
1286 zpool_do_remove(int argc, char **argv)
1287 {
1288 char *poolname;
1289 int i, ret = 0;
1290 zpool_handle_t *zhp = NULL;
1291 boolean_t stop = B_FALSE;
1292 int c;
1293 boolean_t noop = B_FALSE;
1294 boolean_t parsable = B_FALSE;
1295 boolean_t wait = B_FALSE;
1296
1297 /* check options */
1298 while ((c = getopt(argc, argv, "npsw")) != -1) {
1299 switch (c) {
1300 case 'n':
1301 noop = B_TRUE;
1302 break;
1303 case 'p':
1304 parsable = B_TRUE;
1305 break;
1306 case 's':
1307 stop = B_TRUE;
1308 break;
1309 case 'w':
1310 wait = B_TRUE;
1311 break;
1312 case '?':
1313 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1314 optopt);
1315 usage(B_FALSE);
1316 }
1317 }
1318
1319 argc -= optind;
1320 argv += optind;
1321
1322 /* get pool name and check number of arguments */
1323 if (argc < 1) {
1324 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1325 usage(B_FALSE);
1326 }
1327
1328 poolname = argv[0];
1329
1330 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1331 return (1);
1332
1333 if (stop && noop) {
1334 zpool_close(zhp);
1335 (void) fprintf(stderr, gettext("stop request ignored\n"));
1336 return (0);
1337 }
1338
1339 if (stop) {
1340 if (argc > 1) {
1341 (void) fprintf(stderr, gettext("too many arguments\n"));
1342 usage(B_FALSE);
1343 }
1344 if (zpool_vdev_remove_cancel(zhp) != 0)
1345 ret = 1;
1346 if (wait) {
1347 (void) fprintf(stderr, gettext("invalid option "
1348 "combination: -w cannot be used with -s\n"));
1349 usage(B_FALSE);
1350 }
1351 } else {
1352 if (argc < 2) {
1353 (void) fprintf(stderr, gettext("missing device\n"));
1354 usage(B_FALSE);
1355 }
1356
1357 for (i = 1; i < argc; i++) {
1358 if (noop) {
1359 uint64_t size;
1360
1361 if (zpool_vdev_indirect_size(zhp, argv[i],
1362 &size) != 0) {
1363 ret = 1;
1364 break;
1365 }
1366 if (parsable) {
1367 (void) printf("%s %llu\n",
1368 argv[i], (unsigned long long)size);
1369 } else {
1370 char valstr[32];
1371 zfs_nicenum(size, valstr,
1372 sizeof (valstr));
1373 (void) printf("Memory that will be "
1374 "used after removing %s: %s\n",
1375 argv[i], valstr);
1376 }
1377 } else {
1378 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1379 ret = 1;
1380 }
1381 }
1382
1383 if (ret == 0 && wait)
1384 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1385 }
1386 zpool_close(zhp);
1387
1388 return (ret);
1389 }
1390
1391 /*
1392 * Return 1 if a vdev is active (being used in a pool)
1393 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
1394 *
1395 * This is useful for checking if a disk in an active pool is offlined or
1396 * faulted.
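 *
 * (Assumption: opening a block device with O_EXCL fails while the kernel
 * holds an exclusive claim on it, e.g. while it belongs to an imported
 * pool.)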
1397 */
1398 static int
1399 vdev_is_active(char *vdev_path)
1400 {
1401 int fd;
1402 fd = open(vdev_path, O_EXCL);
1403 if (fd < 0) {
1404 return (1); /* can't open with O_EXCL - disk is active */
1405 }
1406
1407 close(fd);
1408 return (0); /* disk is inactive in the pool */
1409 }
1410
1411 /*
1412 * zpool labelclear [-f] <vdev>
1413 *
1414 * -f Force clearing the label for the vdevs which are members of
1415 * the exported or foreign pools.
1416 *
1417 * Verifies that the vdev is not active and zeros out the label information
1418 * on the device.
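 *
 * Example (hypothetical device): 'zpool labelclear -f /dev/sdc1' clears
 * the label from a disk that belonged to an exported pool.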
1419 */
1420 int
1421 zpool_do_labelclear(int argc, char **argv)
1422 {
1423 char vdev[MAXPATHLEN];
1424 char *name = NULL;
1425 int c, fd = -1, ret = 0;
1426 nvlist_t *config;
1427 pool_state_t state;
1428 boolean_t inuse = B_FALSE;
1429 boolean_t force = B_FALSE;
1430
1431 /* check options */
1432 while ((c = getopt(argc, argv, "f")) != -1) {
1433 switch (c) {
1434 case 'f':
1435 force = B_TRUE;
1436 break;
1437 default:
1438 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1439 optopt);
1440 usage(B_FALSE);
1441 }
1442 }
1443
1444 argc -= optind;
1445 argv += optind;
1446
1447 /* get vdev name */
1448 if (argc < 1) {
1449 (void) fprintf(stderr, gettext("missing vdev name\n"));
1450 usage(B_FALSE);
1451 }
1452 if (argc > 1) {
1453 (void) fprintf(stderr, gettext("too many arguments\n"));
1454 usage(B_FALSE);
1455 }
1456
1457 (void) strlcpy(vdev, argv[0], sizeof (vdev));
1458
1459 /*
1460 * If we cannot open an absolute path, we quit.
1461 * Otherwise if the provided vdev name doesn't point to a file,
1462 * try prepending expected disk paths and partition numbers.
1463 */
1464 if ((fd = open(vdev, O_RDWR)) < 0) {
1465 int error;
1466 if (vdev[0] == '/') {
1467 (void) fprintf(stderr, gettext("failed to open "
1468 "%s: %s\n"), vdev, strerror(errno));
1469 return (1);
1470 }
1471
1472 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1473 if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1474 if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1475 error = ENOENT;
1476 }
1477
1478 if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
1479 if (errno == ENOENT) {
1480 (void) fprintf(stderr, gettext(
1481 "failed to find device %s, try "
1482 "specifying absolute path instead\n"),
1483 argv[0]);
1484 return (1);
1485 }
1486
1487 (void) fprintf(stderr, gettext("failed to open %s:"
1488 " %s\n"), vdev, strerror(errno));
1489 return (1);
1490 }
1491 }
1492
1493 /*
1494 * Flush all dirty pages for the block device. This should not be
1495 * fatal when the device does not support BLKFLSBUF as would be the
1496 * case for a file vdev.
1497 */
1498 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1499 (void) fprintf(stderr, gettext("failed to invalidate "
1500 "cache for %s: %s\n"), vdev, strerror(errno));
1501
1502 if (zpool_read_label(fd, &config, NULL) != 0) {
1503 (void) fprintf(stderr,
1504 gettext("failed to read label from %s\n"), vdev);
1505 ret = 1;
1506 goto errout;
1507 }
1508 nvlist_free(config);
1509
1510 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1511 if (ret != 0) {
1512 (void) fprintf(stderr,
1513 gettext("failed to check state for %s\n"), vdev);
1514 ret = 1;
1515 goto errout;
1516 }
1517
1518 if (!inuse)
1519 goto wipe_label;
1520
1521 switch (state) {
1522 default:
1523 case POOL_STATE_ACTIVE:
1524 case POOL_STATE_SPARE:
1525 case POOL_STATE_L2CACHE:
1526 /*
1527 * With -f we allow clearing the label of a disk that has been
1528 * offlined or faulted ('zpool offline -f') in an active pool;
1529 * vdev_is_active() tells us whether the disk is still in use.
1530 */
1531 if (force && !vdev_is_active(vdev))
1532 break;
1533
1534 (void) fprintf(stderr, gettext(
1535 "%s is a member (%s) of pool \"%s\""),
1536 vdev, zpool_pool_state_to_name(state), name);
1537
1538 if (force) {
1539 (void) fprintf(stderr, gettext(
1540 ". Offline the disk first to clear its label."));
1541 }
1542 printf("\n");
1543 ret = 1;
1544 goto errout;
1545
1546 case POOL_STATE_EXPORTED:
1547 if (force)
1548 break;
1549 (void) fprintf(stderr, gettext(
1550 "use '-f' to override the following error:\n"
1551 "%s is a member of exported pool \"%s\"\n"),
1552 vdev, name);
1553 ret = 1;
1554 goto errout;
1555
1556 case POOL_STATE_POTENTIALLY_ACTIVE:
1557 if (force)
1558 break;
1559 (void) fprintf(stderr, gettext(
1560 "use '-f' to override the following error:\n"
1561 "%s is a member of potentially active pool \"%s\"\n"),
1562 vdev, name);
1563 ret = 1;
1564 goto errout;
1565
1566 case POOL_STATE_DESTROYED:
1567 /* inuse should never be set for a destroyed pool */
1568 assert(0);
1569 break;
1570 }
1571
1572 wipe_label:
1573 ret = zpool_clear_label(fd);
1574 if (ret != 0) {
1575 (void) fprintf(stderr,
1576 gettext("failed to clear label for %s\n"), vdev);
1577 }
1578
1579 errout:
1580 free(name);
1581 (void) close(fd);
1582
1583 return (ret);
1584 }
1585
1586 /*
1587 * zpool create [-fnd] [-o property=value] ...
1588 * [-O file-system-property=value] ...
1589 * [-R root] [-m mountpoint] <pool> <dev> ...
1590 *
1591 * -f Force creation, even if devices appear in use
1592 * -n Do not create the pool, but display the resulting layout if it
1593 * were to be created.
1594 * -R Create a pool under an alternate root
1595 * -m Set default mountpoint for the root dataset. By default it's
1596 * '/<pool>'
1597 * -o Set property=value.
1598 * -o Set feature@feature=enabled|disabled.
1599 * -d Don't automatically enable all supported pool features
1600 * (individual features can be enabled with -o).
1601 * -O Set fsproperty=value in the pool's root file system
1602 *
1603 * Creates the named pool according to the given vdev specification. The
1604 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1605 * Once we get the nvlist back from make_root_vdev(), we either print out the
1606 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
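 *
 * Example (hypothetical names): 'zpool create -n -o ashift=12 -m /tank
 * tank mirror sda sdb' prints the layout that would be created.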
1607 */
1608 int
1609 zpool_do_create(int argc, char **argv)
1610 {
1611 boolean_t force = B_FALSE;
1612 boolean_t dryrun = B_FALSE;
1613 boolean_t enable_pool_features = B_TRUE;
1614
1615 int c;
1616 nvlist_t *nvroot = NULL;
1617 char *poolname;
1618 char *tname = NULL;
1619 int ret = 1;
1620 char *altroot = NULL;
1621 char *compat = NULL;
1622 char *mountpoint = NULL;
1623 nvlist_t *fsprops = NULL;
1624 nvlist_t *props = NULL;
1625 char *propval;
1626
1627 /* check options */
1628 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1629 switch (c) {
1630 case 'f':
1631 force = B_TRUE;
1632 break;
1633 case 'n':
1634 dryrun = B_TRUE;
1635 break;
1636 case 'd':
1637 enable_pool_features = B_FALSE;
1638 break;
1639 case 'R':
1640 altroot = optarg;
1641 if (add_prop_list(zpool_prop_to_name(
1642 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
1643 goto errout;
1644 if (add_prop_list_default(zpool_prop_to_name(
1645 ZPOOL_PROP_CACHEFILE), "none", &props))
1646 goto errout;
1647 break;
1648 case 'm':
1649 /* Equivalent to -O mountpoint=optarg */
1650 mountpoint = optarg;
1651 break;
1652 case 'o':
1653 if ((propval = strchr(optarg, '=')) == NULL) {
1654 (void) fprintf(stderr, gettext("missing "
1655 "'=' for -o option\n"));
1656 goto errout;
1657 }
1658 *propval = '\0';
1659 propval++;
1660
1661 if (add_prop_list(optarg, propval, &props, B_TRUE))
1662 goto errout;
1663
1664 /*
1665 * If the user is creating a pool that doesn't support
1666 * feature flags, don't enable any features.
1667 */
1668 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
1669 char *end;
1670 u_longlong_t ver;
1671
1672 ver = strtoull(propval, &end, 10);
1673 if (*end == '\0' &&
1674 ver < SPA_VERSION_FEATURES) {
1675 enable_pool_features = B_FALSE;
1676 }
1677 }
1678 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
1679 altroot = propval;
1680 if (zpool_name_to_prop(optarg) ==
1681 ZPOOL_PROP_COMPATIBILITY)
1682 compat = propval;
1683 break;
1684 case 'O':
1685 if ((propval = strchr(optarg, '=')) == NULL) {
1686 (void) fprintf(stderr, gettext("missing "
1687 "'=' for -O option\n"));
1688 goto errout;
1689 }
1690 *propval = '\0';
1691 propval++;
1692
1693 /*
1694 * Mountpoints are checked and then added later.
1695 * Uniquely among properties, they can be specified
1696 * more than once, to avoid conflict with -m.
1697 */
1698 if (0 == strcmp(optarg,
1699 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
1700 mountpoint = propval;
1701 } else if (add_prop_list(optarg, propval, &fsprops,
1702 B_FALSE)) {
1703 goto errout;
1704 }
1705 break;
1706 case 't':
1707 /*
1708 * Sanity check temporary pool name.
1709 */
1710 if (strchr(optarg, '/') != NULL) {
1711 (void) fprintf(stderr, gettext("cannot create "
1712 "'%s': invalid character '/' in temporary "
1713 "name\n"), optarg);
1714 (void) fprintf(stderr, gettext("use 'zfs "
1715 "create' to create a dataset\n"));
1716 goto errout;
1717 }
1718
1719 if (add_prop_list(zpool_prop_to_name(
1720 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
1721 goto errout;
1722 if (add_prop_list_default(zpool_prop_to_name(
1723 ZPOOL_PROP_CACHEFILE), "none", &props))
1724 goto errout;
1725 tname = optarg;
1726 break;
1727 case ':':
1728 (void) fprintf(stderr, gettext("missing argument for "
1729 "'%c' option\n"), optopt);
1730 goto badusage;
1731 case '?':
1732 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1733 optopt);
1734 goto badusage;
1735 }
1736 }
1737
1738 argc -= optind;
1739 argv += optind;
1740
1741 /* get pool name and check number of arguments */
1742 if (argc < 1) {
1743 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1744 goto badusage;
1745 }
1746 if (argc < 2) {
1747 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1748 goto badusage;
1749 }
1750
1751 poolname = argv[0];
1752
1753 /*
1754 * As a special case, check for use of '/' in the name, and direct the
1755 * user to use 'zfs create' instead.
1756 */
1757 if (strchr(poolname, '/') != NULL) {
1758 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
1759 "character '/' in pool name\n"), poolname);
1760 (void) fprintf(stderr, gettext("use 'zfs create' to "
1761 "create a dataset\n"));
1762 goto errout;
1763 }
1764
1765 /* pass off to make_root_vdev for bulk processing */
1766 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
1767 argc - 1, argv + 1);
1768 if (nvroot == NULL)
1769 goto errout;
1770
1771 /* make_root_vdev() allows 0 toplevel children if there are spares */
1772 if (!zfs_allocatable_devs(nvroot)) {
1773 (void) fprintf(stderr, gettext("invalid vdev "
1774 "specification: at least one toplevel vdev must be "
1775 "specified\n"));
1776 goto errout;
1777 }
1778
1779 if (altroot != NULL && altroot[0] != '/') {
1780 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
1781 "must be an absolute path\n"), altroot);
1782 goto errout;
1783 }
1784
1785 /*
1786 * Check the validity of the mountpoint and direct the user to use the
1787  * '-m' mountpoint option if it looks like it's in use.
1788 */
1789 if (mountpoint == NULL ||
1790 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
1791 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
1792 char buf[MAXPATHLEN];
1793 DIR *dirp;
1794
1795 if (mountpoint && mountpoint[0] != '/') {
1796 (void) fprintf(stderr, gettext("invalid mountpoint "
1797 "'%s': must be an absolute path, 'legacy', or "
1798 "'none'\n"), mountpoint);
1799 goto errout;
1800 }
1801
1802 if (mountpoint == NULL) {
1803 if (altroot != NULL)
1804 (void) snprintf(buf, sizeof (buf), "%s/%s",
1805 altroot, poolname);
1806 else
1807 (void) snprintf(buf, sizeof (buf), "/%s",
1808 poolname);
1809 } else {
1810 if (altroot != NULL)
1811 (void) snprintf(buf, sizeof (buf), "%s%s",
1812 altroot, mountpoint);
1813 else
1814 (void) snprintf(buf, sizeof (buf), "%s",
1815 mountpoint);
1816 }
1817
1818 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
1819 (void) fprintf(stderr, gettext("mountpoint '%s' : "
1820 "%s\n"), buf, strerror(errno));
1821 (void) fprintf(stderr, gettext("use '-m' "
1822 "option to provide a different default\n"));
1823 goto errout;
1824 } else if (dirp) {
1825 int count = 0;
1826
1827 while (count < 3 && readdir(dirp) != NULL)
1828 count++;
1829 (void) closedir(dirp);
1830
1831 if (count > 2) {
1832 (void) fprintf(stderr, gettext("mountpoint "
1833 "'%s' exists and is not empty\n"), buf);
1834 (void) fprintf(stderr, gettext("use '-m' "
1835 "option to provide a "
1836 "different default\n"));
1837 goto errout;
1838 }
1839 }
1840 }
1841
1842 /*
1843 * Now that the mountpoint's validity has been checked, ensure that
1844 * the property is set appropriately prior to creating the pool.
1845 */
1846 if (mountpoint != NULL) {
1847 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1848 mountpoint, &fsprops, B_FALSE);
1849 if (ret != 0)
1850 goto errout;
1851 }
1852
1853 ret = 1;
1854 if (dryrun) {
1855 /*
1856 * For a dry run invocation, print out a basic message and run
1857 * through all the vdevs in the list and print out in an
1858 * appropriate hierarchy.
1859 */
1860 (void) printf(gettext("would create '%s' with the "
1861 "following layout:\n\n"), poolname);
1862
1863 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
1864 print_vdev_tree(NULL, "dedup", nvroot, 0,
1865 VDEV_ALLOC_BIAS_DEDUP, 0);
1866 print_vdev_tree(NULL, "special", nvroot, 0,
1867 VDEV_ALLOC_BIAS_SPECIAL, 0);
1868 print_vdev_tree(NULL, "logs", nvroot, 0,
1869 VDEV_ALLOC_BIAS_LOG, 0);
1870 print_cache_list(nvroot, 0);
1871 print_spare_list(nvroot, 0);
1872
1873 ret = 0;
1874 } else {
1875 /*
1876 * Load in feature set.
1877 * Note: if compatibility property not given, we'll have
1878 * NULL, which means 'all features'.
1879 */
1880 boolean_t requested_features[SPA_FEATURES];
1881 if (zpool_do_load_compat(compat, requested_features) !=
1882 ZPOOL_COMPATIBILITY_OK)
1883 goto errout;
1884
1885 /*
1886 * props contains list of features to enable.
1887 * For each feature:
1888 * - remove it if feature@name=disabled
1889 * - leave it there if feature@name=enabled
1890 * - add it if:
1891 * - enable_pool_features (ie: no '-d' or '-o version')
1892 * - it's supported by the kernel module
1893 * - it's in the requested feature set
1894 * - warn if it's enabled but not in compat
1895 */
1896 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
1897 char propname[MAXPATHLEN];
1898 const char *propval;
1899 zfeature_info_t *feat = &spa_feature_table[i];
1900
1901 (void) snprintf(propname, sizeof (propname),
1902 "feature@%s", feat->fi_uname);
1903
1904 if (!nvlist_lookup_string(props, propname, &propval)) {
1905 if (strcmp(propval,
1906 ZFS_FEATURE_DISABLED) == 0) {
1907 (void) nvlist_remove_all(props,
1908 propname);
1909 } else if (strcmp(propval,
1910 ZFS_FEATURE_ENABLED) == 0 &&
1911 !requested_features[i]) {
1912 (void) fprintf(stderr, gettext(
1913 "Warning: feature \"%s\" enabled "
1914 "but is not in specified "
1915 "'compatibility' feature set.\n"),
1916 feat->fi_uname);
1917 }
1918 } else if (
1919 enable_pool_features &&
1920 feat->fi_zfs_mod_supported &&
1921 requested_features[i]) {
1922 ret = add_prop_list(propname,
1923 ZFS_FEATURE_ENABLED, &props, B_TRUE);
1924 if (ret != 0)
1925 goto errout;
1926 }
1927 }
1928
1929 ret = 1;
1930 if (zpool_create(g_zfs, poolname,
1931 nvroot, props, fsprops) == 0) {
1932 zfs_handle_t *pool = zfs_open(g_zfs,
1933 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
1934 if (pool != NULL) {
1935 if (zfs_mount(pool, NULL, 0) == 0) {
1936 ret = zfs_share(pool, NULL);
1937 zfs_commit_shares(NULL);
1938 }
1939 zfs_close(pool);
1940 }
1941 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
1942 (void) fprintf(stderr, gettext("pool name may have "
1943 "been omitted\n"));
1944 }
1945 }
1946
1947 errout:
1948 nvlist_free(nvroot);
1949 nvlist_free(fsprops);
1950 nvlist_free(props);
1951 return (ret);
1952 badusage:
1953 nvlist_free(fsprops);
1954 nvlist_free(props);
1955 usage(B_FALSE);
1956 return (2);
1957 }
1958
1959 /*
1960 * zpool destroy <pool>
1961 *
1962 * -f Forcefully unmount any datasets
1963 *
1964 * Destroy the given pool. Automatically unmounts any datasets in the pool.
1965 */
1966 int
1967 zpool_do_destroy(int argc, char **argv)
1968 {
1969 boolean_t force = B_FALSE;
1970 int c;
1971 char *pool;
1972 zpool_handle_t *zhp;
1973 int ret;
1974
1975 /* check options */
1976 while ((c = getopt(argc, argv, "f")) != -1) {
1977 switch (c) {
1978 case 'f':
1979 force = B_TRUE;
1980 break;
1981 case '?':
1982 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1983 optopt);
1984 usage(B_FALSE);
1985 }
1986 }
1987
1988 argc -= optind;
1989 argv += optind;
1990
1991 /* check arguments */
1992 if (argc < 1) {
1993 (void) fprintf(stderr, gettext("missing pool argument\n"));
1994 usage(B_FALSE);
1995 }
1996 if (argc > 1) {
1997 (void) fprintf(stderr, gettext("too many arguments\n"));
1998 usage(B_FALSE);
1999 }
2000
2001 pool = argv[0];
2002
2003 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2004 /*
2005 * As a special case, check for use of '/' in the name, and
2006 * direct the user to use 'zfs destroy' instead.
2007 */
2008 if (strchr(pool, '/') != NULL)
2009 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2010 "destroy a dataset\n"));
2011 return (1);
2012 }
2013
2014 if (zpool_disable_datasets(zhp, force) != 0) {
2015 (void) fprintf(stderr, gettext("could not destroy '%s': "
2016 "could not unmount datasets\n"), zpool_get_name(zhp));
2017 zpool_close(zhp);
2018 return (1);
2019 }
2020
2021 /* The history must be logged as part of the export */
2022 log_history = B_FALSE;
2023
2024 ret = (zpool_destroy(zhp, history_str) != 0);
2025
2026 zpool_close(zhp);
2027
2028 return (ret);
2029 }
2030
2031 typedef struct export_cbdata {
2032 boolean_t force;
2033 boolean_t hardforce;
2034 } export_cbdata_t;
2035
2036 /*
2037 * Export one pool
2038 */
2039 static int
2040 zpool_export_one(zpool_handle_t *zhp, void *data)
2041 {
2042 export_cbdata_t *cb = data;
2043
2044 if (zpool_disable_datasets(zhp, cb->force) != 0)
2045 return (1);
2046
2047 /* The history must be logged as part of the export */
2048 log_history = B_FALSE;
2049
2050 if (cb->hardforce) {
2051 if (zpool_export_force(zhp, history_str) != 0)
2052 return (1);
2053 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2054 return (1);
2055 }
2056
2057 return (0);
2058 }
2059
2060 /*
2061 * zpool export [-f] [-F] <pool> ...
 *	export -a [-f] [-F]
2062 *
2063 * -a Export all pools
2064 * -f Forcefully unmount datasets
 * -F Hard force: export via zpool_export_force() rather than
 *    zpool_export()
2065 *
2066 * Export the given pools. By default, the command will attempt to cleanly
2067 * unmount any active datasets within the pool. If the '-f' flag is specified,
2068 * then the datasets will be forcefully unmounted.
2069 */
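/*
 * Example invocations (pool names are placeholders):
 *
 *	# zpool export tank		(cleanly unmount, then export)
 *	# zpool export -f tank		(force-unmount datasets)
 *	# zpool export -a		(export every imported pool)
 */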
2070 int
2071 zpool_do_export(int argc, char **argv)
2072 {
2073 export_cbdata_t cb;
2074 boolean_t do_all = B_FALSE;
2075 boolean_t force = B_FALSE;
2076 boolean_t hardforce = B_FALSE;
2077 int c, ret;
2078
2079 /* check options */
2080 while ((c = getopt(argc, argv, "afF")) != -1) {
2081 switch (c) {
2082 case 'a':
2083 do_all = B_TRUE;
2084 break;
2085 case 'f':
2086 force = B_TRUE;
2087 break;
2088 case 'F':
2089 hardforce = B_TRUE;
2090 break;
2091 case '?':
2092 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2093 optopt);
2094 usage(B_FALSE);
2095 }
2096 }
2097
2098 cb.force = force;
2099 cb.hardforce = hardforce;
2100 argc -= optind;
2101 argv += optind;
2102
2103 if (do_all) {
2104 if (argc != 0) {
2105 (void) fprintf(stderr, gettext("too many arguments\n"));
2106 usage(B_FALSE);
2107 }
2108
2109 return (for_each_pool(argc, argv, B_TRUE, NULL,
2110 ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
2111 }
2112
2113 /* check arguments */
2114 if (argc < 1) {
2115 (void) fprintf(stderr, gettext("missing pool argument\n"));
2116 usage(B_FALSE);
2117 }
2118
2119 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2120 B_FALSE, zpool_export_one, &cb);
2121
2122 return (ret);
2123 }
2124
2125 /*
2126 * Given a vdev configuration, determine the maximum width needed for the device
2127 * name column.
2128 */
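/*
 * For example (hypothetical layout), a disk "sda" sitting two levels below
 * the root ("tank" -> "mirror-0" -> "sda") is visited with depth == 4 and
 * therefore contributes strlen("sda") + 4 to the computed width, since each
 * level of nesting below the root adds two columns of indentation.
 */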
2129 static int
2130 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2131 int name_flags)
2132 {
2133 static const char *const subtypes[] =
2134 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2135
2136 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2137 max = MAX(strlen(name) + depth, max);
2138 free(name);
2139
2140 nvlist_t **child;
2141 uint_t children;
2142 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2143 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2144 &child, &children) == 0)
2145 for (uint_t c = 0; c < children; ++c)
2146 max = MAX(max_width(zhp, child[c], depth + 2,
2147 max, name_flags), max);
2148
2149 return (max);
2150 }
2151
2152 typedef struct spare_cbdata {
2153 uint64_t cb_guid;
2154 zpool_handle_t *cb_zhp;
2155 } spare_cbdata_t;
2156
2157 static boolean_t
2158 find_vdev(nvlist_t *nv, uint64_t search)
2159 {
2160 uint64_t guid;
2161 nvlist_t **child;
2162 uint_t c, children;
2163
2164 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
2165 search == guid)
2166 return (B_TRUE);
2167
2168 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2169 &child, &children) == 0) {
2170 for (c = 0; c < children; c++)
2171 if (find_vdev(child[c], search))
2172 return (B_TRUE);
2173 }
2174
2175 return (B_FALSE);
2176 }
2177
2178 static int
2179 find_spare(zpool_handle_t *zhp, void *data)
2180 {
2181 spare_cbdata_t *cbp = data;
2182 nvlist_t *config, *nvroot;
2183
2184 config = zpool_get_config(zhp, NULL);
2185 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2186 &nvroot) == 0);
2187
2188 if (find_vdev(nvroot, cbp->cb_guid)) {
2189 cbp->cb_zhp = zhp;
2190 return (1);
2191 }
2192
2193 zpool_close(zhp);
2194 return (0);
2195 }
2196
2197 typedef struct status_cbdata {
2198 int cb_count;
2199 int cb_name_flags;
2200 int cb_namewidth;
2201 boolean_t cb_allpools;
2202 boolean_t cb_verbose;
2203 boolean_t cb_literal;
2204 boolean_t cb_explain;
2205 boolean_t cb_first;
2206 boolean_t cb_dedup_stats;
2207 boolean_t cb_print_unhealthy;
2208 boolean_t cb_print_status;
2209 boolean_t cb_print_slow_ios;
2210 boolean_t cb_print_vdev_init;
2211 boolean_t cb_print_vdev_trim;
2212 vdev_cmd_data_list_t *vcdl;
2213 boolean_t cb_print_power;
2214 } status_cbdata_t;
2215
2216 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2217 static boolean_t
2218 is_blank_str(const char *str)
2219 {
2220 for (; str != NULL && *str != '\0'; ++str)
2221 if (!isblank(*str))
2222 return (B_FALSE);
2223 return (B_TRUE);
2224 }
2225
2226 /* Print command output lines for a specific vdev in a specific pool */
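/*
 * Illustrative result (script output and device names are made up): if a
 * "zpool status -c" script reports vendor=ATA and size=1.8T for one vdev
 * and produces no output for another, the caller's per-vdev lines gain
 * aligned trailing columns, with "-" filling the gaps:
 *
 *	sda  ONLINE  0 0 0    ATA  1.8T
 *	sdb  ONLINE  0 0 0      -     -
 */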
2227 static void
2228 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2229 {
2230 vdev_cmd_data_t *data;
2231 int i, j;
2232 const char *val;
2233
2234 for (i = 0; i < vcdl->count; i++) {
2235 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2236 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2237 /* Not the vdev we're looking for */
2238 continue;
2239 }
2240
2241 data = &vcdl->data[i];
2242 /* Print out all the output values for this vdev */
2243 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2244 val = NULL;
2245 /* Does this vdev have values for this column? */
2246 for (int k = 0; k < data->cols_cnt; k++) {
2247 if (strcmp(data->cols[k],
2248 vcdl->uniq_cols[j]) == 0) {
2249 /* yes it does, record the value */
2250 val = data->lines[k];
2251 break;
2252 }
2253 }
2254 /*
2255 * Mark empty values with dashes to make output
2256 * awk-able.
2257 */
2258 if (val == NULL || is_blank_str(val))
2259 val = "-";
2260
2261 printf("%*s", vcdl->uniq_cols_width[j], val);
2262 if (j < vcdl->uniq_cols_cnt - 1)
2263 fputs(" ", stdout);
2264 }
2265
2266 /* Print out any values that aren't in a column at the end */
2267 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2268 /* Did we have any columns? If so print a spacer. */
2269 if (vcdl->uniq_cols_cnt > 0)
2270 fputs(" ", stdout);
2271
2272 val = data->lines[j];
2273 fputs(val ?: "", stdout);
2274 }
2275 break;
2276 }
2277 }
2278
2279 /*
2280 * Print vdev initialization status for leaves
2281 */
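/*
 * Examples of the text this appends to a leaf vdev's status line
 * (timestamps are illustrative):
 *
 *	(44% initialized, started at Tue Jan 14 10:12:33 2025)
 *	(100% initialized, completed at Tue Jan 14 11:02:10 2025)
 *	(uninitialized)
 */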
2282 static void
2283 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2284 {
2285 if (verbose) {
2286 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2287 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2288 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2289 !vs->vs_scan_removing) {
2290 char zbuf[1024];
2291 char tbuf[256];
2292
2293 time_t t = vs->vs_initialize_action_time;
2294 int initialize_pct = 100;
2295 if (vs->vs_initialize_state !=
2296 VDEV_INITIALIZE_COMPLETE) {
2297 initialize_pct = (vs->vs_initialize_bytes_done *
2298 100 / (vs->vs_initialize_bytes_est + 1));
2299 }
2300
2301 (void) ctime_r(&t, tbuf);
2302 tbuf[24] = 0;
2303
2304 switch (vs->vs_initialize_state) {
2305 case VDEV_INITIALIZE_SUSPENDED:
2306 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2307 gettext("suspended, started at"), tbuf);
2308 break;
2309 case VDEV_INITIALIZE_ACTIVE:
2310 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2311 gettext("started at"), tbuf);
2312 break;
2313 case VDEV_INITIALIZE_COMPLETE:
2314 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2315 gettext("completed at"), tbuf);
2316 break;
2317 }
2318
2319 (void) printf(gettext(" (%d%% initialized%s)"),
2320 initialize_pct, zbuf);
2321 } else {
2322 (void) printf(gettext(" (uninitialized)"));
2323 }
2324 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2325 (void) printf(gettext(" (initializing)"));
2326 }
2327 }
2328
2329 /*
2330 * Print vdev TRIM status for leaves
2331 */
2332 static void
2333 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2334 {
2335 if (verbose) {
2336 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2337 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2338 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2339 !vs->vs_scan_removing) {
2340 char zbuf[1024];
2341 char tbuf[256];
2342
2343 time_t t = vs->vs_trim_action_time;
2344 int trim_pct = 100;
2345 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2346 trim_pct = (vs->vs_trim_bytes_done *
2347 100 / (vs->vs_trim_bytes_est + 1));
2348 }
2349
2350 (void) ctime_r(&t, tbuf);
2351 tbuf[24] = 0;
2352
2353 switch (vs->vs_trim_state) {
2354 case VDEV_TRIM_SUSPENDED:
2355 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2356 gettext("suspended, started at"), tbuf);
2357 break;
2358 case VDEV_TRIM_ACTIVE:
2359 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2360 gettext("started at"), tbuf);
2361 break;
2362 case VDEV_TRIM_COMPLETE:
2363 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2364 gettext("completed at"), tbuf);
2365 break;
2366 }
2367
2368 (void) printf(gettext(" (%d%% trimmed%s)"),
2369 trim_pct, zbuf);
2370 } else if (vs->vs_trim_notsup) {
2371 (void) printf(gettext(" (trim unsupported)"));
2372 } else {
2373 (void) printf(gettext(" (untrimmed)"));
2374 }
2375 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2376 (void) printf(gettext(" (trimming)"));
2377 }
2378 }
2379
2380 /*
2381 * Return the color associated with a health string. This includes returning
2382 * NULL for no color change.
2383 */
2384 static const char *
2385 health_str_to_color(const char *health)
2386 {
2387 if (strcmp(health, gettext("FAULTED")) == 0 ||
2388 strcmp(health, gettext("SUSPENDED")) == 0 ||
2389 strcmp(health, gettext("UNAVAIL")) == 0) {
2390 return (ANSI_RED);
2391 }
2392
2393 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2394 strcmp(health, gettext("DEGRADED")) == 0 ||
2395 strcmp(health, gettext("REMOVED")) == 0) {
2396 return (ANSI_YELLOW);
2397 }
2398
2399 return (NULL);
2400 }
2401
2402 /*
2403 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2404 * A vdev is unhealthy if any of the following are true:
2405 * 1) there are read, write, or checksum errors,
2406 * 2) its state is not ONLINE, or
2407 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2408 */
2409 static int
2410 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2411 {
2412 status_cbdata_t *cb = data;
2413 vdev_stat_t *vs;
2414 uint_t vsc;
2415 (void) hdl_data;
2416
2417 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2418 (uint64_t **)&vs, &vsc) != 0)
2419 return (1);
2420
2421 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2422 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2423 return (1);
2424
2425 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2426 return (1);
2427
2428 return (0);
2429 }
2430
2431 /*
2432 * Print out configuration state as requested by status_callback.
2433 */
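/*
 * Illustrative fragment of what this prints for "zpool status" (device
 * names and counts are made up; the NAME/STATE/READ/WRITE/CKSUM header
 * itself is emitted by the caller):
 *
 *	tank        ONLINE       0     0     0
 *	  mirror-0  ONLINE       0     0     0
 *	    sda     ONLINE       0     0     0
 *	    sdb     ONLINE       0     0     1  (resilvering)
 */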
2434 static void
2435 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2436 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2437 {
2438 nvlist_t **child, *root;
2439 uint_t c, i, vsc, children;
2440 pool_scan_stat_t *ps = NULL;
2441 vdev_stat_t *vs;
2442 char rbuf[6], wbuf[6], cbuf[6];
2443 char *vname;
2444 uint64_t notpresent;
2445 spare_cbdata_t spare_cb;
2446 const char *state;
2447 const char *type;
2448 const char *path = NULL;
2449 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2450 *scolor = NULL;
2451
2452 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2453 &child, &children) != 0)
2454 children = 0;
2455
2456 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2457 (uint64_t **)&vs, &vsc) == 0);
2458
2459 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2460
2461 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2462 return;
2463
2464 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2465
2466 if (isspare) {
2467 /*
2468 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
2469 * online drives.
2470 */
2471 if (vs->vs_aux == VDEV_AUX_SPARED)
2472 state = gettext("INUSE");
2473 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2474 state = gettext("AVAIL");
2475 }
2476
2477 /*
2478 * If '-e' is specified then top-level vdevs and their children
2479 * can be pruned if all of their leaves are healthy.
2480 */
2481 if (cb->cb_print_unhealthy && depth > 0 &&
2482 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2483 return;
2484 }
2485
2486 printf_color(health_str_to_color(state),
2487 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2488 name, state);
2489
2490 if (!isspare) {
2491 if (vs->vs_read_errors)
2492 rcolor = ANSI_RED;
2493
2494 if (vs->vs_write_errors)
2495 wcolor = ANSI_RED;
2496
2497 if (vs->vs_checksum_errors)
2498 ccolor = ANSI_RED;
2499
2500 if (vs->vs_slow_ios)
2501 scolor = ANSI_BLUE;
2502
2503 if (cb->cb_literal) {
2504 fputc(' ', stdout);
2505 printf_color(rcolor, "%5llu",
2506 (u_longlong_t)vs->vs_read_errors);
2507 fputc(' ', stdout);
2508 printf_color(wcolor, "%5llu",
2509 (u_longlong_t)vs->vs_write_errors);
2510 fputc(' ', stdout);
2511 printf_color(ccolor, "%5llu",
2512 (u_longlong_t)vs->vs_checksum_errors);
2513 } else {
2514 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2515 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2516 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2517 sizeof (cbuf));
2518 fputc(' ', stdout);
2519 printf_color(rcolor, "%5s", rbuf);
2520 fputc(' ', stdout);
2521 printf_color(wcolor, "%5s", wbuf);
2522 fputc(' ', stdout);
2523 printf_color(ccolor, "%5s", cbuf);
2524 }
2525 if (cb->cb_print_slow_ios) {
2526 if (children == 0) {
2527 /* Only leaf vdevs have slow IOs */
2528 zfs_nicenum(vs->vs_slow_ios, rbuf,
2529 sizeof (rbuf));
2530 } else {
2531 snprintf(rbuf, sizeof (rbuf), "-");
2532 }
2533
2534 if (cb->cb_literal)
2535 printf_color(scolor, " %5llu",
2536 (u_longlong_t)vs->vs_slow_ios);
2537 else
2538 printf_color(scolor, " %5s", rbuf);
2539 }
2540 if (cb->cb_print_power) {
2541 if (children == 0) {
2542 /* Only leaf vdevs have physical slots */
2543 switch (zpool_power_current_state(zhp, (char *)
2544 fnvlist_lookup_string(nv,
2545 ZPOOL_CONFIG_PATH))) {
2546 case 0:
2547 printf_color(ANSI_RED, " %5s",
2548 gettext("off"));
2549 break;
2550 case 1:
2551 printf(" %5s", gettext("on"));
2552 break;
2553 default:
2554 printf(" %5s", "-");
2555 }
2556 } else {
2557 printf(" %5s", "-");
2558 }
2559 }
2560 }
2561
2562 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2563 &notpresent) == 0) {
2564 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
2565 (void) printf(" %s %s", gettext("was"), path);
2566 } else if (vs->vs_aux != 0) {
2567 (void) printf(" ");
2568 color_start(ANSI_RED);
2569 switch (vs->vs_aux) {
2570 case VDEV_AUX_OPEN_FAILED:
2571 (void) printf(gettext("cannot open"));
2572 break;
2573
2574 case VDEV_AUX_BAD_GUID_SUM:
2575 (void) printf(gettext("missing device"));
2576 break;
2577
2578 case VDEV_AUX_NO_REPLICAS:
2579 (void) printf(gettext("insufficient replicas"));
2580 break;
2581
2582 case VDEV_AUX_VERSION_NEWER:
2583 (void) printf(gettext("newer version"));
2584 break;
2585
2586 case VDEV_AUX_UNSUP_FEAT:
2587 (void) printf(gettext("unsupported feature(s)"));
2588 break;
2589
2590 case VDEV_AUX_ASHIFT_TOO_BIG:
2591 (void) printf(gettext("unsupported minimum blocksize"));
2592 break;
2593
2594 case VDEV_AUX_SPARED:
2595 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2596 &spare_cb.cb_guid) == 0);
2597 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
2598 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
2599 zpool_get_name(zhp)) == 0)
2600 (void) printf(gettext("currently in "
2601 "use"));
2602 else
2603 (void) printf(gettext("in use by "
2604 "pool '%s'"),
2605 zpool_get_name(spare_cb.cb_zhp));
2606 zpool_close(spare_cb.cb_zhp);
2607 } else {
2608 (void) printf(gettext("currently in use"));
2609 }
2610 break;
2611
2612 case VDEV_AUX_ERR_EXCEEDED:
2613 if (vs->vs_read_errors + vs->vs_write_errors +
2614 vs->vs_checksum_errors == 0 && children == 0 &&
2615 vs->vs_slow_ios > 0) {
2616 (void) printf(gettext("too many slow I/Os"));
2617 } else {
2618 (void) printf(gettext("too many errors"));
2619 }
2620 break;
2621
2622 case VDEV_AUX_IO_FAILURE:
2623 (void) printf(gettext("experienced I/O failures"));
2624 break;
2625
2626 case VDEV_AUX_BAD_LOG:
2627 (void) printf(gettext("bad intent log"));
2628 break;
2629
2630 case VDEV_AUX_EXTERNAL:
2631 (void) printf(gettext("external device fault"));
2632 break;
2633
2634 case VDEV_AUX_SPLIT_POOL:
2635 (void) printf(gettext("split into new pool"));
2636 break;
2637
2638 case VDEV_AUX_ACTIVE:
2639 (void) printf(gettext("currently in use"));
2640 break;
2641
2642 case VDEV_AUX_CHILDREN_OFFLINE:
2643 (void) printf(gettext("all children offline"));
2644 break;
2645
2646 case VDEV_AUX_BAD_LABEL:
2647 (void) printf(gettext("invalid label"));
2648 break;
2649
2650 default:
2651 (void) printf(gettext("corrupted data"));
2652 break;
2653 }
2654 color_end();
2655 } else if (children == 0 && !isspare &&
2656 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
2657 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
2658 vs->vs_configured_ashift < vs->vs_physical_ashift) {
2659 (void) printf(
2660 gettext(" block size: %dB configured, %dB native"),
2661 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
2662 }
2663
2664 if (vs->vs_scan_removing != 0) {
2665 (void) printf(gettext(" (removing)"));
2666 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
2667 (void) printf(gettext(" (non-allocating)"));
2668 }
2669
2670 /* The root vdev has the scrub/resilver stats */
2671 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2672 ZPOOL_CONFIG_VDEV_TREE);
2673 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
2674 (uint64_t **)&ps, &c);
2675
2676 /*
2677 * If you force fault a drive that's resilvering, its scan stats can
2678 * get frozen in time, giving the false impression that it's
2679 * being resilvered. That's why we check the state to see if the vdev
2680 * is healthy before reporting "resilvering" or "repairing".
2681 */
2682 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
2683 vs->vs_state == VDEV_STATE_HEALTHY) {
2684 if (vs->vs_scan_processed != 0) {
2685 (void) printf(gettext(" (%s)"),
2686 (ps->pss_func == POOL_SCAN_RESILVER) ?
2687 "resilvering" : "repairing");
2688 } else if (vs->vs_resilver_deferred) {
2689 (void) printf(gettext(" (awaiting resilver)"));
2690 }
2691 }
2692
2693 /* The top-level vdevs have the rebuild stats */
2694 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
2695 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
2696 if (vs->vs_rebuild_processed != 0) {
2697 (void) printf(gettext(" (resilvering)"));
2698 }
2699 }
2700
2701 if (cb->vcdl != NULL) {
2702 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2703 printf(" ");
2704 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
2705 }
2706 }
2707
2708 /* Display vdev initialization and trim status for leaves. */
2709 if (children == 0) {
2710 print_status_initialize(vs, cb->cb_print_vdev_init);
2711 print_status_trim(vs, cb->cb_print_vdev_trim);
2712 }
2713
2714 (void) printf("\n");
2715
2716 for (c = 0; c < children; c++) {
2717 uint64_t islog = B_FALSE, ishole = B_FALSE;
2718
2719 /* Don't print logs or holes here */
2720 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2721 &islog);
2722 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2723 &ishole);
2724 if (islog || ishole)
2725 continue;
2726 /* Only print normal classes here */
2727 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2728 continue;
2729
2730 /* Provide vdev_rebuild_stats to children if available */
2731 if (vrs == NULL) {
2732 (void) nvlist_lookup_uint64_array(nv,
2733 ZPOOL_CONFIG_REBUILD_STATS,
2734 (uint64_t **)&vrs, &i);
2735 }
2736
2737 vname = zpool_vdev_name(g_zfs, zhp, child[c],
2738 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2739 print_status_config(zhp, cb, vname, child[c], depth + 2,
2740 isspare, vrs);
2741 free(vname);
2742 }
2743 }
2744
2745 /*
2746 * Print the configuration of an exported pool. Iterate over all vdevs in the
2747 * pool, printing out the name and status for each one.
2748 */
2749 static void
2750 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
2751 int depth)
2752 {
2753 nvlist_t **child;
2754 uint_t c, children;
2755 vdev_stat_t *vs;
2756 const char *type;
2757 char *vname;
2758
2759 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2760 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
2761 strcmp(type, VDEV_TYPE_HOLE) == 0)
2762 return;
2763
2764 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2765 (uint64_t **)&vs, &c) == 0);
2766
2767 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
2768 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
2769
2770 if (vs->vs_aux != 0) {
2771 (void) printf(" ");
2772
2773 switch (vs->vs_aux) {
2774 case VDEV_AUX_OPEN_FAILED:
2775 (void) printf(gettext("cannot open"));
2776 break;
2777
2778 case VDEV_AUX_BAD_GUID_SUM:
2779 (void) printf(gettext("missing device"));
2780 break;
2781
2782 case VDEV_AUX_NO_REPLICAS:
2783 (void) printf(gettext("insufficient replicas"));
2784 break;
2785
2786 case VDEV_AUX_VERSION_NEWER:
2787 (void) printf(gettext("newer version"));
2788 break;
2789
2790 case VDEV_AUX_UNSUP_FEAT:
2791 (void) printf(gettext("unsupported feature(s)"));
2792 break;
2793
2794 case VDEV_AUX_ERR_EXCEEDED:
2795 (void) printf(gettext("too many errors"));
2796 break;
2797
2798 case VDEV_AUX_ACTIVE:
2799 (void) printf(gettext("currently in use"));
2800 break;
2801
2802 case VDEV_AUX_CHILDREN_OFFLINE:
2803 (void) printf(gettext("all children offline"));
2804 break;
2805
2806 case VDEV_AUX_BAD_LABEL:
2807 (void) printf(gettext("invalid label"));
2808 break;
2809
2810 default:
2811 (void) printf(gettext("corrupted data"));
2812 break;
2813 }
2814 }
2815 (void) printf("\n");
2816
2817 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2818 &child, &children) != 0)
2819 return;
2820
2821 for (c = 0; c < children; c++) {
2822 uint64_t is_log = B_FALSE;
2823
2824 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2825 &is_log);
2826 if (is_log)
2827 continue;
2828 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2829 continue;
2830
2831 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2832 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2833 print_import_config(cb, vname, child[c], depth + 2);
2834 free(vname);
2835 }
2836
2837 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2838 &child, &children) == 0) {
2839 (void) printf(gettext("\tcache\n"));
2840 for (c = 0; c < children; c++) {
2841 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2842 cb->cb_name_flags);
2843 (void) printf("\t %s\n", vname);
2844 free(vname);
2845 }
2846 }
2847
2848 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2849 &child, &children) == 0) {
2850 (void) printf(gettext("\tspares\n"));
2851 for (c = 0; c < children; c++) {
2852 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2853 cb->cb_name_flags);
2854 (void) printf("\t %s\n", vname);
2855 free(vname);
2856 }
2857 }
2858 }
2859
2860 /*
2861 * Print specialized class vdevs.
2862 *
2863 * These are recorded as top-level vdevs in the main pool child array
2864 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
2865 * print_status_config() or print_import_config() to print the top-level
2866 * class vdevs; any of their children (e.g. mirrored slogs) are then printed
2867 * recursively, which works because only the top-level vdev is marked.
2868 */
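/*
 * Sketch (vdev names are placeholders): a pool with a mirrored special
 * vdev carries a top-level child whose nvlist has
 * ZPOOL_CONFIG_ALLOCATION_BIAS = "special". The normal-class pass skips
 * that child, and this function prints it under its own heading:
 *
 *	special
 *	  mirror-1  ONLINE  ...
 *	    sdc     ONLINE  ...
 *	    sdd     ONLINE  ...
 */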
2869 static void
2870 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
2871 const char *class)
2872 {
2873 uint_t c, children;
2874 nvlist_t **child;
2875 boolean_t printed = B_FALSE;
2876
2877 assert(zhp != NULL || !cb->cb_verbose);
2878
2879 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
2880 &children) != 0)
2881 return;
2882
2883 for (c = 0; c < children; c++) {
2884 uint64_t is_log = B_FALSE;
2885 const char *bias = NULL;
2886 const char *type = NULL;
2887
2888 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2889 &is_log);
2890
2891 if (is_log) {
2892 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
2893 } else {
2894 (void) nvlist_lookup_string(child[c],
2895 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
2896 (void) nvlist_lookup_string(child[c],
2897 ZPOOL_CONFIG_TYPE, &type);
2898 }
2899
2900 if (bias == NULL || strcmp(bias, class) != 0)
2901 continue;
2902 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2903 continue;
2904
2905 if (!printed) {
2906 (void) printf("\t%s\t\n", gettext(class));
2907 printed = B_TRUE;
2908 }
2909
2910 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
2911 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2912 if (cb->cb_print_status)
2913 print_status_config(zhp, cb, name, child[c], 2,
2914 B_FALSE, NULL);
2915 else
2916 print_import_config(cb, name, child[c], 2);
2917 free(name);
2918 }
2919 }
2920
2921 /*
2922 * Display the status for the given pool.
2923 */
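/*
 * Rough shape of the listing this produces for "zpool import" (names and
 * numbers are illustrative):
 *
 *	  pool: tank
 *	    id: 9316883872138086021
 *	 state: ONLINE
 *	action: The pool can be imported using its name or numeric identifier.
 *	config:
 *
 *		tank        ONLINE
 *		  mirror-0  ONLINE
 *		    sda     ONLINE
 *		    sdb     ONLINE
 */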
2924 static int
2925 show_import(nvlist_t *config, boolean_t report_error)
2926 {
2927 uint64_t pool_state;
2928 vdev_stat_t *vs;
2929 const char *name;
2930 uint64_t guid;
2931 uint64_t hostid = 0;
2932 const char *msgid;
2933 const char *hostname = "unknown";
2934 nvlist_t *nvroot, *nvinfo;
2935 zpool_status_t reason;
2936 zpool_errata_t errata;
2937 const char *health;
2938 uint_t vsc;
2939 const char *comment;
2940 status_cbdata_t cb = { 0 };
2941
2942 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2943 &name) == 0);
2944 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2945 &guid) == 0);
2946 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2947 &pool_state) == 0);
2948 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2949 &nvroot) == 0);
2950
2951 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
2952 (uint64_t **)&vs, &vsc) == 0);
2953 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2954
2955 reason = zpool_import_status(config, &msgid, &errata);
2956
2957 /*
2958 * If we're importing using a cachefile, then we won't report any
2959 * errors unless we are in the scan phase of the import.
2960 */
2961 if (reason != ZPOOL_STATUS_OK && !report_error)
2962 return (reason);
2963
2964 (void) printf(gettext(" pool: %s\n"), name);
2965 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
2966 (void) printf(gettext(" state: %s"), health);
2967 if (pool_state == POOL_STATE_DESTROYED)
2968 (void) printf(gettext(" (DESTROYED)"));
2969 (void) printf("\n");
2970
2971 switch (reason) {
2972 case ZPOOL_STATUS_MISSING_DEV_R:
2973 case ZPOOL_STATUS_MISSING_DEV_NR:
2974 case ZPOOL_STATUS_BAD_GUID_SUM:
2975 printf_color(ANSI_BOLD, gettext("status: "));
2976 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2977 "missing from the system.\n"));
2978 break;
2979
2980 case ZPOOL_STATUS_CORRUPT_LABEL_R:
2981 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
2982 printf_color(ANSI_BOLD, gettext("status: "));
2983 printf_color(ANSI_YELLOW, gettext("One or more devices contains"
2984 " corrupted data.\n"));
2985 break;
2986
2987 case ZPOOL_STATUS_CORRUPT_DATA:
2988 (void) printf(
2989 gettext(" status: The pool data is corrupted.\n"));
2990 break;
2991
2992 case ZPOOL_STATUS_OFFLINE_DEV:
2993 printf_color(ANSI_BOLD, gettext("status: "));
2994 printf_color(ANSI_YELLOW, gettext("One or more devices "
2995 "are offlined.\n"));
2996 break;
2997
2998 case ZPOOL_STATUS_CORRUPT_POOL:
2999 printf_color(ANSI_BOLD, gettext("status: "));
3000 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
3001 "corrupted.\n"));
3002 break;
3003
3004 case ZPOOL_STATUS_VERSION_OLDER:
3005 printf_color(ANSI_BOLD, gettext("status: "));
3006 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3007 "a legacy on-disk version.\n"));
3008 break;
3009
3010 case ZPOOL_STATUS_VERSION_NEWER:
3011 printf_color(ANSI_BOLD, gettext("status: "));
3012 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3013 "an incompatible version.\n"));
3014 break;
3015
3016 case ZPOOL_STATUS_FEAT_DISABLED:
3017 printf_color(ANSI_BOLD, gettext("status: "));
3018 printf_color(ANSI_YELLOW, gettext("Some supported "
3019 "features are not enabled on the pool.\n\t"
3020 "(Note that they may be intentionally disabled "
3021 "if the\n\t'compatibility' property is set.)\n"));
3022 break;
3023
3024 case ZPOOL_STATUS_COMPATIBILITY_ERR:
3025 printf_color(ANSI_BOLD, gettext("status: "));
3026 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
3027 "the file(s) indicated by the 'compatibility'\n"
3028 "property.\n"));
3029 break;
3030
3031 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
3032 printf_color(ANSI_BOLD, gettext("status: "));
3033 printf_color(ANSI_YELLOW, gettext("One or more features "
3034 "are enabled on the pool despite not being\n"
3035 "requested by the 'compatibility' property.\n"));
3036 break;
3037
3038 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3039 printf_color(ANSI_BOLD, gettext("status: "));
3040 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3041 "feature(s) not supported on this system:\n"));
3042 color_start(ANSI_YELLOW);
3043 zpool_print_unsup_feat(config);
3044 color_end();
3045 break;
3046
3047 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3048 printf_color(ANSI_BOLD, gettext("status: "));
3049 printf_color(ANSI_YELLOW, gettext("The pool can only be "
3050 "accessed in read-only mode on this system. It\n\tcannot be"
3051 " accessed in read-write mode because it uses the "
3052 "following\n\tfeature(s) not supported on this system:\n"));
3053 color_start(ANSI_YELLOW);
3054 zpool_print_unsup_feat(config);
3055 color_end();
3056 break;
3057
3058 case ZPOOL_STATUS_HOSTID_ACTIVE:
3059 printf_color(ANSI_BOLD, gettext("status: "));
3060 printf_color(ANSI_YELLOW, gettext("The pool is currently "
3061 "imported by another system.\n"));
3062 break;
3063
3064 case ZPOOL_STATUS_HOSTID_REQUIRED:
3065 printf_color(ANSI_BOLD, gettext("status: "));
3066 printf_color(ANSI_YELLOW, gettext("The pool has the "
3067 "multihost property on. It cannot\n\tbe safely imported "
3068 "when the system hostid is not set.\n"));
3069 break;
3070
3071 case ZPOOL_STATUS_HOSTID_MISMATCH:
3072 printf_color(ANSI_BOLD, gettext("status: "));
3073 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3074 "by another system.\n"));
3075 break;
3076
3077 case ZPOOL_STATUS_FAULTED_DEV_R:
3078 case ZPOOL_STATUS_FAULTED_DEV_NR:
3079 printf_color(ANSI_BOLD, gettext("status: "));
3080 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3081 "faulted.\n"));
3082 break;
3083
3084 case ZPOOL_STATUS_BAD_LOG:
3085 printf_color(ANSI_BOLD, gettext("status: "));
3086 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3087 "be read.\n"));
3088 break;
3089
3090 case ZPOOL_STATUS_RESILVERING:
3091 case ZPOOL_STATUS_REBUILDING:
3092 printf_color(ANSI_BOLD, gettext("status: "));
3093 printf_color(ANSI_YELLOW, gettext("One or more devices were "
3094 "being resilvered.\n"));
3095 break;
3096
3097 case ZPOOL_STATUS_ERRATA:
3098 printf_color(ANSI_BOLD, gettext("status: "));
3099 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3100 errata);
3101 break;
3102
3103 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3104 printf_color(ANSI_BOLD, gettext("status: "));
3105 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3106 "configured to use a non-native block size.\n"
3107 "\tExpect reduced performance.\n"));
3108 break;
3109
3110 default:
3111 /*
3112 * No other status can be seen when importing pools.
3113 */
3114 assert(reason == ZPOOL_STATUS_OK);
3115 }
3116
3117 /*
3118 * Print out an action according to the overall state of the pool.
3119 */
3120 if (vs->vs_state == VDEV_STATE_HEALTHY) {
3121 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3122 reason == ZPOOL_STATUS_FEAT_DISABLED) {
3123 (void) printf(gettext(" action: The pool can be "
3124 "imported using its name or numeric identifier, "
3125 "though\n\tsome features will not be available "
3126 "without an explicit 'zpool upgrade'.\n"));
3127 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3128 (void) printf(gettext(" action: The pool can be "
3129 "imported using its name or numeric\n\tidentifier, "
3130 "though the file(s) indicated by its "
3131 "'compatibility'\n\tproperty cannot be parsed at "
3132 "this time.\n"));
3133 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3134 (void) printf(gettext(" action: The pool can be "
3135 "imported using its name or numeric "
3136 "identifier and\n\tthe '-f' flag.\n"));
3137 } else if (reason == ZPOOL_STATUS_ERRATA) {
3138 switch (errata) {
3139 case ZPOOL_ERRATA_NONE:
3140 break;
3141
3142 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3143 (void) printf(gettext(" action: The pool can "
3144 "be imported using its name or numeric "
3145 "identifier,\n\thowever there is a compat"
3146 "ibility issue which should be corrected"
3147 "\n\tby running 'zpool scrub'\n"));
3148 break;
3149
3150 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3151 (void) printf(gettext(" action: The pool can"
3152 "not be imported with this version of ZFS "
3153 "due to\n\tan active asynchronous destroy. "
3154 "Revert to an earlier version\n\tand "
3155 "allow the destroy to complete before "
3156 "updating.\n"));
3157 break;
3158
3159 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3160 (void) printf(gettext(" action: Existing "
3161 "encrypted datasets contain an on-disk "
3162 "incompatibility, which\n\tneeds to be "
3163 "corrected. Backup these datasets to new "
3164 "encrypted datasets\n\tand destroy the "
3165 "old ones.\n"));
3166 break;
3167
3168 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3169 (void) printf(gettext(" action: Existing "
3170 "encrypted snapshots and bookmarks contain "
3171 "an on-disk\n\tincompatibility. This may "
3172 "cause on-disk corruption if they are used"
3173 "\n\twith 'zfs recv'. To correct the "
3174 "issue, enable the bookmark_v2 feature.\n\t"
3175 "No additional action is needed if there "
3176 "are no encrypted snapshots or\n\t"
3177 "bookmarks. If preserving the encrypted "
3178 "snapshots and bookmarks is\n\trequired, "
3179 "use a non-raw send to backup and restore "
3180 "them. Alternately,\n\tthey may be removed"
3181 " to resolve the incompatibility.\n"));
3182 break;
3183 default:
3184 /*
3185 * All errata must contain an action message.
3186 */
3187 assert(0);
3188 }
3189 } else {
3190 (void) printf(gettext(" action: The pool can be "
3191 "imported using its name or numeric "
3192 "identifier.\n"));
3193 }
3194 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3195 (void) printf(gettext(" action: The pool can be imported "
3196 "despite missing or damaged devices. The\n\tfault "
3197 "tolerance of the pool may be compromised if imported.\n"));
3198 } else {
3199 switch (reason) {
3200 case ZPOOL_STATUS_VERSION_NEWER:
3201 (void) printf(gettext(" action: The pool cannot be "
3202 "imported. Access the pool on a system running "
3203 "newer\n\tsoftware, or recreate the pool from "
3204 "backup.\n"));
3205 break;
3206 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3207 printf_color(ANSI_BOLD, gettext("action: "));
3208 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3209 "imported. Access the pool on a system that "
3210 "supports\n\tthe required feature(s), or recreate "
3211 "the pool from backup.\n"));
3212 break;
3213 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3214 printf_color(ANSI_BOLD, gettext("action: "));
3215 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3216 "imported in read-write mode. Import the pool "
3217 "with\n"
3218 "\t\"-o readonly=on\", access the pool on a system "
3219 "that supports the\n\trequired feature(s), or "
3220 "recreate the pool from backup.\n"));
3221 break;
3222 case ZPOOL_STATUS_MISSING_DEV_R:
3223 case ZPOOL_STATUS_MISSING_DEV_NR:
3224 case ZPOOL_STATUS_BAD_GUID_SUM:
3225 (void) printf(gettext(" action: The pool cannot be "
3226 "imported. Attach the missing\n\tdevices and try "
3227 "again.\n"));
3228 break;
3229 case ZPOOL_STATUS_HOSTID_ACTIVE:
3230 VERIFY0(nvlist_lookup_nvlist(config,
3231 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3232
3233 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3234 hostname = fnvlist_lookup_string(nvinfo,
3235 ZPOOL_CONFIG_MMP_HOSTNAME);
3236
3237 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3238 hostid = fnvlist_lookup_uint64(nvinfo,
3239 ZPOOL_CONFIG_MMP_HOSTID);
3240
3241 (void) printf(gettext(" action: The pool must be "
3242 "exported from %s (hostid=%"PRIx64")\n\tbefore it "
3243 "can be safely imported.\n"), hostname, hostid);
3244 break;
3245 case ZPOOL_STATUS_HOSTID_REQUIRED:
3246 (void) printf(gettext(" action: Set a unique system "
3247 "hostid with the zgenhostid(8) command.\n"));
3248 break;
3249 default:
3250 (void) printf(gettext(" action: The pool cannot be "
3251 "imported due to damaged devices or data.\n"));
3252 }
3253 }
3254
3255 /* Print the comment attached to the pool. */
3256 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3257 (void) printf(gettext("comment: %s\n"), comment);
3258
3259 /*
3260 * If the state is "closed" or "can't open", and the aux state
3261 * is "corrupt data":
3262 */
3263 if (((vs->vs_state == VDEV_STATE_CLOSED) ||
3264 (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
3265 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
3266 if (pool_state == POOL_STATE_DESTROYED)
3267 (void) printf(gettext("\tThe pool was destroyed, "
3268 "but can be imported using the '-Df' flags.\n"));
3269 else if (pool_state != POOL_STATE_EXPORTED)
3270 (void) printf(gettext("\tThe pool may be active on "
3271 "another system, but can be imported using\n\t"
3272 "the '-f' flag.\n"));
3273 }
3274
3275 if (msgid != NULL) {
3276 (void) printf(gettext(
3277 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3278 msgid);
3279 }
3280
3281 (void) printf(gettext(" config:\n\n"));
3282
3283 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3284 VDEV_NAME_TYPE_ID);
3285 if (cb.cb_namewidth < 10)
3286 cb.cb_namewidth = 10;
3287
3288 print_import_config(&cb, name, nvroot, 0);
3289
3290 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3291 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3292 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3293
3294 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3295 (void) printf(gettext("\n\tAdditional devices are known to "
3296 "be part of this pool, though their\n\texact "
3297 "configuration cannot be determined.\n"));
3298 }
3299 return (0);
3300 }
3301
3302 static boolean_t
3303 zfs_force_import_required(nvlist_t *config)
3304 {
3305 uint64_t state;
3306 uint64_t hostid = 0;
3307 nvlist_t *nvinfo;
3308
3309 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3310 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3311
3312 /*
3313 * The hostid on LOAD_INFO comes from the MOS label via
3314 * spa_tryimport(). If it's not there, then we're likely talking to an
3315 * older kernel, so use the top one, which will be from the label
3316 * discovered in zpool_find_import(), or if a cachefile is in use, the
3317 * local hostid.
3318 */
3319 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3320 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3321 &hostid);
3322
3323 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3324 return (B_TRUE);
3325
3326 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3327 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3328 ZPOOL_CONFIG_MMP_STATE);
3329
3330 if (mmp_state != MMP_STATE_INACTIVE)
3331 return (B_TRUE);
3332 }
3333
3334 return (B_FALSE);
3335 }
3336
3337 /*
3338 * Perform the import for the given configuration. This passes the heavy
3339 * lifting off to zpool_import_props(), and then mounts the datasets contained
3340 * within the pool.
3341 */
3342 static int
3343 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3344 nvlist_t *props, int flags)
3345 {
3346 int ret = 0;
3347 int ms_status = 0;
3348 zpool_handle_t *zhp;
3349 const char *name;
3350 uint64_t version;
3351
3352 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3353 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3354
3355 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3356 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3357 "is formatted using an unsupported ZFS version\n"), name);
3358 return (1);
3359 } else if (zfs_force_import_required(config) &&
3360 !(flags & ZFS_IMPORT_ANY_HOST)) {
3361 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3362 nvlist_t *nvinfo;
3363
3364 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3365 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3366 mmp_state = fnvlist_lookup_uint64(nvinfo,
3367 ZPOOL_CONFIG_MMP_STATE);
3368
3369 if (mmp_state == MMP_STATE_ACTIVE) {
3370 const char *hostname = "<unknown>";
3371 uint64_t hostid = 0;
3372
3373 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3374 hostname = fnvlist_lookup_string(nvinfo,
3375 ZPOOL_CONFIG_MMP_HOSTNAME);
3376
3377 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3378 hostid = fnvlist_lookup_uint64(nvinfo,
3379 ZPOOL_CONFIG_MMP_HOSTID);
3380
3381 (void) fprintf(stderr, gettext("cannot import '%s': "
3382 "pool is imported on %s (hostid: "
3383 "0x%"PRIx64")\nExport the pool on the other "
3384 "system, then run 'zpool import'.\n"),
3385 name, hostname, hostid);
3386 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3387 (void) fprintf(stderr, gettext("Cannot import '%s': "
3388 "pool has the multihost property on and the\n"
3389 "system's hostid is not set. Set a unique hostid "
3390 "with the zgenhostid(8) command.\n"), name);
3391 } else {
3392 const char *hostname = "<unknown>";
3393 time_t timestamp = 0;
3394 uint64_t hostid = 0;
3395
3396 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3397 hostname = fnvlist_lookup_string(nvinfo,
3398 ZPOOL_CONFIG_HOSTNAME);
3399 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3400 hostname = fnvlist_lookup_string(config,
3401 ZPOOL_CONFIG_HOSTNAME);
3402
3403 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3404 timestamp = fnvlist_lookup_uint64(config,
3405 ZPOOL_CONFIG_TIMESTAMP);
3406
3407 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3408 hostid = fnvlist_lookup_uint64(nvinfo,
3409 ZPOOL_CONFIG_HOSTID);
3410 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3411 hostid = fnvlist_lookup_uint64(config,
3412 ZPOOL_CONFIG_HOSTID);
3413
3414 (void) fprintf(stderr, gettext("cannot import '%s': "
3415 "pool was previously in use from another system.\n"
3416 "Last accessed by %s (hostid=%"PRIx64") at %s"
3417 "The pool can be imported, use 'zpool import -f' "
3418 "to import the pool.\n"), name, hostname,
3419 hostid, ctime(&timestamp));
3420 }
3421
3422 return (1);
3423 }
3424
3425 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3426 return (1);
3427
3428 if (newname != NULL)
3429 name = newname;
3430
3431 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3432 return (1);
3433
3434 /*
3435 * Loading keys is best effort. We don't want to return immediately
3436 * if it fails but we do want to give the error to the caller.
3437 */
3438 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3439 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3440 ret = 1;
3441
3442 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3443 !(flags & ZFS_IMPORT_ONLY)) {
3444 ms_status = zpool_enable_datasets(zhp, mntopts, 0);
3445 if (ms_status == EZFS_SHAREFAILED) {
3446 (void) fprintf(stderr, gettext("Import was "
3447 "successful, but unable to share some datasets"));
3448 } else if (ms_status == EZFS_MOUNTFAILED) {
3449 (void) fprintf(stderr, gettext("Import was "
3450 "successful, but unable to mount some datasets"));
3451 }
3452 }
3453
3454 zpool_close(zhp);
3455 return (ret);
3456 }
3457
3458 static int
3459 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3460 char *orig_name, char *new_name,
3461 boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
3462 importargs_t *import)
3463 {
3464 nvlist_t *config = NULL;
3465 nvlist_t *found_config = NULL;
3466 uint64_t pool_state;
3467
3468 /*
3469 * At this point we have a list of import candidate configs. Even if
3470 * we were searching by pool name or guid, we still need to
3471 * post-process the list to deal with pool state and possible
3472 * duplicate names.
3473 */
3474 int err = 0;
3475 nvpair_t *elem = NULL;
3476 boolean_t first = B_TRUE;
3477 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3478
3479 verify(nvpair_value_nvlist(elem, &config) == 0);
3480
3481 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3482 &pool_state) == 0);
3483 if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
3484 continue;
3485 if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
3486 continue;
3487
3488 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3489 import->policy) == 0);
3490
3491 if (!pool_specified) {
3492 if (first)
3493 first = B_FALSE;
3494 else if (!do_all)
3495 (void) fputc('\n', stdout);
3496
3497 if (do_all) {
3498 err |= do_import(config, NULL, mntopts,
3499 props, flags);
3500 } else {
3501 /*
3502 * If we're importing from a cachefile, then
3503 * we don't want to report errors until we
3504 * are in the scan phase of the import. If
3505 * we get an error, then we return that error
3506 * to invoke the scan phase.
3507 */
3508 if (import->cachefile && !import->scan)
3509 err = show_import(config, B_FALSE);
3510 else
3511 (void) show_import(config, B_TRUE);
3512 }
3513 } else if (import->poolname != NULL) {
3514 const char *name;
3515
3516 /*
3517 * We are searching for a pool based on name.
3518 */
3519 verify(nvlist_lookup_string(config,
3520 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
3521
3522 if (strcmp(name, import->poolname) == 0) {
3523 if (found_config != NULL) {
3524 (void) fprintf(stderr, gettext(
3525 "cannot import '%s': more than "
3526 "one matching pool\n"),
3527 import->poolname);
3528 (void) fprintf(stderr, gettext(
3529 "import by numeric ID instead\n"));
3530 err = B_TRUE;
3531 }
3532 found_config = config;
3533 }
3534 } else {
3535 uint64_t guid;
3536
3537 /*
3538 * Search for a pool by guid.
3539 */
3540 verify(nvlist_lookup_uint64(config,
3541 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
3542
3543 if (guid == import->guid)
3544 found_config = config;
3545 }
3546 }
3547
3548 /*
3549 * If we were searching for a specific pool, verify that we found a
3550 * pool, and then do the import.
3551 */
3552 if (pool_specified && err == 0) {
3553 if (found_config == NULL) {
3554 (void) fprintf(stderr, gettext("cannot import '%s': "
3555 "no such pool available\n"), orig_name);
3556 err = B_TRUE;
3557 } else {
3558 err |= do_import(found_config, new_name,
3559 mntopts, props, flags);
3560 }
3561 }
3562
3563 /*
3564 * If we were just looking for pools, report an error if none were
3565 * found.
3566 */
3567 if (!pool_specified && first)
3568 (void) fprintf(stderr,
3569 gettext("no pools available to import\n"));
3570 return (err);
3571 }
3572
3573 typedef struct target_exists_args {
3574 const char *poolname;
3575 uint64_t poolguid;
3576 } target_exists_args_t;
3577
3578 static int
3579 name_or_guid_exists(zpool_handle_t *zhp, void *data)
3580 {
3581 target_exists_args_t *args = data;
3582 nvlist_t *config = zpool_get_config(zhp, NULL);
3583 int found = 0;
3584
3585 if (config == NULL)
3586 return (0);
3587
3588 if (args->poolname != NULL) {
3589 const char *pool_name;
3590
3591 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3592 &pool_name) == 0);
3593 if (strcmp(pool_name, args->poolname) == 0)
3594 found = 1;
3595 } else {
3596 uint64_t pool_guid;
3597
3598 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3599 &pool_guid) == 0);
3600 if (pool_guid == args->poolguid)
3601 found = 1;
3602 }
3603 zpool_close(zhp);
3604
3605 return (found);
3606 }
3607 /*
3608 * zpool checkpoint <pool>
3609 * checkpoint --discard <pool>
3610 *
3611 * -d Discard the checkpoint from a checkpointed
3612 * --discard pool.
3613 *
3614 * -w Wait for discarding a checkpoint to complete.
3615 * --wait
3616 *
3617 * Checkpoints the specified pool by taking a "snapshot" of its
3618 * current state. A pool can only have one checkpoint at a time.
3619 */
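/*
 * Example invocations (the pool name "tank" is a placeholder):
 *
 *	# zpool checkpoint tank
 *	# zpool checkpoint -d tank	(discard the checkpoint)
 *	# zpool checkpoint -d -w tank	(discard and wait for completion)
 */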
3620 int
3621 zpool_do_checkpoint(int argc, char **argv)
3622 {
3623 boolean_t discard, wait;
3624 char *pool;
3625 zpool_handle_t *zhp;
3626 int c, err;
3627
3628 struct option long_options[] = {
3629 {"discard", no_argument, NULL, 'd'},
3630 {"wait", no_argument, NULL, 'w'},
3631 {0, 0, 0, 0}
3632 };
3633
3634 discard = B_FALSE;
3635 wait = B_FALSE;
3636 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
3637 switch (c) {
3638 case 'd':
3639 discard = B_TRUE;
3640 break;
3641 case 'w':
3642 wait = B_TRUE;
3643 break;
3644 case '?':
3645 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3646 optopt);
3647 usage(B_FALSE);
3648 }
3649 }
3650
3651 if (wait && !discard) {
3652 (void) fprintf(stderr, gettext("--wait only valid when "
3653 "--discard also specified\n"));
3654 usage(B_FALSE);
3655 }
3656
3657 argc -= optind;
3658 argv += optind;
3659
3660 if (argc < 1) {
3661 (void) fprintf(stderr, gettext("missing pool argument\n"));
3662 usage(B_FALSE);
3663 }
3664
3665 if (argc > 1) {
3666 (void) fprintf(stderr, gettext("too many arguments\n"));
3667 usage(B_FALSE);
3668 }
3669
3670 pool = argv[0];
3671
3672 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
3673 /* As a special case, check for use of '/' in the name */
3674 if (strchr(pool, '/') != NULL)
3675 (void) fprintf(stderr, gettext("'zpool checkpoint' "
3676 "doesn't work on datasets. To save the state "
3677 "of a dataset from a specific point in time "
3678 "please use 'zfs snapshot'\n"));
3679 return (1);
3680 }
3681
3682 if (discard) {
3683 err = (zpool_discard_checkpoint(zhp) != 0);
3684 if (err == 0 && wait)
3685 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
3686 } else {
3687 err = (zpool_checkpoint(zhp) != 0);
3688 }
3689
3690 zpool_close(zhp);
3691
3692 return (err);
3693 }
3694
3695 #define CHECKPOINT_OPT 1024
3696
3697 /*
3698 * zpool import [-d dir] [-D]
3699 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3700 * [-d dir | -c cachefile | -s] [-f] -a
3701 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3702 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
3703 * [newpool]
3704 *
3705 * -c Read pool information from a cachefile instead of searching
3706 * devices. If importing from a cachefile config fails, then
3707 * fallback to searching for devices only in the directories that
3708 * exist in the cachefile.
3709 *
3710 * -d Scan in a specific directory, other than /dev/. More than
3711 * one directory can be specified using multiple '-d' options.
3712 *
3713 * -D Scan for previously destroyed pools, or import all or only the
3714 * specified destroyed pools.
3715 *
3716 * -R Temporarily import the pool, with all mountpoints relative to
3717 * the given root. The pool will remain exported when the machine
3718 * is rebooted.
3719 *
3720 * -V Import even in the presence of faulted vdevs. This is an
3721 * intentionally undocumented option for testing purposes, and
3722 * treats the pool configuration as complete, leaving any bad
3723 * vdevs in the FAULTED state. In other words, it does a verbatim
3724 * import.
3725 *
3726 * -f Force import, even if it appears that the pool is active.
3727 *
3728 * -F Attempt rewind if necessary.
3729 *
3730 * -n See if rewind would work, but don't actually rewind.
3731 *
3732 * -N Import the pool but don't mount datasets.
3733 *
3734 * -T Specify a starting txg to use for import. This is an
3735 * intentionally undocumented option for testing purposes.
3736 *
3737 * -a Import all pools found.
3738 *
3739 * -l Load encryption keys while importing.
3740 *
3741 * -o Set property=value and/or temporary mount options (without '=').
3742 *
3743 * -s Scan using the default search path; the libblkid cache will
3744 * not be consulted.
3745 *
3746 * --rewind-to-checkpoint
3747 * Import the pool and revert to the checkpoint.
3748 *
3749 * The import command scans for pools to import, and imports pools based on
3750 * pool name or GUID. The pool can also be renamed as part of the import process.
3751 */
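/*
 * Example invocations (pool names, GUIDs and paths are placeholders):
 *
 *	# zpool import				(list importable pools)
 *	# zpool import -a			(import everything found)
 *	# zpool import -d /dev/disk/by-id tank
 *	# zpool import -o readonly=on 9316883872138086021 tank_ro
 *	# zpool import -Df tank			(recover a destroyed pool)
 */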
3752 int
3753 zpool_do_import(int argc, char **argv)
3754 {
3755 char **searchdirs = NULL;
3756 char *env, *envdup = NULL;
3757 int nsearch = 0;
3758 int c;
3759 int err = 0;
3760 nvlist_t *pools = NULL;
3761 boolean_t do_all = B_FALSE;
3762 boolean_t do_destroyed = B_FALSE;
3763 char *mntopts = NULL;
3764 uint64_t searchguid = 0;
3765 char *searchname = NULL;
3766 char *propval;
3767 nvlist_t *policy = NULL;
3768 nvlist_t *props = NULL;
3769 int flags = ZFS_IMPORT_NORMAL;
3770 uint32_t rewind_policy = ZPOOL_NO_REWIND;
3771 boolean_t dryrun = B_FALSE;
3772 boolean_t do_rewind = B_FALSE;
3773 boolean_t xtreme_rewind = B_FALSE;
3774 boolean_t do_scan = B_FALSE;
3775 boolean_t pool_exists = B_FALSE;
3776 boolean_t pool_specified = B_FALSE;
3777 uint64_t txg = -1ULL;
3778 char *cachefile = NULL;
3779 importargs_t idata = { 0 };
3780 char *endptr;
3781
3782 struct option long_options[] = {
3783 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
3784 {0, 0, 0, 0}
3785 };
3786
3787 /* check options */
3788 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
3789 long_options, NULL)) != -1) {
3790 switch (c) {
3791 case 'a':
3792 do_all = B_TRUE;
3793 break;
3794 case 'c':
3795 cachefile = optarg;
3796 break;
3797 case 'd':
3798 searchdirs = safe_realloc(searchdirs,
3799 (nsearch + 1) * sizeof (char *));
3800 searchdirs[nsearch++] = optarg;
3801 break;
3802 case 'D':
3803 do_destroyed = B_TRUE;
3804 break;
3805 case 'f':
3806 flags |= ZFS_IMPORT_ANY_HOST;
3807 break;
3808 case 'F':
3809 do_rewind = B_TRUE;
3810 break;
3811 case 'l':
3812 flags |= ZFS_IMPORT_LOAD_KEYS;
3813 break;
3814 case 'm':
3815 flags |= ZFS_IMPORT_MISSING_LOG;
3816 break;
3817 case 'n':
3818 dryrun = B_TRUE;
3819 break;
3820 case 'N':
3821 flags |= ZFS_IMPORT_ONLY;
3822 break;
3823 case 'o':
3824 if ((propval = strchr(optarg, '=')) != NULL) {
3825 *propval = '\0';
3826 propval++;
3827 if (add_prop_list(optarg, propval,
3828 &props, B_TRUE))
3829 goto error;
3830 } else {
3831 mntopts = optarg;
3832 }
3833 break;
3834 case 'R':
3835 if (add_prop_list(zpool_prop_to_name(
3836 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
3837 goto error;
3838 if (add_prop_list_default(zpool_prop_to_name(
3839 ZPOOL_PROP_CACHEFILE), "none", &props))
3840 goto error;
3841 break;
3842 case 's':
3843 do_scan = B_TRUE;
3844 break;
3845 case 't':
3846 flags |= ZFS_IMPORT_TEMP_NAME;
3847 if (add_prop_list_default(zpool_prop_to_name(
3848 ZPOOL_PROP_CACHEFILE), "none", &props))
3849 goto error;
3850 break;
3851
3852 case 'T':
3853 errno = 0;
3854 txg = strtoull(optarg, &endptr, 0);
3855 if (errno != 0 || *endptr != '\0') {
3856 (void) fprintf(stderr,
3857 gettext("invalid txg value\n"));
3858 usage(B_FALSE);
3859 }
3860 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
3861 break;
3862 case 'V':
3863 flags |= ZFS_IMPORT_VERBATIM;
3864 break;
3865 case 'X':
3866 xtreme_rewind = B_TRUE;
3867 break;
3868 case CHECKPOINT_OPT:
3869 flags |= ZFS_IMPORT_CHECKPOINT;
3870 break;
3871 case ':':
3872 (void) fprintf(stderr, gettext("missing argument for "
3873 "'%c' option\n"), optopt);
3874 usage(B_FALSE);
3875 break;
3876 case '?':
3877 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3878 optopt);
3879 usage(B_FALSE);
3880 }
3881 }
3882
3883 argc -= optind;
3884 argv += optind;
3885
3886 if (cachefile && nsearch != 0) {
3887 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
3888 usage(B_FALSE);
3889 }
3890
3891 if (cachefile && do_scan) {
3892 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
3893 usage(B_FALSE);
3894 }
3895
3896 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
3897 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
3898 usage(B_FALSE);
3899 }
3900
3901 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
3902 (void) fprintf(stderr, gettext("-l is only meaningful during "
3903 "an import\n"));
3904 usage(B_FALSE);
3905 }
3906
3907 if ((dryrun || xtreme_rewind) && !do_rewind) {
3908 (void) fprintf(stderr,
3909 gettext("-n or -X only meaningful with -F\n"));
3910 usage(B_FALSE);
3911 }
3912 if (dryrun)
3913 rewind_policy = ZPOOL_TRY_REWIND;
3914 else if (do_rewind)
3915 rewind_policy = ZPOOL_DO_REWIND;
3916 if (xtreme_rewind)
3917 rewind_policy |= ZPOOL_EXTREME_REWIND;
3918
3919 /* In the future, we can capture further policy and include it here */
3920 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
3921 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
3922 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
3923 rewind_policy) != 0)
3924 goto error;
3925
3926 /* check argument count */
3927 if (do_all) {
3928 if (argc != 0) {
3929 (void) fprintf(stderr, gettext("too many arguments\n"));
3930 usage(B_FALSE);
3931 }
3932 } else {
3933 if (argc > 2) {
3934 (void) fprintf(stderr, gettext("too many arguments\n"));
3935 usage(B_FALSE);
3936 }
3937 }
3938
3939 /*
3940 * Check for the effective uid. We do this explicitly here because
3941 * otherwise any attempt to discover pools will silently fail.
3942 */
3943 if (argc == 0 && geteuid() != 0) {
3944 (void) fprintf(stderr, gettext("cannot "
3945 "discover pools: permission denied\n"));
3946
3947 free(searchdirs);
3948 nvlist_free(props);
3949 nvlist_free(policy);
3950 return (1);
3951 }
3952
3953 /*
3954 * Depending on the arguments given, we do one of the following:
3955 *
3956 * <none> Iterate through all pools and display information about
3957 * each one.
3958 *
3959 * -a Iterate through all pools and try to import each one.
3960 *
3961 * <id> Find the pool that corresponds to the given GUID/pool
3962 * name and import that one.
3963 *
3964 * -D Above options apply only to destroyed pools.
3965 */
3966 if (argc != 0) {
3967 char *endptr;
3968
3969 errno = 0;
3970 searchguid = strtoull(argv[0], &endptr, 10);
3971 if (errno != 0 || *endptr != '\0') {
3972 searchname = argv[0];
3973 searchguid = 0;
3974 }
3975 pool_specified = B_TRUE;
3976
3977 /*
3978 * User specified a name or guid. Ensure it's unique.
3979 */
3980 target_exists_args_t search = {searchname, searchguid};
3981 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
3982 }
3983
3984 /*
3985 * Check the environment for the preferred search path.
3986 */
3987 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
3988 char *dir, *tmp = NULL;
3989
3990 envdup = strdup(env);
3991
3992 for (dir = strtok_r(envdup, ":", &tmp);
3993 dir != NULL;
3994 dir = strtok_r(NULL, ":", &tmp)) {
3995 searchdirs = safe_realloc(searchdirs,
3996 (nsearch + 1) * sizeof (char *));
3997 searchdirs[nsearch++] = dir;
3998 }
3999 }
4000
4001 idata.path = searchdirs;
4002 idata.paths = nsearch;
4003 idata.poolname = searchname;
4004 idata.guid = searchguid;
4005 idata.cachefile = cachefile;
4006 idata.scan = do_scan;
4007 idata.policy = policy;
4008
4009 libpc_handle_t lpch = {
4010 .lpc_lib_handle = g_zfs,
4011 .lpc_ops = &libzfs_config_ops,
4012 .lpc_printerr = B_TRUE
4013 };
4014 pools = zpool_search_import(&lpch, &idata);
4015
4016 if (pools != NULL && pool_exists &&
4017 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
4018 (void) fprintf(stderr, gettext("cannot import '%s': "
4019 "a pool with that name already exists\n"),
4020 argv[0]);
4021 (void) fprintf(stderr, gettext("use the form '%s "
4022 "<pool | id> <newpool>' to give it a new name\n"),
4023 "zpool import");
4024 err = 1;
4025 } else if (pools == NULL && pool_exists) {
4026 (void) fprintf(stderr, gettext("cannot import '%s': "
4027 "a pool with that name is already created/imported,\n"),
4028 argv[0]);
4029 (void) fprintf(stderr, gettext("and no additional pools "
4030 "with that name were found\n"));
4031 err = 1;
4032 } else if (pools == NULL) {
4033 if (argc != 0) {
4034 (void) fprintf(stderr, gettext("cannot import '%s': "
4035 "no such pool available\n"), argv[0]);
4036 }
4037 err = 1;
4038 }
4039
4040 if (err == 1) {
4041 free(searchdirs);
4042 free(envdup);
4043 nvlist_free(policy);
4044 nvlist_free(pools);
4045 nvlist_free(props);
4046 return (1);
4047 }
4048
4049 err = import_pools(pools, props, mntopts, flags,
4050 argc >= 1 ? argv[0] : NULL,
4051 argc >= 2 ? argv[1] : NULL,
4052 do_destroyed, pool_specified, do_all, &idata);
4053
4054 /*
4055 * If we're using the cachefile and we failed to import, then
4056 * fallback to scanning the directory for pools that match
4057 * those in the cachefile.
4058 */
4059 if (err != 0 && cachefile != NULL) {
4060 (void) printf(gettext("cachefile import failed, retrying\n"));
4061
4062 /*
4063 * We use the scan flag to gather the directories that exist
4064 * in the cachefile. If we need to fallback to searching for
4065 * the pool config, we will only search devices in these
4066 * directories.
4067 */
4068 idata.scan = B_TRUE;
4069 nvlist_free(pools);
4070 pools = zpool_search_import(&lpch, &idata);
4071
4072 err = import_pools(pools, props, mntopts, flags,
4073 argc >= 1 ? argv[0] : NULL,
4074 argc >= 2 ? argv[1] : NULL,
4075 do_destroyed, pool_specified, do_all, &idata);
4076 }
4077
4078 error:
4079 nvlist_free(props);
4080 nvlist_free(pools);
4081 nvlist_free(policy);
4082 free(searchdirs);
4083 free(envdup);
4084
4085 return (err ? 1 : 0);
4086 }
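
/*
 * Illustrative invocations of the import paths handled above (a sketch; the
 * pool name "tank", the new name "newtank", and the search directory are
 * hypothetical):
 *
 *   # Import a pool by name, restricting the device scan to one directory:
 *   zpool import -d /dev/disk/by-id tank
 *
 *   # Import a pool by name or numeric GUID and rename it in the same step:
 *   zpool import tank newtank
 *
 *   # Import a pool and roll it back to its checkpoint:
 *   zpool import --rewind-to-checkpoint tank
 */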
4087
4088 /*
4089 * zpool sync [-f] [pool] ...
4090 *
4091 * -f (undocumented) force uberblock (and config including zpool cache file)
4092 * update.
4093 *
4094 * Sync the specified pool(s).
4095 * Without arguments "zpool sync" will sync all pools.
4096 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4097 *
4098 */
4099 static int
4100 zpool_do_sync(int argc, char **argv)
4101 {
4102 int ret;
4103 boolean_t force = B_FALSE;
4104
4105 /* check options */
4106 while ((ret = getopt(argc, argv, "f")) != -1) {
4107 switch (ret) {
4108 case 'f':
4109 force = B_TRUE;
4110 break;
4111 case '?':
4112 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4113 optopt);
4114 usage(B_FALSE);
4115 }
4116 }
4117
4118 argc -= optind;
4119 argv += optind;
4120
4121 /* if argc == 0 we will execute zpool_sync_one on all pools */
4122 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4123 B_FALSE, zpool_sync_one, &force);
4124
4125 return (ret);
4126 }
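
/*
 * A minimal sketch of the callback pattern used above: for_each_pool()
 * invokes the supplied function once per pool and forwards the opaque data
 * pointer (here, &force). The callback below is hypothetical and only shows
 * the expected shape; zpool_sync_one() is the real callback.
 *
 *	static int
 *	example_pool_cb(zpool_handle_t *zhp, void *data)
 *	{
 *		boolean_t *force = data;
 *		(void) printf("%s (force=%d)\n", zpool_get_name(zhp),
 *		    (int)*force);
 *		return (0);
 *	}
 *
 * A non-zero return value from the callback propagates back through
 * for_each_pool()'s return value, as used above.
 */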
4127
4128 typedef struct iostat_cbdata {
4129 uint64_t cb_flags;
4130 int cb_namewidth;
4131 int cb_iteration;
4132 boolean_t cb_verbose;
4133 boolean_t cb_literal;
4134 boolean_t cb_scripted;
4135 zpool_list_t *cb_list;
4136 vdev_cmd_data_list_t *vcdl;
4137 vdev_cbdata_t cb_vdevs;
4138 } iostat_cbdata_t;
4139
4140 /* iostat labels */
4141 typedef struct name_and_columns {
4142 const char *name; /* Column name */
4143 unsigned int columns; /* Center name to this number of columns */
4144 } name_and_columns_t;
4145
4146 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
4147
4148 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4149 {
4150 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4151 {NULL}},
4152 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4153 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4154 {NULL}},
4155 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4156 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4157 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4158 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4159 {"asyncq_wait", 2}, {NULL}},
4160 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4161 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4162 {"trim", 2}, {"rebuild", 2}, {NULL}},
4163 };
4164
4165 /* Shorthand - if "columns" field not set, default to 1 column */
4166 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4167 {
4168 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4169 {"write"}, {NULL}},
4170 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4171 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4172 {NULL}},
4173 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4174 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4175 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4176 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4177 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4178 {NULL}},
4179 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4180 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4181 {"ind"}, {"agg"}, {NULL}},
4182 };
4183
4184 static const char *histo_to_title[] = {
4185 [IOS_L_HISTO] = "latency",
4186 [IOS_RQ_HISTO] = "req_size",
4187 };
4188
4189 /*
4190 * Return the number of labels in a null-terminated name_and_columns_t
4191 * array.
4192 *
4193 */
4194 static unsigned int
4195 label_array_len(const name_and_columns_t *labels)
4196 {
4197 int i = 0;
4198
4199 while (labels[i].name)
4200 i++;
4201
4202 return (i);
4203 }
4204
4205 /*
4206 * Return the number of strings in a null-terminated string array.
4207 * For example:
4208 *
4209 * const char *foo[] = {"bar", "baz", NULL};
4210 *
4211 * returns 2
4212 */
4213 static uint64_t
4214 str_array_len(const char *array[])
4215 {
4216 uint64_t i = 0;
4217 while (array[i])
4218 i++;
4219
4220 return (i);
4221 }
4222
4223
4224 /*
4225 * Return a default column width for default/latency/queue columns. This does
4226 * not include histograms, which have their columns autosized.
4227 */
4228 static unsigned int
4229 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4230 {
4231 unsigned long column_width = 5; /* Normal niceprint */
4232 static unsigned long widths[] = {
4233 /*
4234 * Choose some sane default column sizes for printing the
4235 * raw numbers.
4236 */
4237 [IOS_DEFAULT] = 15, /* 1PB capacity */
4238 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4239 [IOS_QUEUES] = 6, /* 1M queue entries */
4240 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4241 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4242 };
4243
4244 if (cb->cb_literal)
4245 column_width = widths[type];
4246
4247 return (column_width);
4248 }
4249
4250 /*
4251 * Print the column labels, i.e:
4252 *
4253 * capacity operations bandwidth
4254 * alloc free read write read write ...
4255 *
4256 * If force_column_width is set, use it for the column width. If not set, use
4257 * the default column width.
4258 */
4259 static void
4260 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4261 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4262 {
4263 int i, idx, s;
4264 int text_start, rw_column_width, spaces_to_end;
4265 uint64_t flags = cb->cb_flags;
4266 uint64_t f;
4267 unsigned int column_width = force_column_width;
4268
4269 /* For each bit set in flags */
4270 for (f = flags; f; f &= ~(1ULL << idx)) {
4271 idx = lowbit64(f) - 1;
4272 if (!force_column_width)
4273 column_width = default_column_width(cb, idx);
4274 /* Print our top labels centered over "read write" label. */
4275 for (i = 0; i < label_array_len(labels[idx]); i++) {
4276 const char *name = labels[idx][i].name;
4277 /*
4278 * We treat labels[][].columns == 0 as shorthand
4279 * for one column. It makes writing out the label
4280 * tables more concise.
4281 */
4282 unsigned int columns = MAX(1, labels[idx][i].columns);
4283 unsigned int slen = strlen(name);
4284
4285 rw_column_width = (column_width * columns) +
4286 (2 * (columns - 1));
4287
4288 text_start = (int)((rw_column_width) / columns -
4289 slen / columns);
4290 if (text_start < 0)
4291 text_start = 0;
4292
4293 printf(" "); /* Two spaces between columns */
4294
4295 /* Space from beginning of column to label */
4296 for (s = 0; s < text_start; s++)
4297 printf(" ");
4298
4299 printf("%s", name);
4300
4301 /* Print space after label to end of column */
4302 spaces_to_end = rw_column_width - text_start - slen;
4303 if (spaces_to_end < 0)
4304 spaces_to_end = 0;
4305
4306 for (s = 0; s < spaces_to_end; s++)
4307 printf(" ");
4308 }
4309 }
4310 }
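
/*
 * The loops above and below walk only the set bits of cb_flags rather than
 * every possible flag. A standalone sketch of the idiom (the flag combination
 * is made up):
 *
 *	uint64_t f, flags = IOS_DEFAULT_M | IOS_QUEUES_M;
 *	int idx;
 *
 *	for (f = flags; f; f &= ~(1ULL << idx)) {
 *		idx = lowbit64(f) - 1;
 *		... handle statistics type idx ...
 *	}
 *
 * lowbit64() yields the 1-based position of the lowest set bit, so idx visits
 * each enabled iostat type in ascending order, and the handled bit is cleared
 * from f before the next iteration.
 */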
4311
4312
4313 /*
4314 * print_cmd_columns - Print custom column titles from -c
4315 *
4316 * If the user specified "zpool status|iostat -c", then print their custom
4317 * column titles in the header. For example, print_cmd_columns() would print
4318 * the " col1 col2" part of this:
4319 *
4320 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4321 * ...
4322 * capacity operations bandwidth
4323 * pool alloc free read write read write col1 col2
4324 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4325 * mypool 269K 1008M 0 0 107 946
4326 * mirror 269K 1008M 0 0 107 946
4327 * sdb - - 0 0 102 473 val1 val2
4328 * sdc - - 0 0 5 473 val1 val2
4329 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4330 */
4331 static void
4332 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4333 {
4334 int i, j;
4335 vdev_cmd_data_t *data = &vcdl->data[0];
4336
4337 if (vcdl->count == 0 || data == NULL)
4338 return;
4339
4340 /*
4341 * Each vdev cmd should have the same column names unless the user did
4342 * something weird with their cmd. Just take the column names from the
4343 * first vdev and assume it works for all of them.
4344 */
4345 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4346 printf(" ");
4347 if (use_dashes) {
4348 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4349 printf("-");
4350 } else {
4351 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4352 vcdl->uniq_cols[i]);
4353 }
4354 }
4355 }
4356
4357
4358 /*
4359 * Utility function to print out a line of dashes like:
4360 *
4361 * -------------------------------- ----- ----- ----- ----- -----
4362 *
4363 * ...or a dashed named-row line like:
4364 *
4365 * logs - - - - -
4366 *
4367 * @cb: iostat data
4368 *
4369 * @force_column_width If non-zero, use the value as the column width.
4370 * Otherwise use the default column widths.
4371 *
4372 * @name: Print a dashed named-row line starting
4373 * with @name. Otherwise, print a regular
4374 * dashed line.
4375 */
4376 static void
4377 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4378 const char *name)
4379 {
4380 int i;
4381 unsigned int namewidth;
4382 uint64_t flags = cb->cb_flags;
4383 uint64_t f;
4384 int idx;
4385 const name_and_columns_t *labels;
4386 const char *title;
4387
4388
4389 if (cb->cb_flags & IOS_ANYHISTO_M) {
4390 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4391 } else if (cb->cb_vdevs.cb_names_count) {
4392 title = "vdev";
4393 } else {
4394 title = "pool";
4395 }
4396
4397 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4398 name ? strlen(name) : 0);
4399
4400
4401 if (name) {
4402 printf("%-*s", namewidth, name);
4403 } else {
4404 for (i = 0; i < namewidth; i++)
4405 (void) printf("-");
4406 }
4407
4408 /* For each bit in flags */
4409 for (f = flags; f; f &= ~(1ULL << idx)) {
4410 unsigned int column_width;
4411 idx = lowbit64(f) - 1;
4412 if (force_column_width)
4413 column_width = force_column_width;
4414 else
4415 column_width = default_column_width(cb, idx);
4416
4417 labels = iostat_bottom_labels[idx];
4418 for (i = 0; i < label_array_len(labels); i++) {
4419 if (name)
4420 printf(" %*s-", column_width - 1, " ");
4421 else
4422 printf(" %.*s", column_width,
4423 "--------------------");
4424 }
4425 }
4426 }
4427
4428
4429 static void
4430 print_iostat_separator_impl(iostat_cbdata_t *cb,
4431 unsigned int force_column_width)
4432 {
4433 print_iostat_dashes(cb, force_column_width, NULL);
4434 }
4435
4436 static void
4437 print_iostat_separator(iostat_cbdata_t *cb)
4438 {
4439 print_iostat_separator_impl(cb, 0);
4440 }
4441
4442 static void
4443 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
4444 const char *histo_vdev_name)
4445 {
4446 unsigned int namewidth;
4447 const char *title;
4448
4449 color_start(ANSI_BOLD);
4450
4451 if (cb->cb_flags & IOS_ANYHISTO_M) {
4452 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4453 } else if (cb->cb_vdevs.cb_names_count) {
4454 title = "vdev";
4455 } else {
4456 title = "pool";
4457 }
4458
4459 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4460 histo_vdev_name ? strlen(histo_vdev_name) : 0);
4461
4462 if (histo_vdev_name)
4463 printf("%-*s", namewidth, histo_vdev_name);
4464 else
4465 printf("%*s", namewidth, "");
4466
4467
4468 print_iostat_labels(cb, force_column_width, iostat_top_labels);
4469 printf("\n");
4470
4471 printf("%-*s", namewidth, title);
4472
4473 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
4474 if (cb->vcdl != NULL)
4475 print_cmd_columns(cb->vcdl, 0);
4476
4477 printf("\n");
4478
4479 print_iostat_separator_impl(cb, force_column_width);
4480
4481 if (cb->vcdl != NULL)
4482 print_cmd_columns(cb->vcdl, 1);
4483
4484 color_end();
4485
4486 printf("\n");
4487 }
4488
4489 static void
4490 print_iostat_header(iostat_cbdata_t *cb)
4491 {
4492 print_iostat_header_impl(cb, 0, NULL);
4493 }
4494
4495 /*
4496 * Prints a size string (e.g. 120M) with the suffix ("M") colored
4497 * by order of magnitude. Uses column_size to add padding.
4498 */
4499 static void
4500 print_stat_color(const char *statbuf, unsigned int column_size)
4501 {
4502 fputs(" ", stdout);
4503 size_t len = strlen(statbuf);
4504 while (len < column_size) {
4505 fputc(' ', stdout);
4506 column_size--;
4507 }
4508 if (*statbuf == '0') {
4509 color_start(ANSI_GRAY);
4510 fputc('0', stdout);
4511 } else {
4512 for (; *statbuf; statbuf++) {
4513 if (*statbuf == 'K') color_start(ANSI_GREEN);
4514 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
4515 else if (*statbuf == 'G') color_start(ANSI_RED);
4516 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
4517 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
4518 else if (*statbuf == 'E') color_start(ANSI_CYAN);
4519 fputc(*statbuf, stdout);
4520 if (--column_size <= 0)
4521 break;
4522 }
4523 }
4524 color_end();
4525 }
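
/*
 * For example (a sketch of the behavior above, not captured output): "120M"
 * prints "120" with no color and switches to ANSI_YELLOW at the 'M' suffix,
 * "3.5G" switches to ANSI_RED at 'G', and a bare "0" is printed in ANSI_GRAY.
 */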
4526
4527 /*
4528 * Display a single statistic.
4529 */
4530 static void
4531 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
4532 unsigned int column_size, boolean_t scripted)
4533 {
4534 char buf[64];
4535
4536 zfs_nicenum_format(value, buf, sizeof (buf), format);
4537
4538 if (scripted)
4539 printf("\t%s", buf);
4540 else
4541 print_stat_color(buf, column_size);
4542 }
4543
4544 /*
4545 * Calculate the default vdev stats
4546 *
4547 * Subtract oldvs from newvs and save the resulting deltas into calcvs. Any
4548 * scaling factor is applied later, when the deltas are printed.
4549 */
4550 static void
4551 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
4552 vdev_stat_t *calcvs)
4553 {
4554 int i;
4555
4556 memcpy(calcvs, newvs, sizeof (*calcvs));
4557 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
4558 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
4559
4560 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
4561 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
4562 }
4563
4564 /*
4565 * Internal representation of the extended iostats data.
4566 *
4567 * The extended iostat stats are exported in nvlists as either uint64_t arrays
4568 * or single uint64_t's. We make both look like arrays to make them easier
4569 * to process. In order to make single uint64_t's look like arrays, we set
4570 * __data to the stat data, and then set *data = &__data with count = 1. Then,
4571 * __data to the stat value, and then point data at __data with count = 1. Then,
4572 */
4573 struct stat_array {
4574 uint64_t *data;
4575 uint_t count; /* Number of entries in data[] */
4576 uint64_t __data; /* Only used when data is a single uint64_t */
4577 };
4578
4579 static uint64_t
4580 stat_histo_max(struct stat_array *nva, unsigned int len)
4581 {
4582 uint64_t max = 0;
4583 int i;
4584 for (i = 0; i < len; i++)
4585 max = MAX(max, array64_max(nva[i].data, nva[i].count));
4586
4587 return (max);
4588 }
4589
4590 /*
4591 * Helper function to lookup a uint64_t array or uint64_t value and store its
4592 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
4593 * it look like a one element array to make it easier to process.
4594 */
4595 static int
4596 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
4597 struct stat_array *nva)
4598 {
4599 nvpair_t *tmp;
4600 int ret;
4601
4602 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
4603 switch (nvpair_type(tmp)) {
4604 case DATA_TYPE_UINT64_ARRAY:
4605 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
4606 break;
4607 case DATA_TYPE_UINT64:
4608 ret = nvpair_value_uint64(tmp, &nva->__data);
4609 nva->data = &nva->__data;
4610 nva->count = 1;
4611 break;
4612 default:
4613 /* Not a uint64_t */
4614 ret = EINVAL;
4615 break;
4616 }
4617
4618 return (ret);
4619 }
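
/*
 * A minimal usage sketch (assuming "nvx" is an extended-stats nvlist such as
 * the one looked up by calc_and_alloc_stats_ex() below): whether the named
 * nvpair is a uint64_t array or a single uint64_t, the caller iterates it the
 * same way.
 *
 *	struct stat_array sa;
 *
 *	if (nvpair64_to_stat_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
 *	    &sa) == 0) {
 *		for (uint_t i = 0; i < sa.count; i++)
 *			(void) printf("%llu\n", (u_longlong_t)sa.data[i]);
 *	}
 */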
4620
4621 /*
4622 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
4623 * subtract them, and return the results in a newly allocated stat_array.
4624 * You must free the returned array after you are done with it with
4625 * free_calc_stats().
4626 *
4627 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
4628 * values.
4629 */
4630 static struct stat_array *
4631 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
4632 nvlist_t *newnv)
4633 {
4634 nvlist_t *oldnvx = NULL, *newnvx;
4635 struct stat_array *oldnva, *newnva, *calcnva;
4636 int i, j;
4637 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
4638
4639 /* Extract our extended stats nvlist from the main list */
4640 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4641 &newnvx) == 0);
4642 if (oldnv) {
4643 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4644 &oldnvx) == 0);
4645 }
4646
4647 newnva = safe_malloc(alloc_size);
4648 oldnva = safe_malloc(alloc_size);
4649 calcnva = safe_malloc(alloc_size);
4650
4651 for (j = 0; j < len; j++) {
4652 verify(nvpair64_to_stat_array(newnvx, names[j],
4653 &newnva[j]) == 0);
4654 calcnva[j].count = newnva[j].count;
4655 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
4656 calcnva[j].data = safe_malloc(alloc_size);
4657 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
4658
4659 if (oldnvx) {
4660 verify(nvpair64_to_stat_array(oldnvx, names[j],
4661 &oldnva[j]) == 0);
4662 for (i = 0; i < oldnva[j].count; i++)
4663 calcnva[j].data[i] -= oldnva[j].data[i];
4664 }
4665 }
4666 free(newnva);
4667 free(oldnva);
4668 return (calcnva);
4669 }
4670
4671 static void
4672 free_calc_stats(struct stat_array *nva, unsigned int len)
4673 {
4674 int i;
4675 for (i = 0; i < len; i++)
4676 free(nva[i].data);
4677
4678 free(nva);
4679 }
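
/*
 * A brief sketch of the allocate/free pairing (the nvlist handles "oldnv" and
 * "newnv" are placeholders for the vdev configs used throughout this file):
 *
 *	const char *names[] = { ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO };
 *	struct stat_array *nva;
 *
 *	nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
 *	... use nva[0].data[0 .. nva[0].count - 1] ...
 *	free_calc_stats(nva, ARRAY_SIZE(names));
 */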
4680
4681 static void
4682 print_iostat_histo(struct stat_array *nva, unsigned int len,
4683 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
4684 double scale)
4685 {
4686 int i, j;
4687 char buf[6];
4688 uint64_t val;
4689 enum zfs_nicenum_format format;
4690 unsigned int buckets;
4691 unsigned int start_bucket;
4692
4693 if (cb->cb_literal)
4694 format = ZFS_NICENUM_RAW;
4695 else
4696 format = ZFS_NICENUM_1024;
4697
4698 /* All these histos are the same size, so just use nva[0].count */
4699 buckets = nva[0].count;
4700
4701 if (cb->cb_flags & IOS_RQ_HISTO_M) {
4702 /* Start at 512 - req size should never be lower than this */
4703 start_bucket = 9;
4704 } else {
4705 start_bucket = 0;
4706 }
4707
4708 for (j = start_bucket; j < buckets; j++) {
4709 /* Print histogram bucket label */
4710 if (cb->cb_flags & IOS_L_HISTO_M) {
4711 /* Ending range of this bucket */
4712 val = (1UL << (j + 1)) - 1;
4713 zfs_nicetime(val, buf, sizeof (buf));
4714 } else {
4715 /* Request size (starting range of bucket) */
4716 val = (1UL << j);
4717 zfs_nicenum(val, buf, sizeof (buf));
4718 }
4719
4720 if (cb->cb_scripted)
4721 printf("%llu", (u_longlong_t)val);
4722 else
4723 printf("%-*s", namewidth, buf);
4724
4725 /* Print the values on the line */
4726 for (i = 0; i < len; i++) {
4727 print_one_stat(nva[i].data[j] * scale, format,
4728 column_width, cb->cb_scripted);
4729 }
4730 printf("\n");
4731 }
4732 }
4733
4734 static void
4735 print_solid_separator(unsigned int length)
4736 {
4737 while (length--)
4738 printf("-");
4739 printf("\n");
4740 }
4741
4742 static void
4743 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
4744 nvlist_t *newnv, double scale, const char *name)
4745 {
4746 unsigned int column_width;
4747 unsigned int namewidth;
4748 unsigned int entire_width;
4749 enum iostat_type type;
4750 struct stat_array *nva;
4751 const char **names;
4752 unsigned int names_len;
4753
4754 /* What type of histo are we? */
4755 type = IOS_HISTO_IDX(cb->cb_flags);
4756
4757 /* Get NULL-terminated array of nvlist names for our histo */
4758 names = vsx_type_to_nvlist[type];
4759 names_len = str_array_len(names); /* num of names */
4760
4761 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
4762
4763 if (cb->cb_literal) {
4764 column_width = MAX(5,
4765 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
4766 } else {
4767 column_width = 5;
4768 }
4769
4770 namewidth = MAX(cb->cb_namewidth,
4771 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
4772
4773 /*
4774 * Calculate the entire line width of what we're printing. The
4775 * +2 is for the two spaces between columns:
4776 */
4777 /* read write */
4778 /* ----- ----- */
4779 /* |___| <---------- column_width */
4780 /* */
4781 /* |__________| <--- entire_width */
4782 /* */
4783 entire_width = namewidth + (column_width + 2) *
4784 label_array_len(iostat_bottom_labels[type]);
4785
4786 if (cb->cb_scripted)
4787 printf("%s\n", name);
4788 else
4789 print_iostat_header_impl(cb, column_width, name);
4790
4791 print_iostat_histo(nva, names_len, cb, column_width,
4792 namewidth, scale);
4793
4794 free_calc_stats(nva, names_len);
4795 if (!cb->cb_scripted)
4796 print_solid_separator(entire_width);
4797 }
4798
4799 /*
4800 * Calculate the average latency of a power-of-two latency histogram
4801 */
4802 static uint64_t
4803 single_histo_average(uint64_t *histo, unsigned int buckets)
4804 {
4805 int i;
4806 uint64_t count = 0, total = 0;
4807
4808 for (i = 0; i < buckets; i++) {
4809 /*
4810 * Our buckets are power-of-two latency ranges. Use the
4811 * midpoint latency of each bucket to calculate the average.
4812 * For example:
4813 *
4814 * Bucket Midpoint
4815 * 8ns-15ns: 12ns
4816 * 16ns-31ns: 24ns
4817 * ...
4818 */
4819 if (histo[i] != 0) {
4820 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
4821 count += histo[i];
4822 }
4823 }
4824
4825 /* Prevent divide by zero */
4826 return (count == 0 ? 0 : total / count);
4827 }
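
/*
 * Worked example (made-up counts): a histogram with 3 I/Os in bucket 4
 * (16ns-31ns, midpoint 24ns) and 1 I/O in bucket 5 (32ns-63ns, midpoint 48ns)
 * averages to (3 * 24 + 1 * 48) / 4 = 30ns.
 */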
4828
4829 static void
4830 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
4831 {
4832 const char *names[] = {
4833 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
4834 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
4835 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
4836 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
4837 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
4838 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
4839 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
4840 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
4841 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
4842 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
4843 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
4844 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
4845 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
4846 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
4847 };
4848
4849 struct stat_array *nva;
4850
4851 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
4852 enum zfs_nicenum_format format;
4853
4854 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
4855
4856 if (cb->cb_literal)
4857 format = ZFS_NICENUM_RAW;
4858 else
4859 format = ZFS_NICENUM_1024;
4860
4861 for (int i = 0; i < ARRAY_SIZE(names); i++) {
4862 uint64_t val = nva[i].data[0];
4863 print_one_stat(val, format, column_width, cb->cb_scripted);
4864 }
4865
4866 free_calc_stats(nva, ARRAY_SIZE(names));
4867 }
4868
4869 static void
4870 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
4871 nvlist_t *newnv)
4872 {
4873 int i;
4874 uint64_t val;
4875 const char *names[] = {
4876 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
4877 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
4878 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
4879 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
4880 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
4881 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
4882 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
4883 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
4884 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
4885 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
4886 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
4887 };
4888 struct stat_array *nva;
4889
4890 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
4891 enum zfs_nicenum_format format;
4892
4893 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
4894
4895 if (cb->cb_literal)
4896 format = ZFS_NICENUM_RAWTIME;
4897 else
4898 format = ZFS_NICENUM_TIME;
4899
4900 /* Print our avg latencies on the line */
4901 for (i = 0; i < ARRAY_SIZE(names); i++) {
4902 /* Compute average latency for a latency histo */
4903 val = single_histo_average(nva[i].data, nva[i].count);
4904 print_one_stat(val, format, column_width, cb->cb_scripted);
4905 }
4906 free_calc_stats(nva, ARRAY_SIZE(names));
4907 }
4908
4909 /*
4910 * Print default statistics (capacity/operations/bandwidth)
4911 */
4912 static void
4913 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
4914 {
4915 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
4916 enum zfs_nicenum_format format;
4917 char na; /* char to print for "not applicable" values */
4918
4919 if (cb->cb_literal) {
4920 format = ZFS_NICENUM_RAW;
4921 na = '0';
4922 } else {
4923 format = ZFS_NICENUM_1024;
4924 na = '-';
4925 }
4926
4927 /* only toplevel vdevs have capacity stats */
4928 if (vs->vs_space == 0) {
4929 if (cb->cb_scripted)
4930 printf("\t%c\t%c", na, na);
4931 else
4932 printf(" %*c %*c", column_width, na, column_width,
4933 na);
4934 } else {
4935 print_one_stat(vs->vs_alloc, format, column_width,
4936 cb->cb_scripted);
4937 print_one_stat(vs->vs_space - vs->vs_alloc, format,
4938 column_width, cb->cb_scripted);
4939 }
4940
4941 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
4942 format, column_width, cb->cb_scripted);
4943 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
4944 format, column_width, cb->cb_scripted);
4945 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
4946 format, column_width, cb->cb_scripted);
4947 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
4948 format, column_width, cb->cb_scripted);
4949 }
4950
4951 static const char *const class_name[] = {
4952 VDEV_ALLOC_BIAS_DEDUP,
4953 VDEV_ALLOC_BIAS_SPECIAL,
4954 VDEV_ALLOC_CLASS_LOGS
4955 };
4956
4957 /*
4958 * Print out all the statistics for the given vdev. This can either be the
4959 * toplevel configuration, or called recursively. If 'name' is NULL, then this
4960 * is a verbose output, and we don't want to display the toplevel pool stats.
4961 *
4962 * Returns the number of stat lines printed.
4963 */
4964 static unsigned int
4965 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
4966 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
4967 {
4968 nvlist_t **oldchild, **newchild;
4969 uint_t c, children, oldchildren;
4970 vdev_stat_t *oldvs, *newvs, *calcvs;
4971 vdev_stat_t zerovs = { 0 };
4972 char *vname;
4973 int i;
4974 int ret = 0;
4975 uint64_t tdelta;
4976 double scale;
4977
4978 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
4979 return (ret);
4980
4981 calcvs = safe_malloc(sizeof (*calcvs));
4982
4983 if (oldnv != NULL) {
4984 verify(nvlist_lookup_uint64_array(oldnv,
4985 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
4986 } else {
4987 oldvs = &zerovs;
4988 }
4989
4990 /* Do we only want to see a specific vdev? */
4991 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
4992 /* Yes we do. Is this the vdev? */
4993 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
4994 /*
4995 * This is our vdev. Since it is the only vdev we
4996 * will be displaying, make depth = 0 so that it
4997 * doesn't get indented.
4998 */
4999 depth = 0;
5000 break;
5001 }
5002 }
5003
5004 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5005 /* Couldn't match the name */
5006 goto children;
5007 }
5008
5009
5010 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5011 (uint64_t **)&newvs, &c) == 0);
5012
5013 /*
5014 * Print the vdev name unless it's a histogram. Histograms
5015 * display the vdev name in the header itself.
5016 */
5017 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5018 if (cb->cb_scripted) {
5019 printf("%s", name);
5020 } else {
5021 if (strlen(name) + depth > cb->cb_namewidth)
5022 (void) printf("%*s%s", depth, "", name);
5023 else
5024 (void) printf("%*s%s%*s", depth, "", name,
5025 (int)(cb->cb_namewidth - strlen(name) -
5026 depth), "");
5027 }
5028 }
5029
5030 /* Calculate our scaling factor */
5031 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5032 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5033 /*
5034 * If we specify printing histograms with no time interval, then
5035 * print the histogram numbers over the entire lifetime of the
5036 * vdev.
5037 */
5038 scale = 1;
5039 } else {
5040 if (tdelta == 0)
5041 scale = 1.0;
5042 else
5043 scale = (double)NANOSEC / tdelta;
5044 }
5045
5046 if (cb->cb_flags & IOS_DEFAULT_M) {
5047 calc_default_iostats(oldvs, newvs, calcvs);
5048 print_iostat_default(calcvs, cb, scale);
5049 }
5050 if (cb->cb_flags & IOS_LATENCY_M)
5051 print_iostat_latency(cb, oldnv, newnv);
5052 if (cb->cb_flags & IOS_QUEUES_M)
5053 print_iostat_queues(cb, newnv);
5054 if (cb->cb_flags & IOS_ANYHISTO_M) {
5055 printf("\n");
5056 print_iostat_histos(cb, oldnv, newnv, scale, name);
5057 }
5058
5059 if (cb->vcdl != NULL) {
5060 const char *path;
5061 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5062 &path) == 0) {
5063 printf(" ");
5064 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5065 }
5066 }
5067
5068 if (!(cb->cb_flags & IOS_ANYHISTO_M))
5069 printf("\n");
5070
5071 ret++;
5072
5073 children:
5074
5075 free(calcvs);
5076
5077 if (!cb->cb_verbose)
5078 return (ret);
5079
5080 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5081 &newchild, &children) != 0)
5082 return (ret);
5083
5084 if (oldnv) {
5085 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5086 &oldchild, &oldchildren) != 0)
5087 return (ret);
5088
5089 children = MIN(oldchildren, children);
5090 }
5091
5092 /*
5093 * print normal top-level devices
5094 */
5095 for (c = 0; c < children; c++) {
5096 uint64_t ishole = B_FALSE, islog = B_FALSE;
5097
5098 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5099 &ishole);
5100
5101 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5102 &islog);
5103
5104 if (ishole || islog)
5105 continue;
5106
5107 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5108 continue;
5109
5110 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5111 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5112 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5113 newchild[c], cb, depth + 2);
5114 free(vname);
5115 }
5116
5117 /*
5118 * print all other top-level devices
5119 */
5120 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5121 boolean_t printed = B_FALSE;
5122
5123 for (c = 0; c < children; c++) {
5124 uint64_t islog = B_FALSE;
5125 const char *bias = NULL;
5126 const char *type = NULL;
5127
5128 (void) nvlist_lookup_uint64(newchild[c],
5129 ZPOOL_CONFIG_IS_LOG, &islog);
5130 if (islog) {
5131 bias = VDEV_ALLOC_CLASS_LOGS;
5132 } else {
5133 (void) nvlist_lookup_string(newchild[c],
5134 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5135 (void) nvlist_lookup_string(newchild[c],
5136 ZPOOL_CONFIG_TYPE, &type);
5137 }
5138 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5139 continue;
5140 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5141 continue;
5142
5143 if (!printed) {
5144 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5145 !cb->cb_scripted &&
5146 !cb->cb_vdevs.cb_names) {
5147 print_iostat_dashes(cb, 0,
5148 class_name[n]);
5149 }
5150 printf("\n");
5151 printed = B_TRUE;
5152 }
5153
5154 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5155 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5156 ret += print_vdev_stats(zhp, vname, oldnv ?
5157 oldchild[c] : NULL, newchild[c], cb, depth + 2);
5158 free(vname);
5159 }
5160 }
5161
5162 /*
5163 * Include level 2 ARC devices in iostat output
5164 */
5165 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5166 &newchild, &children) != 0)
5167 return (ret);
5168
5169 if (oldnv) {
5170 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5171 &oldchild, &oldchildren) != 0)
5172 return (ret);
5173
5174 children = MIN(oldchildren, children);
5175 }
5176
5177 if (children > 0) {
5178 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5179 !cb->cb_vdevs.cb_names) {
5180 print_iostat_dashes(cb, 0, "cache");
5181 }
5182 printf("\n");
5183
5184 for (c = 0; c < children; c++) {
5185 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5186 cb->cb_vdevs.cb_name_flags);
5187 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5188 : NULL, newchild[c], cb, depth + 2);
5189 free(vname);
5190 }
5191 }
5192
5193 return (ret);
5194 }
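
/*
 * A worked example of the scaling above (made-up numbers): with a 2-second
 * sampling interval, tdelta is roughly 2 * NANOSEC, so scale is about 0.5;
 * a read-op delta of 100 between the two snapshots is then reported as
 * 100 * 0.5 = 50 ops per second.
 */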
5195
5196 static int
5197 refresh_iostat(zpool_handle_t *zhp, void *data)
5198 {
5199 iostat_cbdata_t *cb = data;
5200 boolean_t missing;
5201
5202 /*
5203 * If the pool has disappeared, remove it from the list and continue.
5204 */
5205 if (zpool_refresh_stats(zhp, &missing) != 0)
5206 return (-1);
5207
5208 if (missing)
5209 pool_list_remove(cb->cb_list, zhp);
5210
5211 return (0);
5212 }
5213
5214 /*
5215 * Callback to print out the iostats for the given pool.
5216 */
5217 static int
5218 print_iostat(zpool_handle_t *zhp, void *data)
5219 {
5220 iostat_cbdata_t *cb = data;
5221 nvlist_t *oldconfig, *newconfig;
5222 nvlist_t *oldnvroot, *newnvroot;
5223 int ret;
5224
5225 newconfig = zpool_get_config(zhp, &oldconfig);
5226
5227 if (cb->cb_iteration == 1)
5228 oldconfig = NULL;
5229
5230 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5231 &newnvroot) == 0);
5232
5233 if (oldconfig == NULL)
5234 oldnvroot = NULL;
5235 else
5236 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5237 &oldnvroot) == 0);
5238
5239 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5240 cb, 0);
5241 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5242 !cb->cb_scripted && cb->cb_verbose &&
5243 !cb->cb_vdevs.cb_names_count) {
5244 print_iostat_separator(cb);
5245 if (cb->vcdl != NULL) {
5246 print_cmd_columns(cb->vcdl, 1);
5247 }
5248 printf("\n");
5249 }
5250
5251 return (ret);
5252 }
5253
5254 static int
5255 get_columns(void)
5256 {
5257 struct winsize ws;
5258 int columns = 80;
5259 int error;
5260
5261 if (isatty(STDOUT_FILENO)) {
5262 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5263 if (error == 0)
5264 columns = ws.ws_col;
5265 } else {
5266 columns = 999;
5267 }
5268
5269 return (columns);
5270 }
5271
5272 /*
5273 * Return the required length of the pool/vdev name column. The minimum
5274 * allowed width and output formatting flags must be provided.
5275 */
5276 static int
5277 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5278 {
5279 nvlist_t *config, *nvroot;
5280 int width = min_width;
5281
5282 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5283 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5284 &nvroot) == 0);
5285 size_t poolname_len = strlen(zpool_get_name(zhp));
5286 if (verbose == B_FALSE) {
5287 width = MAX(poolname_len, min_width);
5288 } else {
5289 width = MAX(poolname_len,
5290 max_width(zhp, nvroot, 0, min_width, flags));
5291 }
5292 }
5293
5294 return (width);
5295 }
5296
5297 /*
5298 * Parse the input arguments and extract the 'interval' and 'count' values, if present.
5299 */
5300 static void
5301 get_interval_count(int *argcp, char **argv, float *iv,
5302 unsigned long *cnt)
5303 {
5304 float interval = 0;
5305 unsigned long count = 0;
5306 int argc = *argcp;
5307
5308 /*
5309 * Determine if the last argument is an integer or a pool name
5310 */
5311 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5312 char *end;
5313
5314 errno = 0;
5315 interval = strtof(argv[argc - 1], &end);
5316
5317 if (*end == '\0' && errno == 0) {
5318 if (interval == 0) {
5319 (void) fprintf(stderr, gettext(
5320 "interval cannot be zero\n"));
5321 usage(B_FALSE);
5322 }
5323 /*
5324 * Ignore the last parameter
5325 */
5326 argc--;
5327 } else {
5328 /*
5329 * If this is not a valid number, just plow on. The
5330 * user will get a more informative error message later
5331 * on.
5332 */
5333 interval = 0;
5334 }
5335 }
5336
5337 /*
5338 * If the last argument is also an integer, then we have both a count
5339 * and an interval.
5340 */
5341 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5342 char *end;
5343
5344 errno = 0;
5345 count = interval;
5346 interval = strtof(argv[argc - 1], &end);
5347
5348 if (*end == '\0' && errno == 0) {
5349 if (interval == 0) {
5350 (void) fprintf(stderr, gettext(
5351 "interval cannot be zero\n"));
5352 usage(B_FALSE);
5353 }
5354
5355 /*
5356 * Ignore the last parameter
5357 */
5358 argc--;
5359 } else {
5360 interval = 0;
5361 }
5362 }
5363
5364 *iv = interval;
5365 *cnt = count;
5366 *argcp = argc;
5367 }
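
/*
 * Parsing examples (hypothetical command lines): for "zpool iostat tank 5"
 * the trailing "5" becomes interval = 5 and count is left at 0; for
 * "zpool iostat tank 5 10" the last number is read first as the interval and
 * then shifted into count when a second number is found, yielding
 * interval = 5 and count = 10.
 */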
5368
5369 static void
5370 get_timestamp_arg(char c)
5371 {
5372 if (c == 'u')
5373 timestamp_fmt = UDATE;
5374 else if (c == 'd')
5375 timestamp_fmt = DDATE;
5376 else
5377 usage(B_FALSE);
5378 }
5379
5380 /*
5381 * Return the stat flags that are supported on all pools by both the module and
5382 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5383 * It will get ANDed down until only the flags that are supported on all pools
5384 * remain.
5385 */
5386 static int
5387 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5388 {
5389 uint64_t *mask = data;
5390 nvlist_t *config, *nvroot, *nvx;
5391 uint64_t flags = 0;
5392 int i, j;
5393
5394 config = zpool_get_config(zhp, NULL);
5395 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5396 &nvroot) == 0);
5397
5398 /* Default stats are always supported, but for completeness.. */
5399 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5400 flags |= IOS_DEFAULT_M;
5401
5402 /* Get our extended stats nvlist from the main list */
5403 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5404 &nvx) != 0) {
5405 /*
5406 * No extended stats; they're probably running an older
5407 * module. No big deal, we support that too.
5408 */
5409 goto end;
5410 }
5411
5412 /* For each extended stat, make sure all its nvpairs are supported */
5413 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5414 if (!vsx_type_to_nvlist[j][0])
5415 continue;
5416
5417 /* Start off by assuming the flag is supported, then check */
5418 flags |= (1ULL << j);
5419 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5420 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5421 /* flag isn't supported */
5422 flags = flags & ~(1ULL << j);
5423 break;
5424 }
5425 }
5426 }
5427 end:
5428 *mask = *mask & flags;
5429 return (0);
5430 }
5431
5432 /*
5433 * Return a bitmask of stats that are supported on all pools by both the module
5434 * and zpool iostat.
5435 */
5436 static uint64_t
5437 get_stat_flags(zpool_list_t *list)
5438 {
5439 uint64_t mask = -1;
5440
5441 /*
5442 * get_stat_flags_cb() will lop off bits from "mask" until only the
5443 * flags that are supported on all pools remain.
5444 */
5445 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
5446 return (mask);
5447 }
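
/*
 * Example of the narrowing (a hypothetical two-pool system): the mask starts
 * as all 1s; if pool A supports IOS_DEFAULT_M | IOS_LATENCY_M | IOS_QUEUES_M
 * but pool B (running an older module) supports only IOS_DEFAULT_M, the
 * successive ANDs leave just IOS_DEFAULT_M set, so only stats common to every
 * pool are displayed.
 */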
5448
5449 /*
5450 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
5451 */
5452 static int
5453 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
5454 {
5455 uint64_t guid;
5456 vdev_cbdata_t *cb = cb_data;
5457 zpool_handle_t *zhp = zhp_data;
5458
5459 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
5460 return (0);
5461
5462 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
5463 }
5464
5465 /*
5466 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
5467 */
5468 static int
5469 is_vdev(zpool_handle_t *zhp, void *cb_data)
5470 {
5471 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
5472 }
5473
5474 /*
5475 * Check if vdevs are in a pool
5476 *
5477 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
5478 * return 0. If pool_name is NULL, then search all pools.
5479 */
5480 static int
5481 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
5482 vdev_cbdata_t *cb)
5483 {
5484 char **tmp_name;
5485 int ret = 0;
5486 int i;
5487 int pool_count = 0;
5488
5489 if ((argc == 0) || !*argv)
5490 return (0);
5491
5492 if (pool_name)
5493 pool_count = 1;
5494
5495 /* Temporarily hijack cb_names for a second... */
5496 tmp_name = cb->cb_names;
5497
5498 /* Go through our list of prospective vdev names */
5499 for (i = 0; i < argc; i++) {
5500 cb->cb_names = argv + i;
5501
5502 /* Is this name a vdev in our pools? */
5503 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
5504 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
5505 if (!ret) {
5506 /* No match */
5507 break;
5508 }
5509 }
5510
5511 cb->cb_names = tmp_name;
5512
5513 return (ret);
5514 }
5515
5516 static int
5517 is_pool_cb(zpool_handle_t *zhp, void *data)
5518 {
5519 char *name = data;
5520 if (strcmp(name, zpool_get_name(zhp)) == 0)
5521 return (1);
5522
5523 return (0);
5524 }
5525
5526 /*
5527 * Do we have a pool named *name? If so, return 1, otherwise 0.
5528 */
5529 static int
5530 is_pool(char *name)
5531 {
5532 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
5533 is_pool_cb, name));
5534 }
5535
5536 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
5537 static int
5538 are_all_pools(int argc, char **argv)
5539 {
5540 if ((argc == 0) || !*argv)
5541 return (0);
5542
5543 while (--argc >= 0)
5544 if (!is_pool(argv[argc]))
5545 return (0);
5546
5547 return (1);
5548 }
5549
5550 /*
5551 * Helper function to print out vdev/pool names we can't resolve. Used for an
5552 * error message.
5553 */
5554 static void
5555 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
5556 vdev_cbdata_t *cb)
5557 {
5558 int i;
5559 char *name;
5560 char *str;
5561 for (i = 0; i < argc; i++) {
5562 name = argv[i];
5563
5564 if (is_pool(name))
5565 str = gettext("pool");
5566 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
5567 str = gettext("vdev in this pool");
5568 else if (are_vdevs_in_pool(1, &name, NULL, cb))
5569 str = gettext("vdev in another pool");
5570 else
5571 str = gettext("unknown");
5572
5573 fprintf(stderr, "\t%s (%s)\n", name, str);
5574 }
5575 }
5576
5577 /*
5578 * Same as get_interval_count(), but with additional checks to not misinterpret
5579 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
5580 * cb.cb_vdevs.cb_name_flags.
5581 */
5582 static void
5583 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
5584 unsigned long *count, iostat_cbdata_t *cb)
5585 {
5586 char **tmpargv = argv;
5587 int argc_for_interval = 0;
5588
5589 /* Is the last arg an interval value? Or a guid? */
5590 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
5591 &cb->cb_vdevs)) {
5592 /*
5593 * The last arg is not a guid, so it's probably an
5594 * interval value.
5595 */
5596 argc_for_interval++;
5597
5598 if (*argc >= 2 &&
5599 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
5600 &cb->cb_vdevs)) {
5601 /*
5602 * The 2nd to last arg is not a guid, so it's probably
5603 * an interval value.
5604 */
5605 argc_for_interval++;
5606 }
5607 }
5608
5609 /* Point to our list of possible intervals */
5610 tmpargv = &argv[*argc - argc_for_interval];
5611
5612 *argc = *argc - argc_for_interval;
5613 get_interval_count(&argc_for_interval, tmpargv,
5614 interval, count);
5615 }
5616
5617 /*
5618 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
5619 * if we were unable to determine its size.
5620 */
5621 static int
5622 terminal_height(void)
5623 {
5624 struct winsize win;
5625
5626 if (isatty(STDOUT_FILENO) == 0)
5627 return (-1);
5628
5629 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
5630 return (win.ws_row);
5631
5632 return (-1);
5633 }
5634
5635 /*
5636 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
5637 * print the result.
5638 *
5639 * name: Short name of the script ('iostat').
5640 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
5641 */
5642 static void
5643 print_zpool_script_help(char *name, char *path)
5644 {
5645 char *argv[] = {path, (char *)"-h", NULL};
5646 char **lines = NULL;
5647 int lines_cnt = 0;
5648 int rc;
5649
5650 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
5651 &lines_cnt);
5652 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
5653 if (lines != NULL)
5654 libzfs_free_str_array(lines, lines_cnt);
5655 return;
5656 }
5657
5658 for (int i = 0; i < lines_cnt; i++)
5659 if (!is_blank_str(lines[i]))
5660 printf(" %-14s %s\n", name, lines[i]);
5661
5662 libzfs_free_str_array(lines, lines_cnt);
5663 }
5664
5665 /*
5666 * Go through the zpool status/iostat -c scripts in the user's path, run their
5667 * help option (-h), and print out the results.
5668 */
5669 static void
5670 print_zpool_dir_scripts(char *dirpath)
5671 {
5672 DIR *dir;
5673 struct dirent *ent;
5674 char fullpath[MAXPATHLEN];
5675 struct stat dir_stat;
5676
5677 if ((dir = opendir(dirpath)) != NULL) {
5678 /* examine each entry in the directory and print the scripts */
5679 while ((ent = readdir(dir)) != NULL) {
5680 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
5681 dirpath, ent->d_name) >= sizeof (fullpath)) {
5682 (void) fprintf(stderr,
5683 gettext("internal error: "
5684 "ZPOOL_SCRIPTS_PATH too large.\n"));
5685 exit(1);
5686 }
5687
5688 /* Print the scripts */
5689 if (stat(fullpath, &dir_stat) == 0)
5690 if (dir_stat.st_mode & S_IXUSR &&
5691 S_ISREG(dir_stat.st_mode))
5692 print_zpool_script_help(ent->d_name,
5693 fullpath);
5694 }
5695 closedir(dir);
5696 }
5697 }
5698
5699 /*
5700 * Print out help text for all zpool status/iostat -c scripts.
5701 */
5702 static void
5703 print_zpool_script_list(const char *subcommand)
5704 {
5705 char *dir, *sp, *tmp;
5706
5707 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
5708
5709 sp = zpool_get_cmd_search_path();
5710 if (sp == NULL)
5711 return;
5712
5713 for (dir = strtok_r(sp, ":", &tmp);
5714 dir != NULL;
5715 dir = strtok_r(NULL, ":", &tmp))
5716 print_zpool_dir_scripts(dir);
5717
5718 free(sp);
5719 }
5720
5721 /*
5722 * Set the minimum pool/vdev name column width. The width must be at least 10,
5723 * but may be as large as the terminal width - 42 so it still fits on one line.
5724 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
5725 */
5726 static int
5727 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
5728 {
5729 iostat_cbdata_t *cb = data;
5730 int width, available_width;
5731
5732 /*
5733 * get_namewidth() returns the maximum width of any name in that column
5734 * for any pool/vdev/device line that will be output.
5735 */
5736 width = get_namewidth(zhp, cb->cb_namewidth,
5737 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
5738
5739 /*
5740 * The width we are calculating is the width of the header and also the
5741 * padding width for names that are less than maximum width. The stats
5742 * take up 42 characters, so the width available for names is:
5743 */
5744 available_width = get_columns() - 42;
5745
5746 /*
5747 * If the maximum width fits on a screen, then great! Make everything
5748 * line up by justifying all lines to the same width. If that max
5749 * width is larger than what's available, the name plus stats won't fit
5750 * on one line, and justifying to that width would cause every line to
5751 * wrap on the screen. We only want lines with long names to wrap.
5752 * Limit the padding to what won't wrap.
5753 */
5754 if (width > available_width)
5755 width = available_width;
5756
5757 * And regardless of the screen width (get_columns() can return 0 if
5758 * the width is not known, or less than 42 on a narrow terminal),
5759 * enforce a minimum width of 10.
5760 * terminal) have the width be a minimum of 10.
5761 */
5762 if (width < 10)
5763 width = 10;
5764
5765 /* Save the calculated width */
5766 cb->cb_namewidth = width;
5767
5768 return (0);
5769 }
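
/*
 * Worked example (assuming an 80-column terminal): available_width is
 * 80 - 42 = 38. A longest name of 24 characters yields a namewidth of 24,
 * while a 50-character name is clamped to 38 so that only the long lines
 * wrap; both results are already above the 10-column floor.
 */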
5770
5771 /*
5772 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
5773 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
5774 * [interval [count]]
5775 *
5776 * -c CMD For each vdev, run command CMD
5777 * -g Display guid for individual vdev name.
5778 * -L Follow links when resolving vdev path name.
5779 * -P Display full path for vdev name.
5780 * -v Display statistics for individual vdevs
5781 * -h Display help
5782 * -p Display values in parsable (exact) format.
5783 * -H Scripted mode. Don't display headers, and separate properties
5784 * by a single tab.
5785 * -l Display average latency
5786 * -q Display queue depths
5787 * -w Display latency histograms
5788 * -r Display request size histogram
5789 * -T Display a timestamp in date(1) or Unix format
5790 * -n Only print headers once
5791 *
5792 * This command can be tricky because we want to be able to deal with pool
5793 * creation/destruction as well as vdev configuration changes. The bulk of this
5794 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
5795 * on pool_list_update() to detect the addition of new pools. Configuration
5796 * changes are all handled within libzfs.
5797 */
5798 int
5799 zpool_do_iostat(int argc, char **argv)
5800 {
5801 int c;
5802 int ret;
5803 int npools;
5804 float interval = 0;
5805 unsigned long count = 0;
5806 int winheight = 24;
5807 zpool_list_t *list;
5808 boolean_t verbose = B_FALSE;
5809 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
5810 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
5811 boolean_t omit_since_boot = B_FALSE;
5812 boolean_t guid = B_FALSE;
5813 boolean_t follow_links = B_FALSE;
5814 boolean_t full_name = B_FALSE;
5815 boolean_t headers_once = B_FALSE;
5816 iostat_cbdata_t cb = { 0 };
5817 char *cmd = NULL;
5818
5819 /* Used for printing error message */
5820 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
5821 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
5822
5823 uint64_t unsupported_flags;
5824
5825 /* check options */
5826 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
5827 switch (c) {
5828 case 'c':
5829 if (cmd != NULL) {
5830 fprintf(stderr,
5831 gettext("Can't set -c flag twice\n"));
5832 exit(1);
5833 }
5834
5835 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
5836 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
5837 fprintf(stderr, gettext(
5838 "Can't run -c, disabled by "
5839 "ZPOOL_SCRIPTS_ENABLED.\n"));
5840 exit(1);
5841 }
5842
5843 if ((getuid() <= 0 || geteuid() <= 0) &&
5844 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
5845 fprintf(stderr, gettext(
5846 "Can't run -c with root privileges "
5847 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
5848 exit(1);
5849 }
5850 cmd = optarg;
5851 verbose = B_TRUE;
5852 break;
5853 case 'g':
5854 guid = B_TRUE;
5855 break;
5856 case 'L':
5857 follow_links = B_TRUE;
5858 break;
5859 case 'P':
5860 full_name = B_TRUE;
5861 break;
5862 case 'T':
5863 get_timestamp_arg(*optarg);
5864 break;
5865 case 'v':
5866 verbose = B_TRUE;
5867 break;
5868 case 'p':
5869 parsable = B_TRUE;
5870 break;
5871 case 'l':
5872 latency = B_TRUE;
5873 break;
5874 case 'q':
5875 queues = B_TRUE;
5876 break;
5877 case 'H':
5878 scripted = B_TRUE;
5879 break;
5880 case 'w':
5881 l_histo = B_TRUE;
5882 break;
5883 case 'r':
5884 rq_histo = B_TRUE;
5885 break;
5886 case 'y':
5887 omit_since_boot = B_TRUE;
5888 break;
5889 case 'n':
5890 headers_once = B_TRUE;
5891 break;
5892 case 'h':
5893 usage(B_FALSE);
5894 break;
5895 case '?':
5896 if (optopt == 'c') {
5897 print_zpool_script_list("iostat");
5898 exit(0);
5899 } else {
5900 fprintf(stderr,
5901 gettext("invalid option '%c'\n"), optopt);
5902 }
5903 usage(B_FALSE);
5904 }
5905 }
5906
5907 argc -= optind;
5908 argv += optind;
5909
5910 cb.cb_literal = parsable;
5911 cb.cb_scripted = scripted;
5912
5913 if (guid)
5914 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
5915 if (follow_links)
5916 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
5917 if (full_name)
5918 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
5919 cb.cb_iteration = 0;
5920 cb.cb_namewidth = 0;
5921 cb.cb_verbose = verbose;
5922
5923 /* Get our interval and count values (if any) */
5924 if (guid) {
5925 get_interval_count_filter_guids(&argc, argv, &interval,
5926 &count, &cb);
5927 } else {
5928 get_interval_count(&argc, argv, &interval, &count);
5929 }
5930
5931 if (argc == 0) {
5932 /* No args, so just print the defaults. */
5933 } else if (are_all_pools(argc, argv)) {
5934 /* All the args are pool names */
5935 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
5936 /* All the args are vdevs */
5937 cb.cb_vdevs.cb_names = argv;
5938 cb.cb_vdevs.cb_names_count = argc;
5939 argc = 0; /* No pools to process */
5940 } else if (are_all_pools(1, argv)) {
5941 /* The first arg is a pool name */
5942 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
5943 &cb.cb_vdevs)) {
5944 /* ...and the rest are vdev names */
5945 cb.cb_vdevs.cb_names = argv + 1;
5946 cb.cb_vdevs.cb_names_count = argc - 1;
5947 argc = 1; /* One pool to process */
5948 } else {
5949 fprintf(stderr, gettext("Expected either a list of "));
5950 fprintf(stderr, gettext("pools, or a list of vdevs in"));
5951 fprintf(stderr, " \"%s\", ", argv[0]);
5952 fprintf(stderr, gettext("but got:\n"));
5953 error_list_unresolved_vdevs(argc - 1, argv + 1,
5954 argv[0], &cb.cb_vdevs);
5955 fprintf(stderr, "\n");
5956 usage(B_FALSE);
5957 return (1);
5958 }
5959 } else {
5960 /*
5961 * The args don't make sense. The first arg isn't a pool name,
5962 * nor are all the args vdevs.
5963 */
5964 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
5965 fprintf(stderr, "\n");
5966 return (1);
5967 }
5968
5969 if (cb.cb_vdevs.cb_names_count != 0) {
5970 /*
5971 * If user specified vdevs, it implies verbose.
5972 */
5973 cb.cb_verbose = B_TRUE;
5974 }
5975
5976 /*
5977 * Construct the list of all interesting pools.
5978 */
5979 ret = 0;
5980 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
5981 &ret)) == NULL)
5982 return (1);
5983
5984 if (pool_list_count(list) == 0 && argc != 0) {
5985 pool_list_free(list);
5986 return (1);
5987 }
5988
5989 if (pool_list_count(list) == 0 && interval == 0) {
5990 pool_list_free(list);
5991 (void) fprintf(stderr, gettext("no pools available\n"));
5992 return (1);
5993 }
5994
5995 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
5996 pool_list_free(list);
5997 (void) fprintf(stderr,
5998 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
5999 usage(B_FALSE);
6000 return (1);
6001 }
6002
6003 if (l_histo && rq_histo) {
6004 pool_list_free(list);
6005 (void) fprintf(stderr,
6006 gettext("Only one of [-r|-w] can be passed at a time\n"));
6007 usage(B_FALSE);
6008 return (1);
6009 }
6010
6011 /*
6012 * Enter the main iostat loop.
6013 */
6014 cb.cb_list = list;
6015
6016 if (l_histo) {
6017 /*
6018 * Histogram tables look out of place when you try to display
6019 * them with the other stats, so make a rule that you can only
6020 * print histograms by themselves.
6021 */
6022 cb.cb_flags = IOS_L_HISTO_M;
6023 } else if (rq_histo) {
6024 cb.cb_flags = IOS_RQ_HISTO_M;
6025 } else {
6026 cb.cb_flags = IOS_DEFAULT_M;
6027 if (latency)
6028 cb.cb_flags |= IOS_LATENCY_M;
6029 if (queues)
6030 cb.cb_flags |= IOS_QUEUES_M;
6031 }
6032
6033 /*
6034 * See if the module supports all the stats we want to display.
6035 */
6036 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6037 if (unsupported_flags) {
6038 uint64_t f;
6039 int idx;
6040 fprintf(stderr,
6041 gettext("The loaded zfs module doesn't support:"));
6042
6043 /* for each bit set in unsupported_flags */
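		/*
		 * lowbit64() is 1-based, so "lowbit64(f) - 1" is the bit
		 * index of the lowest set flag, which also serves as the
		 * index into flag_to_arg[] above.
		 */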
6044 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6045 idx = lowbit64(f) - 1;
6046 fprintf(stderr, " -%c", flag_to_arg[idx]);
6047 }
6048
6049 fprintf(stderr, ". Try running a newer module.\n");
6050 pool_list_free(list);
6051
6052 return (1);
6053 }
6054
6055 for (;;) {
6056 if ((npools = pool_list_count(list)) == 0)
6057 (void) fprintf(stderr, gettext("no pools available\n"));
6058 else {
6059 /*
6060 * If this is the first iteration and -y was supplied
6061 * we skip any printing.
6062 */
6063 boolean_t skip = (omit_since_boot &&
6064 cb.cb_iteration == 0);
6065
6066 /*
6067 * Refresh all statistics. This is done as an
6068 * explicit step before calculating the maximum name
6069 * width, so that any configuration changes are
6070 * properly accounted for.
6071 */
6072 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
6073 &cb);
6074
6075 /*
6076 * Iterate over all pools to determine the maximum width
6077 * for the pool / device name column across all pools.
6078 */
6079 cb.cb_namewidth = 0;
6080 (void) pool_list_iter(list, B_FALSE,
6081 get_namewidth_iostat, &cb);
6082
6083 if (timestamp_fmt != NODATE)
6084 print_timestamp(timestamp_fmt);
6085
6086 if (cmd != NULL && cb.cb_verbose &&
6087 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6088 cb.vcdl = all_pools_for_each_vdev_run(argc,
6089 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6090 cb.cb_vdevs.cb_names_count,
6091 cb.cb_vdevs.cb_name_flags);
6092 } else {
6093 cb.vcdl = NULL;
6094 }
6095
6096
6097 /*
6098 * Check terminal size so we can print headers
6099 * even when terminal window has its height
6100 * changed.
6101 */
6102 winheight = terminal_height();
6103 /*
6104 * Are we connected to a TTY? If not, headers_once
6105 * should be true, to avoid breaking scripts.
6106 */
6107 if (winheight < 0)
6108 headers_once = B_TRUE;
6109
6110 /*
6111 * Print the header on the first non-skipped iteration, when exactly
6112 * one of skip/verbose is set, or once per screenful unless -n was given.
6113 *
6114 * The histogram code explicitly prints its header on
6115 * every vdev, so skip this for histograms.
6116 */
6117 if (((++cb.cb_iteration == 1 && !skip) ||
6118 (skip != verbose) ||
6119 (!headers_once &&
6120 (cb.cb_iteration % winheight) == 0)) &&
6121 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6122 !cb.cb_scripted)
6123 print_iostat_header(&cb);
6124
6125 if (skip) {
6126 (void) fflush(stdout);
6127 (void) fsleep(interval);
6128 continue;
6129 }
6130
6131 pool_list_iter(list, B_FALSE, print_iostat, &cb);
6132
6133 /*
6134 * If there's more than one pool, and we're not in
6135 * verbose mode (which prints a separator for us),
6136 * then print a separator.
6137 *
6138 * In addition, if we're printing specific vdevs then
6139 * we also want an ending separator.
6140 */
6141 if (((npools > 1 && !verbose &&
6142 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6143 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6144 cb.cb_vdevs.cb_names_count)) &&
6145 !cb.cb_scripted) {
6146 print_iostat_separator(&cb);
6147 if (cb.vcdl != NULL)
6148 print_cmd_columns(cb.vcdl, 1);
6149 printf("\n");
6150 }
6151
6152 if (cb.vcdl != NULL)
6153 free_vdev_cmd_data_list(cb.vcdl);
6154
6155 }
6156
6157 if (interval == 0)
6158 break;
6159
6160 if (count != 0 && --count == 0)
6161 break;
6162
6163 (void) fflush(stdout);
6164 (void) fsleep(interval);
6165 }
6166
6167 pool_list_free(list);
6168
6169 return (ret);
6170 }
6171
6172 typedef struct list_cbdata {
6173 boolean_t cb_verbose;
6174 int cb_name_flags;
6175 int cb_namewidth;
6176 boolean_t cb_scripted;
6177 zprop_list_t *cb_proplist;
6178 boolean_t cb_literal;
6179 } list_cbdata_t;
6180
6181
6182 /*
6183 * Given a list of columns to display, output appropriate headers for each one.
6184 */
6185 static void
6186 print_header(list_cbdata_t *cb)
6187 {
6188 zprop_list_t *pl = cb->cb_proplist;
6189 char headerbuf[ZPOOL_MAXPROPLEN];
6190 const char *header;
6191 boolean_t first = B_TRUE;
6192 boolean_t right_justify;
6193 size_t width = 0;
6194
6195 for (; pl != NULL; pl = pl->pl_next) {
6196 width = pl->pl_width;
6197 if (first && cb->cb_verbose) {
6198 /*
6199 * Reset the width to accommodate the verbose listing
6200 * of devices.
6201 */
6202 width = cb->cb_namewidth;
6203 }
6204
6205 if (!first)
6206 (void) fputs(" ", stdout);
6207 else
6208 first = B_FALSE;
6209
6210 right_justify = B_FALSE;
6211 if (pl->pl_prop != ZPROP_USERPROP) {
6212 header = zpool_prop_column_name(pl->pl_prop);
6213 right_justify = zpool_prop_align_right(pl->pl_prop);
6214 } else {
6215 int i;
6216
6217 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6218 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6219 headerbuf[i] = '\0';
6220 header = headerbuf;
6221 }
6222
6223 if (pl->pl_next == NULL && !right_justify)
6224 (void) fputs(header, stdout);
6225 else if (right_justify)
6226 (void) printf("%*s", (int)width, header);
6227 else
6228 (void) printf("%-*s", (int)width, header);
6229 }
6230
6231 (void) fputc('\n', stdout);
6232 }
6233
6234 /*
6235 * Given a pool and a list of properties, print out all the properties according
6236 * to the described layout. Used by zpool_do_list().
6237 */
6238 static void
6239 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6240 {
6241 zprop_list_t *pl = cb->cb_proplist;
6242 boolean_t first = B_TRUE;
6243 char property[ZPOOL_MAXPROPLEN];
6244 const char *propstr;
6245 boolean_t right_justify;
6246 size_t width;
6247
6248 for (; pl != NULL; pl = pl->pl_next) {
6249
6250 width = pl->pl_width;
6251 if (first && cb->cb_verbose) {
6252 /*
6253 * Reset the width to accommodate the verbose listing
6254 * of devices.
6255 */
6256 width = cb->cb_namewidth;
6257 }
6258
6259 if (!first) {
6260 if (cb->cb_scripted)
6261 (void) fputc('\t', stdout);
6262 else
6263 (void) fputs(" ", stdout);
6264 } else {
6265 first = B_FALSE;
6266 }
6267
6268 right_justify = B_FALSE;
6269 if (pl->pl_prop != ZPROP_USERPROP) {
6270 if (zpool_get_prop(zhp, pl->pl_prop, property,
6271 sizeof (property), NULL, cb->cb_literal) != 0)
6272 propstr = "-";
6273 else
6274 propstr = property;
6275
6276 right_justify = zpool_prop_align_right(pl->pl_prop);
6277 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6278 zpool_prop_unsupported(pl->pl_user_prop)) &&
6279 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6280 sizeof (property)) == 0) {
6281 propstr = property;
6282 } else if (zfs_prop_user(pl->pl_user_prop) &&
6283 zpool_get_userprop(zhp, pl->pl_user_prop, property,
6284 sizeof (property), NULL) == 0) {
6285 propstr = property;
6286 } else {
6287 propstr = "-";
6288 }
6289
6290 /*
6291 * If this is being called in scripted mode, or if this is the
6292 * last column and it is left-justified, don't include a width
6293 * format specifier.
6294 */
6295 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
6296 (void) fputs(propstr, stdout);
6297 else if (right_justify)
6298 (void) printf("%*s", (int)width, propstr);
6299 else
6300 (void) printf("%-*s", (int)width, propstr);
6301 }
6302
6303 (void) fputc('\n', stdout);
6304 }
6305
6306 static void
6307 print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
6308 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
6309 {
6310 char propval[64];
6311 boolean_t fixed;
6312 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6313
6314 switch (prop) {
6315 case ZPOOL_PROP_SIZE:
6316 case ZPOOL_PROP_EXPANDSZ:
6317 case ZPOOL_PROP_CHECKPOINT:
6318 case ZPOOL_PROP_DEDUPRATIO:
6319 if (value == 0)
6320 (void) strlcpy(propval, "-", sizeof (propval));
6321 else
6322 zfs_nicenum_format(value, propval, sizeof (propval),
6323 format);
6324 break;
6325 case ZPOOL_PROP_FRAGMENTATION:
6326 if (value == ZFS_FRAG_INVALID) {
6327 (void) strlcpy(propval, "-", sizeof (propval));
6328 } else if (format == ZFS_NICENUM_RAW) {
6329 (void) snprintf(propval, sizeof (propval), "%llu",
6330 (unsigned long long)value);
6331 } else {
6332 (void) snprintf(propval, sizeof (propval), "%llu%%",
6333 (unsigned long long)value);
6334 }
6335 break;
6336 case ZPOOL_PROP_CAPACITY:
6337 /* capacity value is in parts-per-10,000 (aka permyriad) */
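		/* e.g. 537 renders as "5.37%" and 2537 as "25.4%" (value / 100.0) */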
6338 if (format == ZFS_NICENUM_RAW)
6339 (void) snprintf(propval, sizeof (propval), "%llu",
6340 (unsigned long long)value / 100);
6341 else
6342 (void) snprintf(propval, sizeof (propval),
6343 value < 1000 ? "%1.2f%%" : value < 10000 ?
6344 "%2.1f%%" : "%3.0f%%", value / 100.0);
6345 break;
6346 case ZPOOL_PROP_HEALTH:
6347 width = 8;
6348 (void) strlcpy(propval, str, sizeof (propval));
6349 break;
6350 default:
6351 zfs_nicenum_format(value, propval, sizeof (propval), format);
6352 }
6353
6354 if (!valid)
6355 (void) strlcpy(propval, "-", sizeof (propval));
6356
6357 if (scripted)
6358 (void) printf("\t%s", propval);
6359 else
6360 (void) printf(" %*s", (int)width, propval);
6361 }
6362
6363 /*
6364 * print static default line per vdev
6365 * not compatible with '-o' <proplist> option
6366 */
6367 static void
6368 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6369 list_cbdata_t *cb, int depth, boolean_t isspare)
6370 {
6371 nvlist_t **child;
6372 vdev_stat_t *vs;
6373 uint_t c, children;
6374 char *vname;
6375 boolean_t scripted = cb->cb_scripted;
6376 uint64_t islog = B_FALSE;
6377 const char *dashes = "%-*s - - - - "
6378 "- - - - -\n";
6379
6380 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6381 (uint64_t **)&vs, &c) == 0);
6382
6383 if (name != NULL) {
6384 boolean_t toplevel = (vs->vs_space != 0);
6385 uint64_t cap;
6386 enum zfs_nicenum_format format;
6387 const char *state;
6388
6389 if (cb->cb_literal)
6390 format = ZFS_NICENUM_RAW;
6391 else
6392 format = ZFS_NICENUM_1024;
6393
6394 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
6395 return;
6396
6397 if (scripted)
6398 (void) printf("\t%s", name);
6399 else if (strlen(name) + depth > cb->cb_namewidth)
6400 (void) printf("%*s%s", depth, "", name);
6401 else
6402 (void) printf("%*s%s%*s", depth, "", name,
6403 (int)(cb->cb_namewidth - strlen(name) - depth), "");
6404
6405 /*
6406 * Print the properties for the individual vdevs. Some
6407 * properties are only applicable to toplevel vdevs. The
6408 * 'toplevel' boolean value is passed to the print_one_column()
6409 * to indicate that the value is valid.
6410 */
6411 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
6412 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
6413 scripted, B_TRUE, format);
6414 else
6415 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
6416 scripted, toplevel, format);
6417 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
6418 scripted, toplevel, format);
6419 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
6420 NULL, scripted, toplevel, format);
6421 print_one_column(ZPOOL_PROP_CHECKPOINT,
6422 vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
6423 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
6424 scripted, B_TRUE, format);
6425 print_one_column(ZPOOL_PROP_FRAGMENTATION,
6426 vs->vs_fragmentation, NULL, scripted,
6427 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
6428 format);
6429 cap = (vs->vs_space == 0) ? 0 :
6430 (vs->vs_alloc * 10000 / vs->vs_space);
6431 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
6432 scripted, toplevel, format);
6433 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
6434 scripted, toplevel, format);
6435 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
6436 if (isspare) {
6437 if (vs->vs_aux == VDEV_AUX_SPARED)
6438 state = "INUSE";
6439 else if (vs->vs_state == VDEV_STATE_HEALTHY)
6440 state = "AVAIL";
6441 }
6442 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
6443 B_TRUE, format);
6444 (void) fputc('\n', stdout);
6445 }
6446
6447 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
6448 &child, &children) != 0)
6449 return;
6450
6451 /* list the normal vdevs first */
6452 for (c = 0; c < children; c++) {
6453 uint64_t ishole = B_FALSE;
6454
6455 if (nvlist_lookup_uint64(child[c],
6456 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
6457 continue;
6458
6459 if (nvlist_lookup_uint64(child[c],
6460 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
6461 continue;
6462
6463 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
6464 continue;
6465
6466 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6467 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6468 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
6469 free(vname);
6470 }
6471
6472 /* list the classes: 'logs', 'dedup', and 'special' */
6473 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
6474 boolean_t printed = B_FALSE;
6475
6476 for (c = 0; c < children; c++) {
6477 const char *bias = NULL;
6478 const char *type = NULL;
6479
6480 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
6481 &islog) == 0 && islog) {
6482 bias = VDEV_ALLOC_CLASS_LOGS;
6483 } else {
6484 (void) nvlist_lookup_string(child[c],
6485 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
6486 (void) nvlist_lookup_string(child[c],
6487 ZPOOL_CONFIG_TYPE, &type);
6488 }
6489 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
6490 continue;
6491 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
6492 continue;
6493
6494 if (!printed) {
6495 /* LINTED E_SEC_PRINTF_VAR_FMT */
6496 (void) printf(dashes, cb->cb_namewidth,
6497 class_name[n]);
6498 printed = B_TRUE;
6499 }
6500 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6501 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6502 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6503 B_FALSE);
6504 free(vname);
6505 }
6506 }
6507
6508 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
6509 &child, &children) == 0 && children > 0) {
6510 /* LINTED E_SEC_PRINTF_VAR_FMT */
6511 (void) printf(dashes, cb->cb_namewidth, "cache");
6512 for (c = 0; c < children; c++) {
6513 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6514 cb->cb_name_flags);
6515 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6516 B_FALSE);
6517 free(vname);
6518 }
6519 }
6520
6521 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
6522 &children) == 0 && children > 0) {
6523 /* LINTED E_SEC_PRINTF_VAR_FMT */
6524 (void) printf(dashes, cb->cb_namewidth, "spare");
6525 for (c = 0; c < children; c++) {
6526 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6527 cb->cb_name_flags);
6528 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6529 B_TRUE);
6530 free(vname);
6531 }
6532 }
6533 }
6534
6535 /*
6536 * Generic callback function to list a pool.
6537 */
6538 static int
6539 list_callback(zpool_handle_t *zhp, void *data)
6540 {
6541 list_cbdata_t *cbp = data;
6542
6543 print_pool(zhp, cbp);
6544
6545 if (cbp->cb_verbose) {
6546 nvlist_t *config, *nvroot;
6547
6548 config = zpool_get_config(zhp, NULL);
6549 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6550 &nvroot) == 0);
6551 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
6552 }
6553
6554 return (0);
6555 }
6556
6557 /*
6558 * Set the minimum pool/vdev name column width. The width must be at least 9,
6559 * but may be as large as needed.
6560 */
6561 static int
6562 get_namewidth_list(zpool_handle_t *zhp, void *data)
6563 {
6564 list_cbdata_t *cb = data;
6565 int width;
6566
6567 width = get_namewidth(zhp, cb->cb_namewidth,
6568 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6569
6570 if (width < 9)
6571 width = 9;
6572
6573 cb->cb_namewidth = width;
6574
6575 return (0);
6576 }
6577
6578 /*
6579 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
6580 *
6581 * -g Display guid for individual vdev name.
6582 * -H Scripted mode. Don't display headers, and separate properties
6583 * by a single tab.
6584 * -L Follow links when resolving vdev path name.
6585 * -o List of properties to display. Defaults to
6586 * "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6587 * "capacity,dedupratio,health,altroot"
6588 * -p Display values in parsable (exact) format.
6589 * -P Display full path for vdev name.
6590 * -T Display a timestamp in date(1) or Unix format
6591 *
6592 * List all pools in the system, whether or not they're healthy. Output space
6593 * statistics for each one, as well as a health status summary.
6594 */
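/*
 * Illustrative invocations (pool names are hypothetical):
 *
 *	zpool list                          # table of all pools
 *	zpool list -v tank                  # include per-vdev rows
 *	zpool list -Hp -o name,size,free    # script-friendly, exact bytes
 *	zpool list tank 5                   # repeat every 5 seconds
 */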
6595 int
6596 zpool_do_list(int argc, char **argv)
6597 {
6598 int c;
6599 int ret = 0;
6600 list_cbdata_t cb = { 0 };
6601 static char default_props[] =
6602 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6603 "capacity,dedupratio,health,altroot";
6604 char *props = default_props;
6605 float interval = 0;
6606 unsigned long count = 0;
6607 zpool_list_t *list;
6608 boolean_t first = B_TRUE;
6609 current_prop_type = ZFS_TYPE_POOL;
6610
6611 /* check options */
6612 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
6613 switch (c) {
6614 case 'g':
6615 cb.cb_name_flags |= VDEV_NAME_GUID;
6616 break;
6617 case 'H':
6618 cb.cb_scripted = B_TRUE;
6619 break;
6620 case 'L':
6621 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6622 break;
6623 case 'o':
6624 props = optarg;
6625 break;
6626 case 'P':
6627 cb.cb_name_flags |= VDEV_NAME_PATH;
6628 break;
6629 case 'p':
6630 cb.cb_literal = B_TRUE;
6631 break;
6632 case 'T':
6633 get_timestamp_arg(*optarg);
6634 break;
6635 case 'v':
6636 cb.cb_verbose = B_TRUE;
6637 cb.cb_namewidth = 8; /* 8 until precalc is avail */
6638 break;
6639 case ':':
6640 (void) fprintf(stderr, gettext("missing argument for "
6641 "'%c' option\n"), optopt);
6642 usage(B_FALSE);
6643 break;
6644 case '?':
6645 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6646 optopt);
6647 usage(B_FALSE);
6648 }
6649 }
6650
6651 argc -= optind;
6652 argv += optind;
6653
6654 get_interval_count(&argc, argv, &interval, &count);
6655
6656 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
6657 usage(B_FALSE);
6658
6659 for (;;) {
6660 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
6661 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
6662 return (1);
6663
6664 if (pool_list_count(list) == 0)
6665 break;
6666
6667 cb.cb_namewidth = 0;
6668 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
6669
6670 if (timestamp_fmt != NODATE)
6671 print_timestamp(timestamp_fmt);
6672
6673 if (!cb.cb_scripted && (first || cb.cb_verbose)) {
6674 print_header(&cb);
6675 first = B_FALSE;
6676 }
6677 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
6678
6679 if (interval == 0)
6680 break;
6681
6682 if (count != 0 && --count == 0)
6683 break;
6684
6685 pool_list_free(list);
6686
6687 (void) fflush(stdout);
6688 (void) fsleep(interval);
6689 }
6690
6691 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
6692 (void) printf(gettext("no pools available\n"));
6693 ret = 0;
6694 }
6695
6696 pool_list_free(list);
6697 zprop_free_list(cb.cb_proplist);
6698 return (ret);
6699 }
6700
6701 static int
6702 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
6703 {
6704 boolean_t force = B_FALSE;
6705 boolean_t rebuild = B_FALSE;
6706 boolean_t wait = B_FALSE;
6707 int c;
6708 nvlist_t *nvroot;
6709 char *poolname, *old_disk, *new_disk;
6710 zpool_handle_t *zhp;
6711 nvlist_t *props = NULL;
6712 char *propval;
6713 int ret;
6714
6715 /* check options */
6716 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
6717 switch (c) {
6718 case 'f':
6719 force = B_TRUE;
6720 break;
6721 case 'o':
6722 if ((propval = strchr(optarg, '=')) == NULL) {
6723 (void) fprintf(stderr, gettext("missing "
6724 "'=' for -o option\n"));
6725 usage(B_FALSE);
6726 }
6727 *propval = '\0';
6728 propval++;
6729
6730 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
6731 (add_prop_list(optarg, propval, &props, B_TRUE)))
6732 usage(B_FALSE);
6733 break;
6734 case 's':
6735 rebuild = B_TRUE;
6736 break;
6737 case 'w':
6738 wait = B_TRUE;
6739 break;
6740 case '?':
6741 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6742 optopt);
6743 usage(B_FALSE);
6744 }
6745 }
6746
6747 argc -= optind;
6748 argv += optind;
6749
6750 /* get pool name and check number of arguments */
6751 if (argc < 1) {
6752 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6753 usage(B_FALSE);
6754 }
6755
6756 poolname = argv[0];
6757
6758 if (argc < 2) {
6759 (void) fprintf(stderr,
6760 gettext("missing <device> specification\n"));
6761 usage(B_FALSE);
6762 }
6763
6764 old_disk = argv[1];
6765
6766 if (argc < 3) {
6767 if (!replacing) {
6768 (void) fprintf(stderr,
6769 gettext("missing <new_device> specification\n"));
6770 usage(B_FALSE);
6771 }
6772 new_disk = old_disk;
6773 argc -= 1;
6774 argv += 1;
6775 } else {
6776 new_disk = argv[2];
6777 argc -= 2;
6778 argv += 2;
6779 }
6780
6781 if (argc > 1) {
6782 (void) fprintf(stderr, gettext("too many arguments\n"));
6783 usage(B_FALSE);
6784 }
6785
6786 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
6787 nvlist_free(props);
6788 return (1);
6789 }
6790
6791 if (zpool_get_config(zhp, NULL) == NULL) {
6792 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
6793 poolname);
6794 zpool_close(zhp);
6795 nvlist_free(props);
6796 return (1);
6797 }
6798
6799 /* unless manually specified use "ashift" pool property (if set) */
6800 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
6801 int intval;
6802 zprop_source_t src;
6803 char strval[ZPOOL_MAXPROPLEN];
6804
6805 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
6806 if (src != ZPROP_SRC_DEFAULT) {
6807 (void) sprintf(strval, "%" PRId32, intval);
6808 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
6809 &props, B_TRUE) == 0);
6810 }
6811 }
6812
6813 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
6814 argc, argv);
6815 if (nvroot == NULL) {
6816 zpool_close(zhp);
6817 nvlist_free(props);
6818 return (1);
6819 }
6820
6821 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
6822 rebuild);
6823
6824 if (ret == 0 && wait) {
6825 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
6826 char raidz_prefix[] = "raidz";
6827 if (replacing) {
6828 activity = ZPOOL_WAIT_REPLACE;
6829 } else if (strncmp(old_disk,
6830 raidz_prefix, strlen(raidz_prefix)) == 0) {
6831 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
6832 }
6833 ret = zpool_wait(zhp, activity);
6834 }
6835
6836 nvlist_free(props);
6837 nvlist_free(nvroot);
6838 zpool_close(zhp);
6839
6840 return (ret);
6841 }
6842
6843 /*
6844 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
6845 *
6846 * -f Force attach, even if <new_device> appears to be in use.
6847 * -s Use sequential instead of healing reconstruction for resilver.
6848 * -o Set property=value.
6849 * -w Wait for replacing to complete before returning
6850 *
6851 * Replace <device> with <new_device>.
6852 */
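/*
 * Illustrative invocations (pool and device names are hypothetical):
 *
 *	zpool replace tank sda sdb     # replace sda with sdb
 *	zpool replace -w tank sda      # in-place replace, wait for resilver
 */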
6853 int
6854 zpool_do_replace(int argc, char **argv)
6855 {
6856 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
6857 }
6858
6859 /*
6860 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
6861 *
6862 * -f Force attach, even if <new_device> appears to be in use.
6863 * -s Use sequential instead of healing reconstruction for resilver.
6864 * -o Set property=value.
6865 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
6866 * before returning.
6867 *
6868 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
6869 * mirror or raidz. If <device> is not part of a mirror, then <device> will
6870 * be transformed into a mirror of <device> and <new_device>. When a mirror
6871 * is involved, <new_device> will begin life with a DTL of [0, now], and will
6872 * immediately begin to resilver itself. For the raidz case, an expansion will
6873 * commence and reflow the raidz data across all the disks including the
6874 * <new_device>.
6875 */
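/*
 * Illustrative invocations (pool, vdev and device names are hypothetical):
 *
 *	zpool attach tank sda sdb           # turn sda into a mirror of sda+sdb
 *	zpool attach -w tank raidz1-0 sdd   # expand a raidz vdev and wait
 */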
6876 int
6877 zpool_do_attach(int argc, char **argv)
6878 {
6879 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
6880 }
6881
6882 /*
6883 * zpool detach [-f] <pool> <device>
6884 *
6885 * -f Force detach of <device>, even if DTLs argue against it
6886 * (not supported yet)
6887 *
6888 * Detach a device from a mirror. The operation will be refused if <device>
6889 * is the last device in the mirror, or if the DTLs indicate that this device
6890 * has the only valid copy of some data.
6891 */
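/*
 * Illustrative invocation (names are hypothetical):
 *
 *	zpool detach tank sdb    # drop sdb from its mirror
 */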
6892 int
6893 zpool_do_detach(int argc, char **argv)
6894 {
6895 int c;
6896 char *poolname, *path;
6897 zpool_handle_t *zhp;
6898 int ret;
6899
6900 /* check options */
6901 while ((c = getopt(argc, argv, "")) != -1) {
6902 switch (c) {
6903 case '?':
6904 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6905 optopt);
6906 usage(B_FALSE);
6907 }
6908 }
6909
6910 argc -= optind;
6911 argv += optind;
6912
6913 /* get pool name and check number of arguments */
6914 if (argc < 1) {
6915 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6916 usage(B_FALSE);
6917 }
6918
6919 if (argc < 2) {
6920 (void) fprintf(stderr,
6921 gettext("missing <device> specification\n"));
6922 usage(B_FALSE);
6923 }
6924
6925 poolname = argv[0];
6926 path = argv[1];
6927
6928 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6929 return (1);
6930
6931 ret = zpool_vdev_detach(zhp, path);
6932
6933 zpool_close(zhp);
6934
6935 return (ret);
6936 }
6937
6938 /*
6939 * zpool split [-gLnP] [-o prop=val] ...
6940 * [-o mntopt] ...
6941 * [-R altroot] <pool> <newpool> [<device> ...]
6942 *
6943 * -g Display guid for individual vdev name.
6944 * -L Follow links when resolving vdev path name.
6945 * -n Do not split the pool, but display the resulting layout if
6946 * it were to be split.
6947 * -o Set property=value, or set mount options.
6948 * -P Display full path for vdev name.
6949 * -R Mount the split-off pool under an alternate root.
6950 * -l Load encryption keys while importing.
6951 *
6952 * Splits the named pool and gives it the new pool name. Devices to be split
6953 * off may be listed, provided that no more than one device is specified
6954 * per top-level vdev mirror. The newly split pool is left in an exported
6955 * state unless -R is specified.
6956 *
6957 * Restrictions: the top level of the pool must only be made up of
6958 * mirrors; all devices in the pool must be healthy; no device may be
6959 * undergoing a resilvering operation.
6960 */
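/*
 * Illustrative invocations (pool and device names are hypothetical):
 *
 *	zpool split tank tank2              # split one side of each mirror
 *	zpool split -n tank tank2           # dry run, show resulting layout
 *	zpool split -R /mnt tank tank2 sdb  # choose devices, import at /mnt
 */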
6961 int
6962 zpool_do_split(int argc, char **argv)
6963 {
6964 char *srcpool, *newpool, *propval;
6965 char *mntopts = NULL;
6966 splitflags_t flags;
6967 int c, ret = 0;
6968 int ms_status = 0;
6969 boolean_t loadkeys = B_FALSE;
6970 zpool_handle_t *zhp;
6971 nvlist_t *config, *props = NULL;
6972
6973 flags.dryrun = B_FALSE;
6974 flags.import = B_FALSE;
6975 flags.name_flags = 0;
6976
6977 /* check options */
6978 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
6979 switch (c) {
6980 case 'g':
6981 flags.name_flags |= VDEV_NAME_GUID;
6982 break;
6983 case 'L':
6984 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
6985 break;
6986 case 'R':
6987 flags.import = B_TRUE;
6988 if (add_prop_list(
6989 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
6990 &props, B_TRUE) != 0) {
6991 nvlist_free(props);
6992 usage(B_FALSE);
6993 }
6994 break;
6995 case 'l':
6996 loadkeys = B_TRUE;
6997 break;
6998 case 'n':
6999 flags.dryrun = B_TRUE;
7000 break;
7001 case 'o':
7002 if ((propval = strchr(optarg, '=')) != NULL) {
7003 *propval = '\0';
7004 propval++;
7005 if (add_prop_list(optarg, propval,
7006 &props, B_TRUE) != 0) {
7007 nvlist_free(props);
7008 usage(B_FALSE);
7009 }
7010 } else {
7011 mntopts = optarg;
7012 }
7013 break;
7014 case 'P':
7015 flags.name_flags |= VDEV_NAME_PATH;
7016 break;
7017 case ':':
7018 (void) fprintf(stderr, gettext("missing argument for "
7019 "'%c' option\n"), optopt);
7020 usage(B_FALSE);
7021 break;
7022 case '?':
7023 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7024 optopt);
7025 usage(B_FALSE);
7026 break;
7027 }
7028 }
7029
7030 if (!flags.import && mntopts != NULL) {
7031 (void) fprintf(stderr, gettext("setting mntopts is only "
7032 "valid when importing the pool\n"));
7033 usage(B_FALSE);
7034 }
7035
7036 if (!flags.import && loadkeys) {
7037 (void) fprintf(stderr, gettext("loading keys is only "
7038 "valid when importing the pool\n"));
7039 usage(B_FALSE);
7040 }
7041
7042 argc -= optind;
7043 argv += optind;
7044
7045 if (argc < 1) {
7046 (void) fprintf(stderr, gettext("Missing pool name\n"));
7047 usage(B_FALSE);
7048 }
7049 if (argc < 2) {
7050 (void) fprintf(stderr, gettext("Missing new pool name\n"));
7051 usage(B_FALSE);
7052 }
7053
7054 srcpool = argv[0];
7055 newpool = argv[1];
7056
7057 argc -= 2;
7058 argv += 2;
7059
7060 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7061 nvlist_free(props);
7062 return (1);
7063 }
7064
7065 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7066 if (config == NULL) {
7067 ret = 1;
7068 } else {
7069 if (flags.dryrun) {
7070 (void) printf(gettext("would create '%s' with the "
7071 "following layout:\n\n"), newpool);
7072 print_vdev_tree(NULL, newpool, config, 0, "",
7073 flags.name_flags);
7074 print_vdev_tree(NULL, "dedup", config, 0,
7075 VDEV_ALLOC_BIAS_DEDUP, 0);
7076 print_vdev_tree(NULL, "special", config, 0,
7077 VDEV_ALLOC_BIAS_SPECIAL, 0);
7078 }
7079 }
7080
7081 zpool_close(zhp);
7082
7083 if (ret != 0 || flags.dryrun || !flags.import) {
7084 nvlist_free(config);
7085 nvlist_free(props);
7086 return (ret);
7087 }
7088
7089 /*
7090 * The split was successful. Now we need to open the new
7091 * pool and import it.
7092 */
7093 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7094 nvlist_free(config);
7095 nvlist_free(props);
7096 return (1);
7097 }
7098
7099 if (loadkeys) {
7100 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7101 if (ret != 0)
7102 ret = 1;
7103 }
7104
7105 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7106 ms_status = zpool_enable_datasets(zhp, mntopts, 0);
7107 if (ms_status == EZFS_SHAREFAILED) {
7108 (void) fprintf(stderr, gettext("Split was successful, "
7109 "datasets are mounted but sharing of some datasets "
7110 "has failed\n"));
7111 } else if (ms_status == EZFS_MOUNTFAILED) {
7112 (void) fprintf(stderr, gettext("Split was successful"
7113 ", but some datasets could not be mounted\n"));
7114 (void) fprintf(stderr, gettext("Try doing '%s' with a "
7115 "different altroot\n"), "zpool import");
7116 }
7117 }
7118 zpool_close(zhp);
7119 nvlist_free(config);
7120 nvlist_free(props);
7121
7122 return (ret);
7123 }
7124
7125
7126 /*
7127 * zpool online [-e] [--power] <pool> <device> ...
7128 *
7129 * -e: Expand the device to use all available space (if possible)
 * --power: Power on the enclosure slot to the drive (if possible)
7130 */
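/*
 * Illustrative invocations (names are hypothetical):
 *
 *	zpool online tank sda            # bring sda back online
 *	zpool online -e tank sda         # also expand to all available space
 *	zpool online --power tank sda    # power the slot on first, if supported
 */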
7131 int
7132 zpool_do_online(int argc, char **argv)
7133 {
7134 int c, i;
7135 char *poolname;
7136 zpool_handle_t *zhp;
7137 int ret = 0;
7138 vdev_state_t newstate;
7139 int flags = 0;
7140 boolean_t is_power_on = B_FALSE;
7141 struct option long_options[] = {
7142 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7143 {0, 0, 0, 0}
7144 };
7145
7146 /* check options */
7147 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7148 switch (c) {
7149 case 'e':
7150 flags |= ZFS_ONLINE_EXPAND;
7151 break;
7152 case ZPOOL_OPTION_POWER:
7153 is_power_on = B_TRUE;
7154 break;
7155 case '?':
7156 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7157 optopt);
7158 usage(B_FALSE);
7159 }
7160 }
7161
7162 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7163 is_power_on = B_TRUE;
7164
7165 argc -= optind;
7166 argv += optind;
7167
7168 /* get pool name and check number of arguments */
7169 if (argc < 1) {
7170 (void) fprintf(stderr, gettext("missing pool name\n"));
7171 usage(B_FALSE);
7172 }
7173 if (argc < 2) {
7174 (void) fprintf(stderr, gettext("missing device name\n"));
7175 usage(B_FALSE);
7176 }
7177
7178 poolname = argv[0];
7179
7180 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7181 return (1);
7182
7183 for (i = 1; i < argc; i++) {
7184 vdev_state_t oldstate;
7185 boolean_t avail_spare, l2cache;
7186 int rc;
7187
7188 if (is_power_on) {
7189 rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
7190 if (rc == ENOTSUP) {
7191 (void) fprintf(stderr,
7192 gettext("Power control not supported\n"));
7193 }
7194 if (rc != 0)
7195 return (rc);
7196 }
7197
7198 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
7199 &l2cache, NULL);
7200 if (tgt == NULL) {
7201 ret = 1;
7202 continue;
7203 }
7204 uint_t vsc;
7205 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
7206 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
7207 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
7208 if (newstate != VDEV_STATE_HEALTHY) {
7209 (void) printf(gettext("warning: device '%s' "
7210 "onlined, but remains in faulted state\n"),
7211 argv[i]);
7212 if (newstate == VDEV_STATE_FAULTED)
7213 (void) printf(gettext("use 'zpool "
7214 "clear' to restore a faulted "
7215 "device\n"));
7216 else
7217 (void) printf(gettext("use 'zpool "
7218 "replace' to replace devices "
7219 "that are no longer present\n"));
7220 if ((flags & ZFS_ONLINE_EXPAND)) {
7221 (void) printf(gettext("%s: failed "
7222 "to expand usable space on "
7223 "unhealthy device '%s'\n"),
7224 (oldstate >= VDEV_STATE_DEGRADED ?
7225 "error" : "warning"), argv[i]);
7226 if (oldstate >= VDEV_STATE_DEGRADED) {
7227 ret = 1;
7228 break;
7229 }
7230 }
7231 }
7232 } else {
7233 ret = 1;
7234 }
7235 }
7236
7237 zpool_close(zhp);
7238
7239 return (ret);
7240 }
7241
7242 /*
7243 * zpool offline [-ft]|[--power] <pool> <device> ...
7244 *
7246 * -f Force the device into a faulted state.
7247 *
7248 * -t Only take the device off-line temporarily. The offline/faulted
7249 * state will not be persistent across reboots.
7250 *
7251 * --power Power off the enclosure slot to the drive (if possible)
7252 */
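/*
 * Illustrative invocations (names are hypothetical):
 *
 *	zpool offline tank sda       # persistent offline
 *	zpool offline -t tank sda    # temporary, cleared by a reboot
 *	zpool offline -f tank sda    # force the device into a faulted state
 */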
7253 int
7254 zpool_do_offline(int argc, char **argv)
7255 {
7256 int c, i;
7257 char *poolname;
7258 zpool_handle_t *zhp;
7259 int ret = 0;
7260 boolean_t istmp = B_FALSE;
7261 boolean_t fault = B_FALSE;
7262 boolean_t is_power_off = B_FALSE;
7263
7264 struct option long_options[] = {
7265 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7266 {0, 0, 0, 0}
7267 };
7268
7269 /* check options */
7270 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
7271 switch (c) {
7272 case 'f':
7273 fault = B_TRUE;
7274 break;
7275 case 't':
7276 istmp = B_TRUE;
7277 break;
7278 case ZPOOL_OPTION_POWER:
7279 is_power_off = B_TRUE;
7280 break;
7281 case '?':
7282 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7283 optopt);
7284 usage(B_FALSE);
7285 }
7286 }
7287
7288 if (is_power_off && fault) {
7289 (void) fprintf(stderr,
7290 gettext("--power and -f cannot be used together\n"));
7291 usage(B_FALSE);
7292 return (1);
7293 }
7294
7295 if (is_power_off && istmp) {
7296 (void) fprintf(stderr,
7297 gettext("--power and -t cannot be used together\n"));
7298 usage(B_FALSE);
7299 return (1);
7300 }
7301
7302 argc -= optind;
7303 argv += optind;
7304
7305 /* get pool name and check number of arguments */
7306 if (argc < 1) {
7307 (void) fprintf(stderr, gettext("missing pool name\n"));
7308 usage(B_FALSE);
7309 }
7310 if (argc < 2) {
7311 (void) fprintf(stderr, gettext("missing device name\n"));
7312 usage(B_FALSE);
7313 }
7314
7315 poolname = argv[0];
7316
7317 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7318 return (1);
7319
7320 for (i = 1; i < argc; i++) {
7321 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
7322 if (is_power_off) {
7323 /*
7324 * Note: we have to power off first, then set REMOVED,
7325 * or else zpool_vdev_set_removed_state() returns
7326 * EAGAIN.
7327 */
7328 ret = zpool_power_off(zhp, argv[i]);
7329 if (ret != 0) {
7330 (void) fprintf(stderr, "%s %s %d\n",
7331 gettext("unable to power off slot for"),
7332 argv[i], ret);
7333 }
7334 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
7335
7336 } else if (fault) {
7337 vdev_aux_t aux;
7338 if (istmp == B_FALSE) {
7339 /* Force the fault to persist across imports */
7340 aux = VDEV_AUX_EXTERNAL_PERSIST;
7341 } else {
7342 aux = VDEV_AUX_EXTERNAL;
7343 }
7344
7345 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
7346 ret = 1;
7347 } else {
7348 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
7349 ret = 1;
7350 }
7351 }
7352
7353 zpool_close(zhp);
7354
7355 return (ret);
7356 }
7357
7358 /*
7359 * zpool clear [-nF]|[--power] <pool> [device]
7360 *
7361 * Clear all errors associated with a pool or a particular device.
7362 */
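/*
 * Illustrative invocations (names are hypothetical):
 *
 *	zpool clear tank        # clear error counts on the whole pool
 *	zpool clear tank sda    # clear errors on a single device
 *	zpool clear -F tank     # attempt recovery by rewinding to an earlier txg
 */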
7363 int
7364 zpool_do_clear(int argc, char **argv)
7365 {
7366 int c;
7367 int ret = 0;
7368 boolean_t dryrun = B_FALSE;
7369 boolean_t do_rewind = B_FALSE;
7370 boolean_t xtreme_rewind = B_FALSE;
7371 boolean_t is_power_on = B_FALSE;
7372 uint32_t rewind_policy = ZPOOL_NO_REWIND;
7373 nvlist_t *policy = NULL;
7374 zpool_handle_t *zhp;
7375 char *pool, *device;
7376
7377 struct option long_options[] = {
7378 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7379 {0, 0, 0, 0}
7380 };
7381
7382 /* check options */
7383 while ((c = getopt_long(argc, argv, "FnX", long_options,
7384 NULL)) != -1) {
7385 switch (c) {
7386 case 'F':
7387 do_rewind = B_TRUE;
7388 break;
7389 case 'n':
7390 dryrun = B_TRUE;
7391 break;
7392 case 'X':
7393 xtreme_rewind = B_TRUE;
7394 break;
7395 case ZPOOL_OPTION_POWER:
7396 is_power_on = B_TRUE;
7397 break;
7398 case '?':
7399 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7400 optopt);
7401 usage(B_FALSE);
7402 }
7403 }
7404
7405 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7406 is_power_on = B_TRUE;
7407
7408 argc -= optind;
7409 argv += optind;
7410
7411 if (argc < 1) {
7412 (void) fprintf(stderr, gettext("missing pool name\n"));
7413 usage(B_FALSE);
7414 }
7415
7416 if (argc > 2) {
7417 (void) fprintf(stderr, gettext("too many arguments\n"));
7418 usage(B_FALSE);
7419 }
7420
7421 if ((dryrun || xtreme_rewind) && !do_rewind) {
7422 (void) fprintf(stderr,
7423 gettext("-n or -X only meaningful with -F\n"));
7424 usage(B_FALSE);
7425 }
7426 if (dryrun)
7427 rewind_policy = ZPOOL_TRY_REWIND;
7428 else if (do_rewind)
7429 rewind_policy = ZPOOL_DO_REWIND;
7430 if (xtreme_rewind)
7431 rewind_policy |= ZPOOL_EXTREME_REWIND;
7432
7433 /* In future, further rewind policy choices can be passed along here */
7434 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
7435 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
7436 rewind_policy) != 0) {
7437 return (1);
7438 }
7439
7440 pool = argv[0];
7441 device = argc == 2 ? argv[1] : NULL;
7442
7443 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
7444 nvlist_free(policy);
7445 return (1);
7446 }
7447
7448 if (is_power_on) {
7449 if (device == NULL) {
7450 zpool_power_on_pool_and_wait_for_devices(zhp);
7451 } else {
7452 zpool_power_on_and_disk_wait(zhp, device);
7453 }
7454 }
7455
7456 if (zpool_clear(zhp, device, policy) != 0)
7457 ret = 1;
7458
7459 zpool_close(zhp);
7460
7461 nvlist_free(policy);
7462
7463 return (ret);
7464 }
7465
7466 /*
7467 * zpool reguid <pool>
7468 */
7469 int
7470 zpool_do_reguid(int argc, char **argv)
7471 {
7472 int c;
7473 char *poolname;
7474 zpool_handle_t *zhp;
7475 int ret = 0;
7476
7477 /* check options */
7478 while ((c = getopt(argc, argv, "")) != -1) {
7479 switch (c) {
7480 case '?':
7481 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7482 optopt);
7483 usage(B_FALSE);
7484 }
7485 }
7486
7487 argc -= optind;
7488 argv += optind;
7489
7490 /* get pool name and check number of arguments */
7491 if (argc < 1) {
7492 (void) fprintf(stderr, gettext("missing pool name\n"));
7493 usage(B_FALSE);
7494 }
7495
7496 if (argc > 1) {
7497 (void) fprintf(stderr, gettext("too many arguments\n"));
7498 usage(B_FALSE);
7499 }
7500
7501 poolname = argv[0];
7502 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7503 return (1);
7504
7505 ret = zpool_reguid(zhp);
7506
7507 zpool_close(zhp);
7508 return (ret);
7509 }
7510
7511
7512 /*
7513 * zpool reopen <pool>
7514 *
7515 * Reopen the pool so that the kernel can update the sizes of all vdevs.
7516 */
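/*
 * Illustrative invocation: "zpool reopen tank" (or plain "zpool reopen" to
 * reopen every imported pool); -n avoids restarting an in-progress scrub,
 * per the option handling below.
 */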
7517 int
7518 zpool_do_reopen(int argc, char **argv)
7519 {
7520 int c;
7521 int ret = 0;
7522 boolean_t scrub_restart = B_TRUE;
7523
7524 /* check options */
7525 while ((c = getopt(argc, argv, "n")) != -1) {
7526 switch (c) {
7527 case 'n':
7528 scrub_restart = B_FALSE;
7529 break;
7530 case '?':
7531 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7532 optopt);
7533 usage(B_FALSE);
7534 }
7535 }
7536
7537 argc -= optind;
7538 argv += optind;
7539
7540 /* if argc == 0 we will execute zpool_reopen_one on all pools */
7541 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7542 B_FALSE, zpool_reopen_one, &scrub_restart);
7543
7544 return (ret);
7545 }
7546
7547 typedef struct scrub_cbdata {
7548 int cb_type;
7549 pool_scrub_cmd_t cb_scrub_cmd;
7550 } scrub_cbdata_t;
7551
7552 static boolean_t
7553 zpool_has_checkpoint(zpool_handle_t *zhp)
7554 {
7555 nvlist_t *config, *nvroot;
7556
7557 config = zpool_get_config(zhp, NULL);
7558
7559 if (config != NULL) {
7560 pool_checkpoint_stat_t *pcs = NULL;
7561 uint_t c;
7562
7563 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7564 (void) nvlist_lookup_uint64_array(nvroot,
7565 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7566
7567 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7568 return (B_FALSE);
7569
7570 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
7571 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7572 return (B_TRUE);
7573 }
7574
7575 return (B_FALSE);
7576 }
7577
7578 static int
7579 scrub_callback(zpool_handle_t *zhp, void *data)
7580 {
7581 scrub_cbdata_t *cb = data;
7582 int err;
7583
7584 /*
7585 * Ignore faulted pools.
7586 */
7587 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
7588 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
7589 "currently unavailable\n"), zpool_get_name(zhp));
7590 return (1);
7591 }
7592
7593 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
7594
7595 if (err == 0 && zpool_has_checkpoint(zhp) &&
7596 cb->cb_type == POOL_SCAN_SCRUB) {
7597 (void) printf(gettext("warning: will not scrub state that "
7598 "belongs to the checkpoint of pool '%s'\n"),
7599 zpool_get_name(zhp));
7600 }
7601
7602 return (err != 0);
7603 }
7604
7605 static int
7606 wait_callback(zpool_handle_t *zhp, void *data)
7607 {
7608 zpool_wait_activity_t *act = data;
7609 return (zpool_wait(zhp, *act));
7610 }
7611
7612 /*
7613 * zpool scrub [-s | -p] [-w] [-e] <pool> ...
7614 *
7615 * -e Only scrub blocks in the error log.
7616 * -s Stop. Stops any in-progress scrub.
7617 * -p Pause. Pause in-progress scrub.
7618 * -w Wait. Blocks until scrub has completed.
7619 */
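/*
 * Illustrative invocations (pool name is hypothetical):
 *
 *	zpool scrub tank       # start (or resume) a scrub
 *	zpool scrub -p tank    # pause it
 *	zpool scrub -w tank    # start and block until it completes
 *	zpool scrub -e tank    # scrub only blocks in the error log
 */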
7620 int
7621 zpool_do_scrub(int argc, char **argv)
7622 {
7623 int c;
7624 scrub_cbdata_t cb;
7625 boolean_t wait = B_FALSE;
7626 int error;
7627
7628 cb.cb_type = POOL_SCAN_SCRUB;
7629 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7630
7631 boolean_t is_error_scrub = B_FALSE;
7632 boolean_t is_pause = B_FALSE;
7633 boolean_t is_stop = B_FALSE;
7634
7635 /* check options */
7636 while ((c = getopt(argc, argv, "spwe")) != -1) {
7637 switch (c) {
7638 case 'e':
7639 is_error_scrub = B_TRUE;
7640 break;
7641 case 's':
7642 is_stop = B_TRUE;
7643 break;
7644 case 'p':
7645 is_pause = B_TRUE;
7646 break;
7647 case 'w':
7648 wait = B_TRUE;
7649 break;
7650 case '?':
7651 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7652 optopt);
7653 usage(B_FALSE);
7654 }
7655 }
7656
7657 if (is_pause && is_stop) {
7658 (void) fprintf(stderr, gettext("invalid option "
7659 "combination: -s and -p are mutually exclusive\n"));
7660 usage(B_FALSE);
7661 } else {
7662 if (is_error_scrub)
7663 cb.cb_type = POOL_SCAN_ERRORSCRUB;
7664
7665 if (is_pause) {
7666 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
7667 } else if (is_stop) {
7668 cb.cb_type = POOL_SCAN_NONE;
7669 } else {
7670 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7671 }
7672 }
7673
7674 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
7675 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
7676 (void) fprintf(stderr, gettext("invalid option combination: "
7677 "-w cannot be used with -p or -s\n"));
7678 usage(B_FALSE);
7679 }
7680
7681 argc -= optind;
7682 argv += optind;
7683
7684 if (argc < 1) {
7685 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7686 usage(B_FALSE);
7687 }
7688
7689 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7690 B_FALSE, scrub_callback, &cb);
7691
7692 if (wait && !error) {
7693 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
7694 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7695 B_FALSE, wait_callback, &act);
7696 }
7697
7698 return (error);
7699 }
7700
7701 /*
7702 * zpool resilver <pool> ...
7703 *
7704 * Restarts any in-progress resilver
7705 */
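/*
 * Illustrative invocation: "zpool resilver tank" restarts any resilver
 * already running on the pool from the beginning.
 */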
7706 int
7707 zpool_do_resilver(int argc, char **argv)
7708 {
7709 int c;
7710 scrub_cbdata_t cb;
7711
7712 cb.cb_type = POOL_SCAN_RESILVER;
7713 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7714
7715 /* check options */
7716 while ((c = getopt(argc, argv, "")) != -1) {
7717 switch (c) {
7718 case '?':
7719 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7720 optopt);
7721 usage(B_FALSE);
7722 }
7723 }
7724
7725 argc -= optind;
7726 argv += optind;
7727
7728 if (argc < 1) {
7729 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7730 usage(B_FALSE);
7731 }
7732
7733 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7734 B_FALSE, scrub_callback, &cb));
7735 }
7736
7737 /*
7738 * zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
7739 *
7740 * -c Cancel. Ends any in-progress trim.
7741 * -d Secure trim. Requires kernel and device support.
7742 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
7743 * adding a multiplier suffix such as 'k' or 'm'.
7744 * -s Suspend. TRIM can then be restarted with no flags.
7745 * -w Wait. Blocks until trimming has completed.
7746 */
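/*
 * Illustrative invocations (pool and device names are hypothetical):
 *
 *	zpool trim tank            # trim every leaf vdev in the pool
 *	zpool trim -r 500m tank    # limit the rate to 500 MiB/s
 *	zpool trim -w tank sda     # trim one device and wait for completion
 *	zpool trim -c tank         # cancel an in-progress trim
 */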
7747 int
7748 zpool_do_trim(int argc, char **argv)
7749 {
7750 struct option long_options[] = {
7751 {"cancel", no_argument, NULL, 'c'},
7752 {"secure", no_argument, NULL, 'd'},
7753 {"rate", required_argument, NULL, 'r'},
7754 {"suspend", no_argument, NULL, 's'},
7755 {"wait", no_argument, NULL, 'w'},
7756 {0, 0, 0, 0}
7757 };
7758
7759 pool_trim_func_t cmd_type = POOL_TRIM_START;
7760 uint64_t rate = 0;
7761 boolean_t secure = B_FALSE;
7762 boolean_t wait = B_FALSE;
7763
7764 int c;
7765 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
7766 != -1) {
7767 switch (c) {
7768 case 'c':
7769 if (cmd_type != POOL_TRIM_START &&
7770 cmd_type != POOL_TRIM_CANCEL) {
7771 (void) fprintf(stderr, gettext("-c cannot be "
7772 "combined with other options\n"));
7773 usage(B_FALSE);
7774 }
7775 cmd_type = POOL_TRIM_CANCEL;
7776 break;
7777 case 'd':
7778 if (cmd_type != POOL_TRIM_START) {
7779 (void) fprintf(stderr, gettext("-d cannot be "
7780 "combined with the -c or -s options\n"));
7781 usage(B_FALSE);
7782 }
7783 secure = B_TRUE;
7784 break;
7785 case 'r':
7786 if (cmd_type != POOL_TRIM_START) {
7787 (void) fprintf(stderr, gettext("-r cannot be "
7788 "combined with the -c or -s options\n"));
7789 usage(B_FALSE);
7790 }
7791 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
7792 (void) fprintf(stderr, "%s: %s\n",
7793 gettext("invalid value for rate"),
7794 libzfs_error_description(g_zfs));
7795 usage(B_FALSE);
7796 }
7797 break;
7798 case 's':
7799 if (cmd_type != POOL_TRIM_START &&
7800 cmd_type != POOL_TRIM_SUSPEND) {
7801 (void) fprintf(stderr, gettext("-s cannot be "
7802 "combined with other options\n"));
7803 usage(B_FALSE);
7804 }
7805 cmd_type = POOL_TRIM_SUSPEND;
7806 break;
7807 case 'w':
7808 wait = B_TRUE;
7809 break;
7810 case '?':
7811 if (optopt != 0) {
7812 (void) fprintf(stderr,
7813 gettext("invalid option '%c'\n"), optopt);
7814 } else {
7815 (void) fprintf(stderr,
7816 gettext("invalid option '%s'\n"),
7817 argv[optind - 1]);
7818 }
7819 usage(B_FALSE);
7820 }
7821 }
7822
7823 argc -= optind;
7824 argv += optind;
7825
7826 if (argc < 1) {
7827 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7828 usage(B_FALSE);
7829 return (-1);
7830 }
7831
7832 if (wait && (cmd_type != POOL_TRIM_START)) {
7833 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
7834 "-s\n"));
7835 usage(B_FALSE);
7836 }
7837
7838 char *poolname = argv[0];
7839 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
7840 if (zhp == NULL)
7841 return (-1);
7842
7843 trimflags_t trim_flags = {
7844 .secure = secure,
7845 .rate = rate,
7846 .wait = wait,
7847 };
7848
7849 nvlist_t *vdevs = fnvlist_alloc();
7850 if (argc == 1) {
7851 /* no individual leaf vdevs specified, so add them all */
7852 nvlist_t *config = zpool_get_config(zhp, NULL);
7853 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
7854 ZPOOL_CONFIG_VDEV_TREE);
7855 zpool_collect_leaves(zhp, nvroot, vdevs);
7856 trim_flags.fullpool = B_TRUE;
7857 } else {
7858 trim_flags.fullpool = B_FALSE;
7859 for (int i = 1; i < argc; i++) {
7860 fnvlist_add_boolean(vdevs, argv[i]);
7861 }
7862 }
7863
7864 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
7865
7866 fnvlist_free(vdevs);
7867 zpool_close(zhp);
7868
7869 return (error);
7870 }
7871
7872 /*
7873 * Converts a total number of seconds to a human-readable string broken
7874 * down into days/hours/minutes/seconds.
7875 */
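/*
 * For example, 93784 seconds formats as "1 days 02:03:04" and 3661 seconds
 * as "01:01:01".
 */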
7876 static void
7877 secs_to_dhms(uint64_t total, char *buf)
7878 {
7879 uint64_t days = total / 60 / 60 / 24;
7880 uint64_t hours = (total / 60 / 60) % 24;
7881 uint64_t mins = (total / 60) % 60;
7882 uint64_t secs = (total % 60);
7883
7884 if (days > 0) {
7885 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
7886 (u_longlong_t)days, (u_longlong_t)hours,
7887 (u_longlong_t)mins, (u_longlong_t)secs);
7888 } else {
7889 (void) sprintf(buf, "%02llu:%02llu:%02llu",
7890 (u_longlong_t)hours, (u_longlong_t)mins,
7891 (u_longlong_t)secs);
7892 }
7893 }
7894
7895 /*
7896 * Print out detailed error scrub status.
7897 */
7898 static void
7899 print_err_scrub_status(pool_scan_stat_t *ps)
7900 {
7901 time_t start, end, pause;
7902 uint64_t total_secs_left;
7903 uint64_t secs_left, mins_left, hours_left, days_left;
7904 uint64_t examined, to_be_examined;
7905
7906 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
7907 return;
7908 }
7909
7910 (void) printf(gettext(" scrub: "));
7911
7912 start = ps->pss_error_scrub_start;
7913 end = ps->pss_error_scrub_end;
7914 pause = ps->pss_pass_error_scrub_pause;
7915 examined = ps->pss_error_scrub_examined;
7916 to_be_examined = ps->pss_error_scrub_to_be_examined;
7917
7918 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
7919
7920 if (ps->pss_error_scrub_state == DSS_FINISHED) {
7921 total_secs_left = end - start;
7922 days_left = total_secs_left / 60 / 60 / 24;
7923 hours_left = (total_secs_left / 60 / 60) % 24;
7924 mins_left = (total_secs_left / 60) % 60;
7925 secs_left = (total_secs_left % 60);
7926
7927 (void) printf(gettext("scrubbed %llu error blocks in %llu days "
7928 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
7929 (u_longlong_t)days_left, (u_longlong_t)hours_left,
7930 (u_longlong_t)mins_left, (u_longlong_t)secs_left,
7931 ctime(&end));
7932
7933 return;
7934 } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
7935 (void) printf(gettext("error scrub canceled on %s"),
7936 ctime(&end));
7937 return;
7938 }
7939 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
7940
7941 /* Error scrub is in progress. */
7942 if (pause == 0) {
7943 (void) printf(gettext("error scrub in progress since %s"),
7944 ctime(&start));
7945 } else {
7946 (void) printf(gettext("error scrub paused since %s"),
7947 ctime(&pause));
7948 (void) printf(gettext("\terror scrub started on %s"),
7949 ctime(&start));
7950 }
7951
7952 double fraction_done = (double)examined / (to_be_examined + examined);
7953 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
7954 " blocks"), 100 * fraction_done, (u_longlong_t)examined);
7955
7956 (void) printf("\n");
7957 }
7958
7959 /*
7960 * Print out detailed scrub status.
7961 */
7962 static void
7963 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
7964 {
7965 time_t start, end, pause;
7966 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
7967 uint64_t elapsed, scan_rate, issue_rate;
7968 double fraction_done;
7969 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
7970 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
7971
7972 printf(" ");
7973 printf_color(ANSI_BOLD, gettext("scan:"));
7974 printf(" ");
7975
7976 /* If there's never been a scan, there's not much to say. */
7977 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
7978 ps->pss_func >= POOL_SCAN_FUNCS) {
7979 (void) printf(gettext("none requested\n"));
7980 return;
7981 }
7982
7983 start = ps->pss_start_time;
7984 end = ps->pss_end_time;
7985 pause = ps->pss_pass_scrub_pause;
7986
7987 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
7988
7989 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
7990 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
7991 assert(is_resilver || is_scrub);
7992
7993 /* Scan is finished or canceled. */
7994 if (ps->pss_state == DSS_FINISHED) {
7995 secs_to_dhms(end - start, time_buf);
7996
7997 if (is_scrub) {
7998 (void) printf(gettext("scrub repaired %s "
7999 "in %s with %llu errors on %s"), processed_buf,
8000 time_buf, (u_longlong_t)ps->pss_errors,
8001 ctime(&end));
8002 } else if (is_resilver) {
8003 (void) printf(gettext("resilvered %s "
8004 "in %s with %llu errors on %s"), processed_buf,
8005 time_buf, (u_longlong_t)ps->pss_errors,
8006 ctime(&end));
8007 }
8008 return;
8009 } else if (ps->pss_state == DSS_CANCELED) {
8010 if (is_scrub) {
8011 (void) printf(gettext("scrub canceled on %s"),
8012 ctime(&end));
8013 } else if (is_resilver) {
8014 (void) printf(gettext("resilver canceled on %s"),
8015 ctime(&end));
8016 }
8017 return;
8018 }
8019
8020 assert(ps->pss_state == DSS_SCANNING);
8021
8022 /* Scan is in progress. Resilvers can't be paused. */
8023 if (is_scrub) {
8024 if (pause == 0) {
8025 (void) printf(gettext("scrub in progress since %s"),
8026 ctime(&start));
8027 } else {
8028 (void) printf(gettext("scrub paused since %s"),
8029 ctime(&pause));
8030 (void) printf(gettext("\tscrub started on %s"),
8031 ctime(&start));
8032 }
8033 } else if (is_resilver) {
8034 (void) printf(gettext("resilver in progress since %s"),
8035 ctime(&start));
8036 }
8037
8038 scanned = ps->pss_examined;
8039 pass_scanned = ps->pss_pass_exam;
8040 issued = ps->pss_issued;
8041 pass_issued = ps->pss_pass_issued;
8042 total_s = ps->pss_to_examine;
8043 total_i = ps->pss_to_examine - ps->pss_skipped;
8044
8045 /* we are only done with a block once we have issued the IO for it */
8046 fraction_done = (double)issued / total_i;
8047
8048 /* elapsed time for this pass, rounding up to 1 if it's 0 */
8049 elapsed = time(NULL) - ps->pss_pass_start;
8050 elapsed -= ps->pss_pass_scrub_spent_paused;
8051 elapsed = (elapsed != 0) ? elapsed : 1;
8052
8053 scan_rate = pass_scanned / elapsed;
8054 issue_rate = pass_issued / elapsed;
8055
8056 /* format all of the numbers we will be reporting */
8057 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
8058 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
8059 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
8060 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
8061
8062 /* do not print estimated time if we have a paused scrub */
8063 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
8064 if (pause == 0 && scan_rate > 0) {
8065 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
8066 (void) printf(gettext(" at %s/s"), srate_buf);
8067 }
8068 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
8069 if (pause == 0 && issue_rate > 0) {
8070 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
8071 (void) printf(gettext(" at %s/s"), irate_buf);
8072 }
8073 (void) printf(gettext("\n"));
8074
8075 if (is_resilver) {
8076 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8077 processed_buf, 100 * fraction_done);
8078 } else if (is_scrub) {
8079 (void) printf(gettext("\t%s repaired, %.2f%% done"),
8080 processed_buf, 100 * fraction_done);
8081 }
8082
8083 if (pause == 0) {
8084 /*
8085 * Only provide an estimate iff:
8086 * 1) we haven't yet issued all we expected, and
8087 * 2) the issue rate exceeds 10 MB/s, and
8088 * 3) it's either:
8089 * a) a resilver which has started repairs, or
8090 * b) a scrub which has entered the issue phase.
8091 */
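/*
 * Illustrative example (hypothetical numbers): with 600 GiB still to
 * issue (total_i - issued) at an issue_rate of 200 MiB/s, the call
 * below computes 600*1024/200 = 3072 seconds, which secs_to_dhms()
 * renders as "00:51:12 to go".
 */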
8092 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
8093 ((is_resilver && ps->pss_processed > 0) ||
8094 (is_scrub && issued > 0))) {
8095 secs_to_dhms((total_i - issued) / issue_rate, time_buf);
8096 (void) printf(gettext(", %s to go\n"), time_buf);
8097 } else {
8098 (void) printf(gettext(", no estimated "
8099 "completion time\n"));
8100 }
8101 } else {
8102 (void) printf(gettext("\n"));
8103 }
8104 }
8105
8106 static void
8107 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
8108 {
8109 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
8110 return;
8111
8112 printf(" ");
8113 printf_color(ANSI_BOLD, gettext("scan:"));
8114 printf(" ");
8115
8116 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
8117 uint64_t bytes_issued = vrs->vrs_bytes_issued;
8118 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
8119 uint64_t bytes_est_s = vrs->vrs_bytes_est;
8120 uint64_t bytes_est_i = vrs->vrs_bytes_est;
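/*
 * Only subtract vrs_pass_bytes_skipped when the stats array returned
 * by the module ('c' uint64_t words) is long enough to contain that
 * field; older modules may return a shorter array that lacks it.
 */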
8121 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
8122 bytes_est_i -= vrs->vrs_pass_bytes_skipped;
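/*
 * Per-pass rates: bytes handled during this pass divided by the pass
 * time in milliseconds (plus one to avoid dividing by zero), scaled
 * by 1000 to yield bytes per second.
 */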
8123 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
8124 (vrs->vrs_pass_time_ms + 1)) * 1000;
8125 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
8126 (vrs->vrs_pass_time_ms + 1)) * 1000;
8127 double scan_pct = MIN((double)bytes_scanned * 100 /
8128 (bytes_est_s + 1), 100);
8129
8130 /* Format all of the numbers we will be reporting */
8131 char bytes_scanned_buf[7], bytes_issued_buf[7];
8132 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
8133 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
8134 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
8135 sizeof (bytes_scanned_buf));
8136 zfs_nicebytes(bytes_issued, bytes_issued_buf,
8137 sizeof (bytes_issued_buf));
8138 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
8139 sizeof (bytes_rebuilt_buf));
8140 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
8141 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
8142
8143 time_t start = vrs->vrs_start_time;
8144 time_t end = vrs->vrs_end_time;
8145
8146 /* Rebuild is finished or canceled. */
8147 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
8148 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
8149 (void) printf(gettext("resilvered (%s) %s in %s "
8150 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
8151 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
8152 return;
8153 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
8154 (void) printf(gettext("resilver (%s) canceled on %s"),
8155 vdev_name, ctime(&end));
8156 return;
8157 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8158 (void) printf(gettext("resilver (%s) in progress since %s"),
8159 vdev_name, ctime(&start));
8160 }
8161
8162 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
8163
8164 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
8165 bytes_est_s_buf);
8166 if (scan_rate > 0) {
8167 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
8168 (void) printf(gettext(" at %s/s"), scan_rate_buf);
8169 }
8170 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
8171 bytes_est_i_buf);
8172 if (issue_rate > 0) {
8173 zfs_nicebytes(issue_rate, issue_rate_buf,
8174 sizeof (issue_rate_buf));
8175 (void) printf(gettext(" at %s/s"), issue_rate_buf);
8176 }
8177 (void) printf(gettext("\n"));
8178
8179 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8180 bytes_rebuilt_buf, scan_pct);
8181
8182 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8183 if (bytes_est_s >= bytes_scanned &&
8184 scan_rate >= 10 * 1024 * 1024) {
8185 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
8186 time_buf);
8187 (void) printf(gettext(", %s to go\n"), time_buf);
8188 } else {
8189 (void) printf(gettext(", no estimated "
8190 "completion time\n"));
8191 }
8192 } else {
8193 (void) printf(gettext("\n"));
8194 }
8195 }
8196
8197 /*
8198 * Print rebuild status for top-level vdevs.
8199 */
8200 static void
8201 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
8202 {
8203 nvlist_t **child;
8204 uint_t children;
8205
8206 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8207 &child, &children) != 0)
8208 children = 0;
8209
8210 for (uint_t c = 0; c < children; c++) {
8211 vdev_rebuild_stat_t *vrs;
8212 uint_t i;
8213
8214 if (nvlist_lookup_uint64_array(child[c],
8215 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
8216 char *name = zpool_vdev_name(g_zfs, zhp,
8217 child[c], VDEV_NAME_TYPE_ID);
8218 print_rebuild_status_impl(vrs, i, name);
8219 free(name);
8220 }
8221 }
8222 }
8223
8224 /*
8225 * Because we don't scrub checkpointed blocks, warn the user that some
8226 * blocks were skipped if a checkpoint exists or existed at any time
8227 * during the scan. If a sequential (rather than healing) reconstruction
8228 * was performed, the blocks were reconstructed but their checksums
8229 * have not been verified, so we still print the warning.
8230 */
8231 static void
8232 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
8233 {
8234 if (ps == NULL || pcs == NULL)
8235 return;
8236
8237 if (pcs->pcs_state == CS_NONE ||
8238 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
8239 return;
8240
8241 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
8242
8243 if (ps->pss_state == DSS_NONE)
8244 return;
8245
8246 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
8247 ps->pss_end_time < pcs->pcs_start_time)
8248 return;
8249
8250 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
8251 (void) printf(gettext(" scan warning: skipped blocks "
8252 "that are only referenced by the checkpoint.\n"));
8253 } else {
8254 assert(ps->pss_state == DSS_SCANNING);
8255 (void) printf(gettext(" scan warning: skipping blocks "
8256 "that are only referenced by the checkpoint.\n"));
8257 }
8258 }
8259
8260 /*
8261 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
8262 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
8263 * the last completed (or canceled) rebuild.
8264 */
8265 static boolean_t
8266 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
8267 {
8268 nvlist_t **child;
8269 uint_t children;
8270 boolean_t rebuilding = B_FALSE;
8271 uint64_t end_time = 0;
8272
8273 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8274 &child, &children) != 0)
8275 children = 0;
8276
8277 for (uint_t c = 0; c < children; c++) {
8278 vdev_rebuild_stat_t *vrs;
8279 uint_t i;
8280
8281 if (nvlist_lookup_uint64_array(child[c],
8282 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
8283
8284 if (vrs->vrs_end_time > end_time)
8285 end_time = vrs->vrs_end_time;
8286
8287 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8288 rebuilding = B_TRUE;
8289 end_time = 0;
8290 break;
8291 }
8292 }
8293 }
8294
8295 if (rebuild_end_time != NULL)
8296 *rebuild_end_time = end_time;
8297
8298 return (rebuilding);
8299 }
8300
8301 /*
8302 * Print the scan status.
8303 */
8304 static void
8305 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
8306 {
8307 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
8308 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
8309 boolean_t have_errorscrub = B_FALSE;
8310 boolean_t active_resilver = B_FALSE;
8311 pool_checkpoint_stat_t *pcs = NULL;
8312 pool_scan_stat_t *ps = NULL;
8313 uint_t c;
8314 time_t scrub_start = 0, errorscrub_start = 0;
8315
8316 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
8317 (uint64_t **)&ps, &c) == 0) {
8318 if (ps->pss_func == POOL_SCAN_RESILVER) {
8319 resilver_end_time = ps->pss_end_time;
8320 active_resilver = (ps->pss_state == DSS_SCANNING);
8321 }
8322
8323 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
8324 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
8325 scrub_start = ps->pss_start_time;
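/*
 * The error scrub fields are only consulted when the stats array
 * returned by the module is long enough to include
 * pss_pass_error_scrub_pause; older modules may return a shorter
 * array without them.
 */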
8326 if (c > offsetof(pool_scan_stat_t,
8327 pss_pass_error_scrub_pause) / 8) {
8328 have_errorscrub = (ps->pss_error_scrub_func ==
8329 POOL_SCAN_ERRORSCRUB);
8330 errorscrub_start = ps->pss_error_scrub_start;
8331 }
8332 }
8333
8334 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
8335 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
8336
8337 /* Print the most recent scrub or error scrub status, if available. */
8338 if (have_scrub && scrub_start > errorscrub_start)
8339 print_scan_scrub_resilver_status(ps);
8340 else if (have_errorscrub && errorscrub_start >= scrub_start)
8341 print_err_scrub_status(ps);
8342
8343 /*
8344 * When there is an active resilver or rebuild print its status.
8345 * Otherwise print the status of the last resilver or rebuild.
8346 */
8347 if (active_resilver || (!active_rebuild && have_resilver &&
8348 resilver_end_time && resilver_end_time > rebuild_end_time)) {
8349 print_scan_scrub_resilver_status(ps);
8350 } else if (active_rebuild || (!active_resilver && have_rebuild &&
8351 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
8352 print_rebuild_status(zhp, nvroot);
8353 }
8354
8355 (void) nvlist_lookup_uint64_array(nvroot,
8356 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8357 print_checkpoint_scan_warning(ps, pcs);
8358 }
8359
8360 /*
8361 * Print out detailed removal status.
8362 */
8363 static void
8364 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
8365 {
8366 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
8367 time_t start, end;
8368 nvlist_t *config, *nvroot;
8369 nvlist_t **child;
8370 uint_t children;
8371 char *vdev_name;
8372
8373 if (prs == NULL || prs->prs_state == DSS_NONE)
8374 return;
8375
8376 /*
8377 * Determine name of vdev.
8378 */
8379 config = zpool_get_config(zhp, NULL);
8380 nvroot = fnvlist_lookup_nvlist(config,
8381 ZPOOL_CONFIG_VDEV_TREE);
8382 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8383 &child, &children) == 0);
8384 assert(prs->prs_removing_vdev < children);
8385 vdev_name = zpool_vdev_name(g_zfs, zhp,
8386 child[prs->prs_removing_vdev], B_TRUE);
8387
8388 printf_color(ANSI_BOLD, gettext("remove: "));
8389
8390 start = prs->prs_start_time;
8391 end = prs->prs_end_time;
8392 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
8393
8394 /*
8395 * Removal is finished or canceled.
8396 */
8397 if (prs->prs_state == DSS_FINISHED) {
8398 uint64_t minutes_taken = (end - start) / 60;
8399
8400 (void) printf(gettext("Removal of vdev %llu copied %s "
8401 "in %lluh%um, completed on %s"),
8402 (longlong_t)prs->prs_removing_vdev,
8403 copied_buf,
8404 (u_longlong_t)(minutes_taken / 60),
8405 (uint_t)(minutes_taken % 60),
8406 ctime((time_t *)&end));
8407 } else if (prs->prs_state == DSS_CANCELED) {
8408 (void) printf(gettext("Removal of %s canceled on %s"),
8409 vdev_name, ctime(&end));
8410 } else {
8411 uint64_t copied, total, elapsed, mins_left, hours_left;
8412 double fraction_done;
8413 uint_t rate;
8414
8415 assert(prs->prs_state == DSS_SCANNING);
8416
8417 /*
8418 * Removal is in progress.
8419 */
8420 (void) printf(gettext(
8421 "Evacuation of %s in progress since %s"),
8422 vdev_name, ctime(&start));
8423
8424 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
8425 total = prs->prs_to_copy;
8426 fraction_done = (double)copied / total;
8427
8428 /* elapsed time for this pass */
8429 elapsed = time(NULL) - prs->prs_start_time;
8430 elapsed = elapsed > 0 ? elapsed : 1;
8431 rate = copied / elapsed;
8432 rate = rate > 0 ? rate : 1;
8433 mins_left = ((total - copied) / rate) / 60;
8434 hours_left = mins_left / 60;
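/*
 * Illustrative example (hypothetical numbers): 1 TiB left to copy at
 * 100 MiB/s is about 10485 seconds, giving mins_left = 174 and
 * hours_left = 2, reported below as "2h54m to go".
 */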
8435
8436 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
8437 zfs_nicenum(total, total_buf, sizeof (total_buf));
8438 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
8439
8440 /*
8441 * do not print estimated time if hours_left is more than
8442 * 30 days
8443 */
8444 (void) printf(gettext(
8445 "\t%s copied out of %s at %s/s, %.2f%% done"),
8446 examined_buf, total_buf, rate_buf, 100 * fraction_done);
8447 if (hours_left < (30 * 24)) {
8448 (void) printf(gettext(", %lluh%um to go\n"),
8449 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
8450 } else {
8451 (void) printf(gettext(
8452 ", (copy is slow, no estimated time)\n"));
8453 }
8454 }
8455 free(vdev_name);
8456
8457 if (prs->prs_mapping_memory > 0) {
8458 char mem_buf[7];
8459 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
8460 (void) printf(gettext(
8461 "\t%s memory used for removed device mappings\n"),
8462 mem_buf);
8463 }
8464 }
8465
8466 /*
8467 * Print out detailed raidz expansion status.
8468 */
8469 static void
8470 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
8471 {
8472 char copied_buf[7];
8473
8474 if (pres == NULL || pres->pres_state == DSS_NONE)
8475 return;
8476
8477 /*
8478 * Determine name of vdev.
8479 */
8480 nvlist_t *config = zpool_get_config(zhp, NULL);
8481 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
8482 ZPOOL_CONFIG_VDEV_TREE);
8483 nvlist_t **child;
8484 uint_t children;
8485 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8486 &child, &children) == 0);
8487 assert(pres->pres_expanding_vdev < children);
8488
8489 printf_color(ANSI_BOLD, gettext("expand: "));
8490
8491 time_t start = pres->pres_start_time;
8492 time_t end = pres->pres_end_time;
8493 char *vname =
8494 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
8495 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
8496
8497 /*
8498 * Expansion is finished or canceled.
8499 */
8500 if (pres->pres_state == DSS_FINISHED) {
8501 char time_buf[32];
8502 secs_to_dhms(end - start, time_buf);
8503
8504 (void) printf(gettext("expanded %s-%u copied %s in %s, "
8505 "on %s"), vname, (int)pres->pres_expanding_vdev,
8506 copied_buf, time_buf, ctime((time_t *)&end));
8507 } else {
8508 char examined_buf[7], total_buf[7], rate_buf[7];
8509 uint64_t copied, total, elapsed, secs_left;
8510 double fraction_done;
8511 uint_t rate;
8512
8513 assert(pres->pres_state == DSS_SCANNING);
8514
8515 /*
8516 * Expansion is in progress.
8517 */
8518 (void) printf(gettext(
8519 "expansion of %s-%u in progress since %s"),
8520 vname, (int)pres->pres_expanding_vdev, ctime(&start));
8521
8522 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
8523 total = pres->pres_to_reflow;
8524 fraction_done = (double)copied / total;
8525
8526 /* elapsed time for this pass */
8527 elapsed = time(NULL) - pres->pres_start_time;
8528 elapsed = elapsed > 0 ? elapsed : 1;
8529 rate = copied / elapsed;
8530 rate = rate > 0 ? rate : 1;
8531 secs_left = (total - copied) / rate;
8532
8533 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
8534 zfs_nicenum(total, total_buf, sizeof (total_buf));
8535 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
8536
8537 /*
8538 * do not print the estimated time if secs_left is more than
8539 * 30 days
8540 */
8541 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
8542 examined_buf, total_buf, rate_buf, 100 * fraction_done);
8543 if (pres->pres_waiting_for_resilver) {
8544 (void) printf(gettext(", paused for resilver or "
8545 "clear\n"));
8546 } else if (secs_left < (30 * 24 * 3600)) {
8547 char time_buf[32];
8548 secs_to_dhms(secs_left, time_buf);
8549 (void) printf(gettext(", %s to go\n"), time_buf);
8550 } else {
8551 (void) printf(gettext(
8552 ", (copy is slow, no estimated time)\n"));
8553 }
8554 }
8555 free(vname);
8556 }
8557 static void
8558 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
8559 {
8560 time_t start;
8561 char space_buf[7];
8562
8563 if (pcs == NULL || pcs->pcs_state == CS_NONE)
8564 return;
8565
8566 (void) printf(gettext("checkpoint: "));
8567
8568 start = pcs->pcs_start_time;
8569 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
8570
8571 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
8572 char *date = ctime(&start);
8573
8574 /*
8575 * ctime() adds a newline at the end of the generated
8576 * string, thus the weird format specifier and the
8577 * strlen() call used to chop it off from the output.
8578 */
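/*
 * Illustrative example (hypothetical date): ctime() might return
 * "Mon Jan  2 15:04:05 2023\n"; printing strlen(date) - 1 characters
 * drops the trailing newline.
 */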
8579 (void) printf(gettext("created %.*s, consumes %s\n"),
8580 (int)(strlen(date) - 1), date, space_buf);
8581 return;
8582 }
8583
8584 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8585
8586 (void) printf(gettext("discarding, %s remaining.\n"),
8587 space_buf);
8588 }
8589
8590 static void
8591 print_error_log(zpool_handle_t *zhp)
8592 {
8593 nvlist_t *nverrlist = NULL;
8594 nvpair_t *elem;
8595 char *pathname;
8596 size_t len = MAXPATHLEN * 2;
8597
8598 if (zpool_get_errlog(zhp, &nverrlist) != 0)
8599 return;
8600
8601 (void) printf("errors: Permanent errors have been "
8602 "detected in the following files:\n\n");
8603
8604 pathname = safe_malloc(len);
8605 elem = NULL;
8606 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
8607 nvlist_t *nv;
8608 uint64_t dsobj, obj;
8609
8610 verify(nvpair_value_nvlist(elem, &nv) == 0);
8611 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
8612 &dsobj) == 0);
8613 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
8614 &obj) == 0);
8615 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
8616 (void) printf("%7s %s\n", "", pathname);
8617 }
8618 free(pathname);
8619 nvlist_free(nverrlist);
8620 }
8621
8622 static void
8623 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
8624 uint_t nspares)
8625 {
8626 uint_t i;
8627 char *name;
8628
8629 if (nspares == 0)
8630 return;
8631
8632 (void) printf(gettext("\tspares\n"));
8633
8634 for (i = 0; i < nspares; i++) {
8635 name = zpool_vdev_name(g_zfs, zhp, spares[i],
8636 cb->cb_name_flags);
8637 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
8638 free(name);
8639 }
8640 }
8641
8642 static void
8643 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
8644 uint_t nl2cache)
8645 {
8646 uint_t i;
8647 char *name;
8648
8649 if (nl2cache == 0)
8650 return;
8651
8652 (void) printf(gettext("\tcache\n"));
8653
8654 for (i = 0; i < nl2cache; i++) {
8655 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
8656 cb->cb_name_flags);
8657 print_status_config(zhp, cb, name, l2cache[i], 2,
8658 B_FALSE, NULL);
8659 free(name);
8660 }
8661 }
8662
8663 static void
8664 print_dedup_stats(nvlist_t *config)
8665 {
8666 ddt_histogram_t *ddh;
8667 ddt_stat_t *dds;
8668 ddt_object_t *ddo;
8669 uint_t c;
8670 char dspace[6], mspace[6];
8671
8672 /*
8673 * If the pool was faulted then we may not have been able to
8674 * obtain the config. Otherwise, if we have anything in the dedup
8675 * table, continue processing the stats.
8676 */
8677 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
8678 (uint64_t **)&ddo, &c) != 0)
8679 return;
8680
8681 (void) printf("\n");
8682 (void) printf(gettext(" dedup: "));
8683 if (ddo->ddo_count == 0) {
8684 (void) printf(gettext("no DDT entries\n"));
8685 return;
8686 }
8687
8688 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
8689 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
8690 (void) printf("DDT entries %llu, size %s on disk, %s in core\n",
8691 (u_longlong_t)ddo->ddo_count,
8692 dspace,
8693 mspace);
8694
8695 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
8696 (uint64_t **)&dds, &c) == 0);
8697 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
8698 (uint64_t **)&ddh, &c) == 0);
8699 zpool_dump_ddt(dds, ddh);
8700 }
8701
8702 /*
8703 * Display a summary of pool status. Displays a summary such as:
8704 *
8705 * pool: tank
8706 * status: DEGRADED
8707 * reason: One or more devices ...
8708 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
8709 * config:
8710 * mirror DEGRADED
8711 * c1t0d0 OK
8712 * c2t0d0 UNAVAIL
8713 *
8714 * When given the '-v' option, we print out the complete config. If the '-e'
8715 * option is specified, only unhealthy vdevs are displayed.
8716 */
8717 static int
8718 status_callback(zpool_handle_t *zhp, void *data)
8719 {
8720 status_cbdata_t *cbp = data;
8721 nvlist_t *config, *nvroot;
8722 const char *msgid;
8723 zpool_status_t reason;
8724 zpool_errata_t errata;
8725 const char *health;
8726 uint_t c;
8727 vdev_stat_t *vs;
8728
8729 config = zpool_get_config(zhp, NULL);
8730 reason = zpool_get_status(zhp, &msgid, &errata);
8731
8732 cbp->cb_count++;
8733
8734 /*
8735 * If we were given 'zpool status -x', only report those pools with
8736 * problems.
8737 */
8738 if (cbp->cb_explain &&
8739 (reason == ZPOOL_STATUS_OK ||
8740 reason == ZPOOL_STATUS_VERSION_OLDER ||
8741 reason == ZPOOL_STATUS_FEAT_DISABLED ||
8742 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
8743 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
8744 if (!cbp->cb_allpools) {
8745 (void) printf(gettext("pool '%s' is healthy\n"),
8746 zpool_get_name(zhp));
8747 if (cbp->cb_first)
8748 cbp->cb_first = B_FALSE;
8749 }
8750 return (0);
8751 }
8752
8753 if (cbp->cb_first)
8754 cbp->cb_first = B_FALSE;
8755 else
8756 (void) printf("\n");
8757
8758 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8759 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
8760 (uint64_t **)&vs, &c) == 0);
8761
8762 health = zpool_get_state_str(zhp);
8763
8764 printf(" ");
8765 printf_color(ANSI_BOLD, gettext("pool:"));
8766 printf(" %s\n", zpool_get_name(zhp));
8767 fputc(' ', stdout);
8768 printf_color(ANSI_BOLD, gettext("state: "));
8769
8770 printf_color(health_str_to_color(health), "%s", health);
8771
8772 fputc('\n', stdout);
8773
8774 switch (reason) {
8775 case ZPOOL_STATUS_MISSING_DEV_R:
8776 printf_color(ANSI_BOLD, gettext("status: "));
8777 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8778 "not be opened. Sufficient replicas exist for\n\tthe pool "
8779 "to continue functioning in a degraded state.\n"));
8780 printf_color(ANSI_BOLD, gettext("action: "));
8781 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8782 "and online it using 'zpool online'.\n"));
8783 break;
8784
8785 case ZPOOL_STATUS_MISSING_DEV_NR:
8786 printf_color(ANSI_BOLD, gettext("status: "));
8787 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8788 "not be opened. There are insufficient\n\treplicas for the"
8789 " pool to continue functioning.\n"));
8790 printf_color(ANSI_BOLD, gettext("action: "));
8791 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8792 "and online it using 'zpool online'.\n"));
8793 break;
8794
8795 case ZPOOL_STATUS_CORRUPT_LABEL_R:
8796 printf_color(ANSI_BOLD, gettext("status: "));
8797 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8798 "not be used because the label is missing or\n\tinvalid. "
8799 "Sufficient replicas exist for the pool to continue\n\t"
8800 "functioning in a degraded state.\n"));
8801 printf_color(ANSI_BOLD, gettext("action: "));
8802 printf_color(ANSI_YELLOW, gettext("Replace the device using "
8803 "'zpool replace'.\n"));
8804 break;
8805
8806 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
8807 printf_color(ANSI_BOLD, gettext("status: "));
8808 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8809 "not be used because the label is missing \n\tor invalid. "
8810 "There are insufficient replicas for the pool to "
8811 "continue\n\tfunctioning.\n"));
8812 zpool_explain_recover(zpool_get_handle(zhp),
8813 zpool_get_name(zhp), reason, config);
8814 break;
8815
8816 case ZPOOL_STATUS_FAILING_DEV:
8817 printf_color(ANSI_BOLD, gettext("status: "));
8818 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8819 "experienced an unrecoverable error. An\n\tattempt was "
8820 "made to correct the error. Applications are "
8821 "unaffected.\n"));
8822 printf_color(ANSI_BOLD, gettext("action: "));
8823 printf_color(ANSI_YELLOW, gettext("Determine if the "
8824 "device needs to be replaced, and clear the errors\n\tusing"
8825 " 'zpool clear' or replace the device with 'zpool "
8826 "replace'.\n"));
8827 break;
8828
8829 case ZPOOL_STATUS_OFFLINE_DEV:
8830 printf_color(ANSI_BOLD, gettext("status: "));
8831 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8832 "been taken offline by the administrator.\n\tSufficient "
8833 "replicas exist for the pool to continue functioning in "
8834 "a\n\tdegraded state.\n"));
8835 printf_color(ANSI_BOLD, gettext("action: "));
8836 printf_color(ANSI_YELLOW, gettext("Online the device "
8837 "using 'zpool online' or replace the device with\n\t'zpool "
8838 "replace'.\n"));
8839 break;
8840
8841 case ZPOOL_STATUS_REMOVED_DEV:
8842 printf_color(ANSI_BOLD, gettext("status: "));
8843 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8844 "been removed by the administrator.\n\tSufficient "
8845 "replicas exist for the pool to continue functioning in "
8846 "a\n\tdegraded state.\n"));
8847 printf_color(ANSI_BOLD, gettext("action: "));
8848 printf_color(ANSI_YELLOW, gettext("Online the device "
8849 "using zpool online' or replace the device with\n\t'zpool "
8850 "replace'.\n"));
8851 break;
8852
8853 case ZPOOL_STATUS_RESILVERING:
8854 case ZPOOL_STATUS_REBUILDING:
8855 printf_color(ANSI_BOLD, gettext("status: "));
8856 printf_color(ANSI_YELLOW, gettext("One or more devices is "
8857 "currently being resilvered. The pool will\n\tcontinue "
8858 "to function, possibly in a degraded state.\n"));
8859 printf_color(ANSI_BOLD, gettext("action: "));
8860 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
8861 "complete.\n"));
8862 break;
8863
8864 case ZPOOL_STATUS_REBUILD_SCRUB:
8865 printf_color(ANSI_BOLD, gettext("status: "));
8866 printf_color(ANSI_YELLOW, gettext("One or more devices have "
8867 "been sequentially resilvered, scrubbing\n\tthe pool "
8868 "is recommended.\n"));
8869 printf_color(ANSI_BOLD, gettext("action: "));
8870 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
8871 "verify all data checksums.\n"));
8872 break;
8873
8874 case ZPOOL_STATUS_CORRUPT_DATA:
8875 printf_color(ANSI_BOLD, gettext("status: "));
8876 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8877 "experienced an error resulting in data\n\tcorruption. "
8878 "Applications may be affected.\n"));
8879 printf_color(ANSI_BOLD, gettext("action: "));
8880 printf_color(ANSI_YELLOW, gettext("Restore the file in question"
8881 " if possible. Otherwise restore the\n\tentire pool from "
8882 "backup.\n"));
8883 break;
8884
8885 case ZPOOL_STATUS_CORRUPT_POOL:
8886 printf_color(ANSI_BOLD, gettext("status: "));
8887 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
8888 "corrupted and the pool cannot be opened.\n"));
8889 zpool_explain_recover(zpool_get_handle(zhp),
8890 zpool_get_name(zhp), reason, config);
8891 break;
8892
8893 case ZPOOL_STATUS_VERSION_OLDER:
8894 printf_color(ANSI_BOLD, gettext("status: "));
8895 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
8896 "a legacy on-disk format. The pool can\n\tstill be used, "
8897 "but some features are unavailable.\n"));
8898 printf_color(ANSI_BOLD, gettext("action: "));
8899 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
8900 "'zpool upgrade'. Once this is done, the\n\tpool will no "
8901 "longer be accessible on software that does not support\n\t"
8902 "feature flags.\n"));
8903 break;
8904
8905 case ZPOOL_STATUS_VERSION_NEWER:
8906 printf_color(ANSI_BOLD, gettext("status: "));
8907 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
8908 "to a newer, incompatible on-disk version.\n\tThe pool "
8909 "cannot be accessed on this system.\n"));
8910 printf_color(ANSI_BOLD, gettext("action: "));
8911 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8912 "system running more recent software, or\n\trestore the "
8913 "pool from backup.\n"));
8914 break;
8915
8916 case ZPOOL_STATUS_FEAT_DISABLED:
8917 printf_color(ANSI_BOLD, gettext("status: "));
8918 printf_color(ANSI_YELLOW, gettext("Some supported and "
8919 "requested features are not enabled on the pool.\n\t"
8920 "The pool can still be used, but some features are "
8921 "unavailable.\n"));
8922 printf_color(ANSI_BOLD, gettext("action: "));
8923 printf_color(ANSI_YELLOW, gettext("Enable all features using "
8924 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
8925 "longer be accessible by software that does not support\n\t"
8926 "the features. See zpool-features(7) for details.\n"));
8927 break;
8928
8929 case ZPOOL_STATUS_COMPATIBILITY_ERR:
8930 printf_color(ANSI_BOLD, gettext("status: "));
8931 printf_color(ANSI_YELLOW, gettext("This pool has a "
8932 "compatibility list specified, but it could not be\n\t"
8933 "read/parsed at this time. The pool can still be used, "
8934 "but this\n\tshould be investigated.\n"));
8935 printf_color(ANSI_BOLD, gettext("action: "));
8936 printf_color(ANSI_YELLOW, gettext("Check the value of the "
8937 "'compatibility' property against the\n\t"
8938 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
8939 ZPOOL_DATA_COMPAT_D ".\n"));
8940 break;
8941
8942 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
8943 printf_color(ANSI_BOLD, gettext("status: "));
8944 printf_color(ANSI_YELLOW, gettext("One or more features "
8945 "are enabled on the pool despite not being\n\t"
8946 "requested by the 'compatibility' property.\n"));
8947 printf_color(ANSI_BOLD, gettext("action: "));
8948 printf_color(ANSI_YELLOW, gettext("Consider setting "
8949 "'compatibility' to an appropriate value, or\n\t"
8950 "adding needed features to the relevant file in\n\t"
8951 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
8952 break;
8953
8954 case ZPOOL_STATUS_UNSUP_FEAT_READ:
8955 printf_color(ANSI_BOLD, gettext("status: "));
8956 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8957 "on this system because it uses the\n\tfollowing feature(s)"
8958 " not supported on this system:\n"));
8959 zpool_print_unsup_feat(config);
8960 (void) printf("\n");
8961 printf_color(ANSI_BOLD, gettext("action: "));
8962 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8963 "system that supports the required feature(s),\n\tor "
8964 "restore the pool from backup.\n"));
8965 break;
8966
8967 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
8968 printf_color(ANSI_BOLD, gettext("status: "));
8969 printf_color(ANSI_YELLOW, gettext("The pool can only be "
8970 "accessed in read-only mode on this system. It\n\tcannot be"
8971 " accessed in read-write mode because it uses the "
8972 "following\n\tfeature(s) not supported on this system:\n"));
8973 zpool_print_unsup_feat(config);
8974 (void) printf("\n");
8975 printf_color(ANSI_BOLD, gettext("action: "));
8976 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8977 "in read-write mode. Import the pool with\n"
8978 "\t\"-o readonly=on\", access the pool from a system that "
8979 "supports the\n\trequired feature(s), or restore the "
8980 "pool from backup.\n"));
8981 break;
8982
8983 case ZPOOL_STATUS_FAULTED_DEV_R:
8984 printf_color(ANSI_BOLD, gettext("status: "));
8985 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8986 "faulted in response to persistent errors.\n\tSufficient "
8987 "replicas exist for the pool to continue functioning "
8988 "in a\n\tdegraded state.\n"));
8989 printf_color(ANSI_BOLD, gettext("action: "));
8990 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
8991 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
8992 break;
8993
8994 case ZPOOL_STATUS_FAULTED_DEV_NR:
8995 printf_color(ANSI_BOLD, gettext("status: "));
8996 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8997 "faulted in response to persistent errors. There are "
8998 "insufficient replicas for the pool to\n\tcontinue "
8999 "functioning.\n"));
9000 printf_color(ANSI_BOLD, gettext("action: "));
9001 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
9002 "pool from a backup source. Manually marking the device\n"
9003 "\trepaired using 'zpool clear' may allow some data "
9004 "to be recovered.\n"));
9005 break;
9006
9007 case ZPOOL_STATUS_IO_FAILURE_MMP:
9008 printf_color(ANSI_BOLD, gettext("status: "));
9009 printf_color(ANSI_YELLOW, gettext("The pool is suspended "
9010 "because multihost writes failed or were delayed;\n\t"
9011 "another system could import the pool undetected.\n"));
9012 printf_color(ANSI_BOLD, gettext("action: "));
9013 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
9014 " are connected, then reboot your system and\n\timport the "
9015 "pool.\n"));
9016 break;
9017
9018 case ZPOOL_STATUS_IO_FAILURE_WAIT:
9019 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
9020 printf_color(ANSI_BOLD, gettext("status: "));
9021 printf_color(ANSI_YELLOW, gettext("One or more devices are "
9022 "faulted in response to IO failures.\n"));
9023 printf_color(ANSI_BOLD, gettext("action: "));
9024 printf_color(ANSI_YELLOW, gettext("Make sure the affected "
9025 "devices are connected, then run 'zpool clear'.\n"));
9026 break;
9027
9028 case ZPOOL_STATUS_BAD_LOG:
9029 printf_color(ANSI_BOLD, gettext("status: "));
9030 printf_color(ANSI_YELLOW, gettext("An intent log record "
9031 "could not be read.\n"
9032 "\tWaiting for administrator intervention to fix the "
9033 "faulted pool.\n"));
9034 printf_color(ANSI_BOLD, gettext("action: "));
9035 printf_color(ANSI_YELLOW, gettext("Either restore the affected "
9036 "device(s) and run 'zpool online',\n"
9037 "\tor ignore the intent log records by running "
9038 "'zpool clear'.\n"));
9039 break;
9040
9041 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
9042 (void) printf(gettext("status: One or more devices are "
9043 "configured to use a non-native block size.\n"
9044 "\tExpect reduced performance.\n"));
9045 (void) printf(gettext("action: Replace affected devices with "
9046 "devices that support the\n\tconfigured block size, or "
9047 "migrate data to a properly configured\n\tpool.\n"));
9048 break;
9049
9050 case ZPOOL_STATUS_HOSTID_MISMATCH:
9051 printf_color(ANSI_BOLD, gettext("status: "));
9052 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
9053 " and system hostid on imported pool.\n\tThis pool was "
9054 "previously imported into a system with a different "
9055 "hostid,\n\tand then was verbatim imported into this "
9056 "system.\n"));
9057 printf_color(ANSI_BOLD, gettext("action: "));
9058 printf_color(ANSI_YELLOW, gettext("Export this pool on all "
9059 "systems on which it is imported.\n"
9060 "\tThen import it to correct the mismatch.\n"));
9061 break;
9062
9063 case ZPOOL_STATUS_ERRATA:
9064 printf_color(ANSI_BOLD, gettext("status: "));
9065 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
9066 errata);
9067
9068 switch (errata) {
9069 case ZPOOL_ERRATA_NONE:
9070 break;
9071
9072 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
9073 printf_color(ANSI_BOLD, gettext("action: "));
9074 printf_color(ANSI_YELLOW, gettext("To correct the issue"
9075 " run 'zpool scrub'.\n"));
9076 break;
9077
9078 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
9079 (void) printf(gettext("\tExisting encrypted datasets "
9080 "contain an on-disk incompatibility\n\twhich "
9081 "needs to be corrected.\n"));
9082 printf_color(ANSI_BOLD, gettext("action: "));
9083 printf_color(ANSI_YELLOW, gettext("To correct the issue"
9084 " backup existing encrypted datasets to new\n\t"
9085 "encrypted datasets and destroy the old ones. "
9086 "'zfs mount -o ro' can\n\tbe used to temporarily "
9087 "mount existing encrypted datasets readonly.\n"));
9088 break;
9089
9090 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
9091 (void) printf(gettext("\tExisting encrypted snapshots "
9092 "and bookmarks contain an on-disk\n\tincompat"
9093 "ibility. This may cause on-disk corruption if "
9094 "they are used\n\twith 'zfs recv'.\n"));
9095 printf_color(ANSI_BOLD, gettext("action: "));
9096 printf_color(ANSI_YELLOW, gettext("To correct the "
9097 "issue, enable the bookmark_v2 feature. No "
9098 "additional\n\taction is needed if there are no "
9099 "encrypted snapshots or bookmarks.\n\tIf preserving"
9100 "the encrypted snapshots and bookmarks is required,"
9101 " use\n\ta non-raw send to backup and restore them."
9102 " Alternately, they may be\n\tremoved to resolve "
9103 "the incompatibility.\n"));
9104 break;
9105
9106 default:
9107 /*
9108 * All errata which allow the pool to be imported
9109 * must contain an action message.
9110 */
9111 assert(0);
9112 }
9113 break;
9114
9115 default:
9116 /*
9117 * The remaining errors can't actually be generated, yet.
9118 */
9119 assert(reason == ZPOOL_STATUS_OK);
9120 }
9121
9122 if (msgid != NULL) {
9123 printf(" ");
9124 printf_color(ANSI_BOLD, gettext("see:"));
9125 printf(gettext(
9126 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
9127 msgid);
9128 }
9129
9130 if (config != NULL) {
9131 uint64_t nerr;
9132 nvlist_t **spares, **l2cache;
9133 uint_t nspares, nl2cache;
9134
9135 print_scan_status(zhp, nvroot);
9136
9137 pool_removal_stat_t *prs = NULL;
9138 (void) nvlist_lookup_uint64_array(nvroot,
9139 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
9140 print_removal_status(zhp, prs);
9141
9142 pool_checkpoint_stat_t *pcs = NULL;
9143 (void) nvlist_lookup_uint64_array(nvroot,
9144 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
9145 print_checkpoint_status(pcs);
9146
9147 pool_raidz_expand_stat_t *pres = NULL;
9148 (void) nvlist_lookup_uint64_array(nvroot,
9149 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
9150 print_raidz_expand_status(zhp, pres);
9151
9152 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
9153 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
9154 if (cbp->cb_namewidth < 10)
9155 cbp->cb_namewidth = 10;
9156
9157 color_start(ANSI_BOLD);
9158 (void) printf(gettext("config:\n\n"));
9159 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
9160 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
9161 "CKSUM");
9162 color_end();
9163
9164 if (cbp->cb_print_slow_ios) {
9165 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
9166 }
9167
9168 if (cbp->cb_print_power) {
9169 printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
9170 }
9171
9172 if (cbp->vcdl != NULL)
9173 print_cmd_columns(cbp->vcdl, 0);
9174
9175 printf("\n");
9176
9177 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
9178 B_FALSE, NULL);
9179
9180 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
9181 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
9182 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
9183
9184 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
9185 &l2cache, &nl2cache) == 0)
9186 print_l2cache(zhp, cbp, l2cache, nl2cache);
9187
9188 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
9189 &spares, &nspares) == 0)
9190 print_spares(zhp, cbp, spares, nspares);
9191
9192 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9193 &nerr) == 0) {
9194 (void) printf("\n");
9195 if (nerr == 0) {
9196 (void) printf(gettext(
9197 "errors: No known data errors\n"));
9198 } else if (!cbp->cb_verbose) {
9199 color_start(ANSI_RED);
9200 (void) printf(gettext("errors: %llu data "
9201 "errors, use '-v' for a list\n"),
9202 (u_longlong_t)nerr);
9203 color_end();
9204 } else {
9205 print_error_log(zhp);
9206 }
9207 }
9208
9209 if (cbp->cb_dedup_stats)
9210 print_dedup_stats(config);
9211 } else {
9212 (void) printf(gettext("config: The configuration cannot be "
9213 "determined.\n"));
9214 }
9215
9216 return (0);
9217 }
9218
9219 /*
9220 * zpool status [-c [script1,script2,...]] [-DegiLpPstvx] [--power] [-T d|u] ...
9221 * [pool] [interval [count]]
9222 *
9223 * -c CMD For each vdev, run command CMD
9224 * -D Display dedup status (undocumented)
9225 * -e Display only unhealthy vdevs
9226 * -g Display guid for individual vdev name.
9227 * -i Display vdev initialization status.
9228 * -L Follow links when resolving vdev path name.
9229 * -p Display values in parsable (exact) format.
9230 * -P Display full path for vdev name.
9231 * -s Display slow IOs column.
9232 * -t Display vdev TRIM status.
9233 * -T Display a timestamp in date(1) or Unix format
9234 * -v Display complete error logs
9235 * -x Display only pools with potential problems
9236 * --power Display vdev enclosure slot power status
9237 *
9238 * Describes the health status of all pools or some subset.
9239 */
9240 int
9241 zpool_do_status(int argc, char **argv)
9242 {
9243 int c;
9244 int ret;
9245 float interval = 0;
9246 unsigned long count = 0;
9247 status_cbdata_t cb = { 0 };
9248 char *cmd = NULL;
9249
9250 struct option long_options[] = {
9251 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
9252 {0, 0, 0, 0}
9253 };
9254
9255 /* check options */
9256 while ((c = getopt_long(argc, argv, "c:DegiLpPstT:vx", long_options,
9257 NULL)) != -1) {
9258 switch (c) {
9259 case 'c':
9260 if (cmd != NULL) {
9261 fprintf(stderr,
9262 gettext("Can't set -c flag twice\n"));
9263 exit(1);
9264 }
9265
9266 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
9267 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
9268 fprintf(stderr, gettext(
9269 "Can't run -c, disabled by "
9270 "ZPOOL_SCRIPTS_ENABLED.\n"));
9271 exit(1);
9272 }
9273
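/*
 * Refuse to run vdev scripts with root privileges (real or
 * effective uid 0) unless ZPOOL_SCRIPTS_AS_ROOT is set.
 */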
9274 if ((getuid() <= 0 || geteuid() <= 0) &&
9275 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
9276 fprintf(stderr, gettext(
9277 "Can't run -c with root privileges "
9278 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
9279 exit(1);
9280 }
9281 cmd = optarg;
9282 break;
9283 case 'D':
9284 cb.cb_dedup_stats = B_TRUE;
9285 break;
9286 case 'e':
9287 cb.cb_print_unhealthy = B_TRUE;
9288 break;
9289 case 'g':
9290 cb.cb_name_flags |= VDEV_NAME_GUID;
9291 break;
9292 case 'i':
9293 cb.cb_print_vdev_init = B_TRUE;
9294 break;
9295 case 'L':
9296 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
9297 break;
9298 case 'p':
9299 cb.cb_literal = B_TRUE;
9300 break;
9301 case 'P':
9302 cb.cb_name_flags |= VDEV_NAME_PATH;
9303 break;
9304 case 's':
9305 cb.cb_print_slow_ios = B_TRUE;
9306 break;
9307 case 't':
9308 cb.cb_print_vdev_trim = B_TRUE;
9309 break;
9310 case 'T':
9311 get_timestamp_arg(*optarg);
9312 break;
9313 case 'v':
9314 cb.cb_verbose = B_TRUE;
9315 break;
9316 case 'x':
9317 cb.cb_explain = B_TRUE;
9318 break;
9319 case ZPOOL_OPTION_POWER:
9320 cb.cb_print_power = B_TRUE;
9321 break;
9322 case '?':
9323 if (optopt == 'c') {
9324 print_zpool_script_list("status");
9325 exit(0);
9326 } else {
9327 fprintf(stderr,
9328 gettext("invalid option '%c'\n"), optopt);
9329 }
9330 usage(B_FALSE);
9331 }
9332 }
9333
9334 argc -= optind;
9335 argv += optind;
9336
9337 get_interval_count(&argc, argv, &interval, &count);
9338
9339 if (argc == 0)
9340 cb.cb_allpools = B_TRUE;
9341
9342 cb.cb_first = B_TRUE;
9343 cb.cb_print_status = B_TRUE;
9344
9345 for (;;) {
9346 if (timestamp_fmt != NODATE)
9347 print_timestamp(timestamp_fmt);
9348
9349 if (cmd != NULL)
9350 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
9351 NULL, NULL, 0, 0);
9352
9353 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
9354 cb.cb_literal, status_callback, &cb);
9355
9356 if (cb.vcdl != NULL)
9357 free_vdev_cmd_data_list(cb.vcdl);
9358 if (argc == 0 && cb.cb_count == 0)
9359 (void) fprintf(stderr, gettext("no pools available\n"));
9360 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
9361 (void) printf(gettext("all pools are healthy\n"));
9362
9363 if (ret != 0)
9364 return (ret);
9365
9366 if (interval == 0)
9367 break;
9368
9369 if (count != 0 && --count == 0)
9370 break;
9371
9372 (void) fflush(stdout);
9373 (void) fsleep(interval);
9374 }
9375
9376 return (0);
9377 }
9378
9379 typedef struct upgrade_cbdata {
9380 int cb_first;
9381 int cb_argc;
9382 uint64_t cb_version;
9383 char **cb_argv;
9384 } upgrade_cbdata_t;
9385
9386 static int
9387 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
9388 {
9389 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
9390 int *count = (int *)unsupp_fs;
9391
9392 if (zfs_version > ZPL_VERSION) {
9393 (void) printf(gettext("%s (v%d) is not supported by this "
9394 "implementation of ZFS.\n"),
9395 zfs_get_name(zhp), zfs_version);
9396 (*count)++;
9397 }
9398
9399 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
9400
9401 zfs_close(zhp);
9402
9403 return (0);
9404 }
9405
9406 static int
9407 upgrade_version(zpool_handle_t *zhp, uint64_t version)
9408 {
9409 int ret;
9410 nvlist_t *config;
9411 uint64_t oldversion;
9412 int unsupp_fs = 0;
9413
9414 config = zpool_get_config(zhp, NULL);
9415 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9416 &oldversion) == 0);
9417
9418 char compat[ZFS_MAXPROPLEN];
9419 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
9420 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
9421 compat[0] = '\0';
9422
9423 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
9424 assert(oldversion < version);
9425
9426 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
9427 if (ret != 0)
9428 return (ret);
9429
9430 if (unsupp_fs) {
9431 (void) fprintf(stderr, gettext("Upgrade not performed due "
9432 "to %d unsupported filesystems (max v%d).\n"),
9433 unsupp_fs, (int)ZPL_VERSION);
9434 return (1);
9435 }
9436
9437 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
9438 (void) fprintf(stderr, gettext("Upgrade not performed because "
9439 "'compatibility' property set to '"
9440 ZPOOL_COMPAT_LEGACY "'.\n"));
9441 return (1);
9442 }
9443
9444 ret = zpool_upgrade(zhp, version);
9445 if (ret != 0)
9446 return (ret);
9447
9448 if (version >= SPA_VERSION_FEATURES) {
9449 (void) printf(gettext("Successfully upgraded "
9450 "'%s' from version %llu to feature flags.\n"),
9451 zpool_get_name(zhp), (u_longlong_t)oldversion);
9452 } else {
9453 (void) printf(gettext("Successfully upgraded "
9454 "'%s' from version %llu to version %llu.\n"),
9455 zpool_get_name(zhp), (u_longlong_t)oldversion,
9456 (u_longlong_t)version);
9457 }
9458
9459 return (0);
9460 }
9461
9462 static int
9463 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
9464 {
9465 int i, ret, count;
9466 boolean_t firstff = B_TRUE;
9467 nvlist_t *enabled = zpool_get_features(zhp);
9468
9469 char compat[ZFS_MAXPROPLEN];
9470 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
9471 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
9472 compat[0] = '\0';
9473
9474 boolean_t requested_features[SPA_FEATURES];
9475 if (zpool_do_load_compat(compat, requested_features) !=
9476 ZPOOL_COMPATIBILITY_OK)
9477 return (-1);
9478
9479 count = 0;
9480 for (i = 0; i < SPA_FEATURES; i++) {
9481 const char *fname = spa_feature_table[i].fi_uname;
9482 const char *fguid = spa_feature_table[i].fi_guid;
9483
9484 if (!spa_feature_table[i].fi_zfs_mod_supported)
9485 continue;
9486
9487 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
9488 char *propname;
9489 verify(-1 != asprintf(&propname, "feature@%s", fname));
9490 ret = zpool_set_prop(zhp, propname,
9491 ZFS_FEATURE_ENABLED);
9492 if (ret != 0) {
9493 free(propname);
9494 return (ret);
9495 }
9496 count++;
9497
9498 if (firstff) {
9499 (void) printf(gettext("Enabled the "
9500 "following features on '%s':\n"),
9501 zpool_get_name(zhp));
9502 firstff = B_FALSE;
9503 }
9504 (void) printf(gettext(" %s\n"), fname);
9505 free(propname);
9506 }
9507 }
9508
9509 if (countp != NULL)
9510 *countp = count;
9511 return (0);
9512 }
9513
9514 static int
9515 upgrade_cb(zpool_handle_t *zhp, void *arg)
9516 {
9517 upgrade_cbdata_t *cbp = arg;
9518 nvlist_t *config;
9519 uint64_t version;
9520 boolean_t modified_pool = B_FALSE;
9521 int ret;
9522
9523 config = zpool_get_config(zhp, NULL);
9524 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9525 &version) == 0);
9526
9527 assert(SPA_VERSION_IS_SUPPORTED(version));
9528
9529 if (version < cbp->cb_version) {
9530 cbp->cb_first = B_FALSE;
9531 ret = upgrade_version(zhp, cbp->cb_version);
9532 if (ret != 0)
9533 return (ret);
9534 modified_pool = B_TRUE;
9535
9536 /*
9537 * If they did "zpool upgrade -a", then we could
9538 * be doing ioctls to different pools. We need
9539 * to log this history once to each pool, and bypass
9540 * the normal history logging that happens in main().
9541 */
9542 (void) zpool_log_history(g_zfs, history_str);
9543 log_history = B_FALSE;
9544 }
9545
9546 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9547 int count;
9548 ret = upgrade_enable_all(zhp, &count);
9549 if (ret != 0)
9550 return (ret);
9551
9552 if (count > 0) {
9553 cbp->cb_first = B_FALSE;
9554 modified_pool = B_TRUE;
9555 }
9556 }
9557
9558 if (modified_pool) {
9559 (void) printf("\n");
9560 (void) after_zpool_upgrade(zhp);
9561 }
9562
9563 return (0);
9564 }
9565
9566 static int
9567 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
9568 {
9569 upgrade_cbdata_t *cbp = arg;
9570 nvlist_t *config;
9571 uint64_t version;
9572
9573 config = zpool_get_config(zhp, NULL);
9574 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9575 &version) == 0);
9576
9577 assert(SPA_VERSION_IS_SUPPORTED(version));
9578
9579 if (version < SPA_VERSION_FEATURES) {
9580 if (cbp->cb_first) {
9581 (void) printf(gettext("The following pools are "
9582 "formatted with legacy version numbers and can\n"
9583 "be upgraded to use feature flags. After "
9584 "being upgraded, these pools\nwill no "
9585 "longer be accessible by software that does not "
9586 "support feature\nflags.\n\n"
9587 "Note that setting a pool's 'compatibility' "
9588 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
9589 "inhibit upgrades.\n\n"));
9590 (void) printf(gettext("VER POOL\n"));
9591 (void) printf(gettext("--- ------------\n"));
9592 cbp->cb_first = B_FALSE;
9593 }
9594
9595 (void) printf("%2llu %s\n", (u_longlong_t)version,
9596 zpool_get_name(zhp));
9597 }
9598
9599 return (0);
9600 }
9601
9602 static int
9603 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
9604 {
9605 upgrade_cbdata_t *cbp = arg;
9606 nvlist_t *config;
9607 uint64_t version;
9608
9609 config = zpool_get_config(zhp, NULL);
9610 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9611 &version) == 0);
9612
9613 if (version >= SPA_VERSION_FEATURES) {
9614 int i;
9615 boolean_t poolfirst = B_TRUE;
9616 nvlist_t *enabled = zpool_get_features(zhp);
9617
9618 for (i = 0; i < SPA_FEATURES; i++) {
9619 const char *fguid = spa_feature_table[i].fi_guid;
9620 const char *fname = spa_feature_table[i].fi_uname;
9621
9622 if (!spa_feature_table[i].fi_zfs_mod_supported)
9623 continue;
9624
9625 if (!nvlist_exists(enabled, fguid)) {
9626 if (cbp->cb_first) {
9627 (void) printf(gettext("\nSome "
9628 "supported features are not "
9629 "enabled on the following pools. "
9630 "Once a\nfeature is enabled the "
9631 "pool may become incompatible with "
9632 "software\nthat does not support "
9633 "the feature. See "
9634 "zpool-features(7) for "
9635 "details.\n\n"
9636 "Note that the pool "
9637 "'compatibility' feature can be "
9638 "used to inhibit\nfeature "
9639 "upgrades.\n\n"));
9640 (void) printf(gettext("POOL "
9641 "FEATURE\n"));
9642 (void) printf(gettext("------"
9643 "---------\n"));
9644 cbp->cb_first = B_FALSE;
9645 }
9646
9647 if (poolfirst) {
9648 (void) printf(gettext("%s\n"),
9649 zpool_get_name(zhp));
9650 poolfirst = B_FALSE;
9651 }
9652
9653 (void) printf(gettext(" %s\n"), fname);
9654 }
9655 /*
9656 * If they did "zpool upgrade -a", then we could
9657 * be doing ioctls to different pools. We need
9658 * to log this history once to each pool, and bypass
9659 * the normal history logging that happens in main().
9660 */
9661 (void) zpool_log_history(g_zfs, history_str);
9662 log_history = B_FALSE;
9663 }
9664 }
9665
9666 return (0);
9667 }
9668
9669 static int
9670 upgrade_one(zpool_handle_t *zhp, void *data)
9671 {
9672 boolean_t modified_pool = B_FALSE;
9673 upgrade_cbdata_t *cbp = data;
9674 uint64_t cur_version;
9675 int ret;
9676
9677 if (strcmp("log", zpool_get_name(zhp)) == 0) {
9678 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
9679 "Pool 'log' must be renamed using export and import"
9680 " to upgrade.\n"));
9681 return (1);
9682 }
9683
9684 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
9685 if (cur_version > cbp->cb_version) {
9686 (void) printf(gettext("Pool '%s' is already formatted "
9687 "using more current version '%llu'.\n\n"),
9688 zpool_get_name(zhp), (u_longlong_t)cur_version);
9689 return (0);
9690 }
9691
9692 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
9693 (void) printf(gettext("Pool '%s' is already formatted "
9694 "using version %llu.\n\n"), zpool_get_name(zhp),
9695 (u_longlong_t)cbp->cb_version);
9696 return (0);
9697 }
9698
9699 if (cur_version != cbp->cb_version) {
9700 modified_pool = B_TRUE;
9701 ret = upgrade_version(zhp, cbp->cb_version);
9702 if (ret != 0)
9703 return (ret);
9704 }
9705
9706 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9707 int count = 0;
9708 ret = upgrade_enable_all(zhp, &count);
9709 if (ret != 0)
9710 return (ret);
9711
9712 if (count != 0) {
9713 modified_pool = B_TRUE;
9714 } else if (cur_version == SPA_VERSION) {
9715 (void) printf(gettext("Pool '%s' already has all "
9716 "supported and requested features enabled.\n"),
9717 zpool_get_name(zhp));
9718 }
9719 }
9720
9721 if (modified_pool) {
9722 (void) printf("\n");
9723 (void) after_zpool_upgrade(zhp);
9724 }
9725
9726 return (0);
9727 }
9728
9729 /*
9730 * zpool upgrade
9731 * zpool upgrade -v
9732 * zpool upgrade [-V version] <-a | pool ...>
9733 *
9734 * With no arguments, display downrev'd ZFS pools available for upgrade.
9735 * Individual pools can be upgraded by specifying the pool, and '-a' will
9736 * upgrade all pools.
9737 */
9738 int
9739 zpool_do_upgrade(int argc, char **argv)
9740 {
9741 int c;
9742 upgrade_cbdata_t cb = { 0 };
9743 int ret = 0;
9744 boolean_t showversions = B_FALSE;
9745 boolean_t upgradeall = B_FALSE;
9746 char *end;
9747
9748
9749 /* check options */
9750 while ((c = getopt(argc, argv, ":avV:")) != -1) {
9751 switch (c) {
9752 case 'a':
9753 upgradeall = B_TRUE;
9754 break;
9755 case 'v':
9756 showversions = B_TRUE;
9757 break;
9758 case 'V':
9759 cb.cb_version = strtoll(optarg, &end, 10);
9760 if (*end != '\0' ||
9761 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
9762 (void) fprintf(stderr,
9763 gettext("invalid version '%s'\n"), optarg);
9764 usage(B_FALSE);
9765 }
9766 break;
9767 case ':':
9768 (void) fprintf(stderr, gettext("missing argument for "
9769 "'%c' option\n"), optopt);
9770 usage(B_FALSE);
9771 break;
9772 case '?':
9773 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9774 optopt);
9775 usage(B_FALSE);
9776 }
9777 }
9778
9779 cb.cb_argc = argc;
9780 cb.cb_argv = argv;
9781 argc -= optind;
9782 argv += optind;
9783
9784 if (cb.cb_version == 0) {
9785 cb.cb_version = SPA_VERSION;
9786 } else if (!upgradeall && argc == 0) {
9787 (void) fprintf(stderr, gettext("-V option is "
9788 "incompatible with other arguments\n"));
9789 usage(B_FALSE);
9790 }
9791
9792 if (showversions) {
9793 if (upgradeall || argc != 0) {
9794 (void) fprintf(stderr, gettext("-v option is "
9795 "incompatible with other arguments\n"));
9796 usage(B_FALSE);
9797 }
9798 } else if (upgradeall) {
9799 if (argc != 0) {
9800 (void) fprintf(stderr, gettext("-a option should not "
9801 "be used along with a pool name\n"));
9802 usage(B_FALSE);
9803 }
9804 }
9805
9806 (void) printf("%s", gettext("This system supports ZFS pool feature "
9807 "flags.\n\n"));
9808 if (showversions) {
9809 int i;
9810
9811 (void) printf(gettext("The following features are "
9812 "supported:\n\n"));
9813 (void) printf(gettext("FEAT DESCRIPTION\n"));
9814 (void) printf("----------------------------------------------"
9815 "---------------\n");
9816 for (i = 0; i < SPA_FEATURES; i++) {
9817 zfeature_info_t *fi = &spa_feature_table[i];
9818 if (!fi->fi_zfs_mod_supported)
9819 continue;
9820 const char *ro =
9821 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
9822 " (read-only compatible)" : "";
9823
9824 (void) printf("%-37s%s\n", fi->fi_uname, ro);
9825 (void) printf(" %s\n", fi->fi_desc);
9826 }
9827 (void) printf("\n");
9828
9829 (void) printf(gettext("The following legacy versions are also "
9830 "supported:\n\n"));
9831 (void) printf(gettext("VER DESCRIPTION\n"));
9832 (void) printf("--- -----------------------------------------"
9833 "---------------\n");
9834 (void) printf(gettext(" 1 Initial ZFS version\n"));
9835 (void) printf(gettext(" 2 Ditto blocks "
9836 "(replicated metadata)\n"));
9837 (void) printf(gettext(" 3 Hot spares and double parity "
9838 "RAID-Z\n"));
9839 (void) printf(gettext(" 4 zpool history\n"));
9840 (void) printf(gettext(" 5 Compression using the gzip "
9841 "algorithm\n"));
9842 (void) printf(gettext(" 6 bootfs pool property\n"));
9843 (void) printf(gettext(" 7 Separate intent log devices\n"));
9844 (void) printf(gettext(" 8 Delegated administration\n"));
9845 (void) printf(gettext(" 9 refquota and refreservation "
9846 "properties\n"));
9847 (void) printf(gettext(" 10 Cache devices\n"));
9848 (void) printf(gettext(" 11 Improved scrub performance\n"));
9849 (void) printf(gettext(" 12 Snapshot properties\n"));
9850 (void) printf(gettext(" 13 snapused property\n"));
9851 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
9852 (void) printf(gettext(" 15 user/group space accounting\n"));
9853 (void) printf(gettext(" 16 stmf property support\n"));
9854 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
9855 (void) printf(gettext(" 18 Snapshot user holds\n"));
9856 (void) printf(gettext(" 19 Log device removal\n"));
9857 (void) printf(gettext(" 20 Compression using zle "
9858 "(zero-length encoding)\n"));
9859 (void) printf(gettext(" 21 Deduplication\n"));
9860 (void) printf(gettext(" 22 Received properties\n"));
9861 (void) printf(gettext(" 23 Slim ZIL\n"));
9862 (void) printf(gettext(" 24 System attributes\n"));
9863 (void) printf(gettext(" 25 Improved scrub stats\n"));
9864 (void) printf(gettext(" 26 Improved snapshot deletion "
9865 "performance\n"));
9866 (void) printf(gettext(" 27 Improved snapshot creation "
9867 "performance\n"));
9868 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
9869 (void) printf(gettext("\nFor more information on a particular "
9870 "version, including supported releases,\n"));
9871 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
9872 } else if (argc == 0 && upgradeall) {
9873 cb.cb_first = B_TRUE;
9874 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
9875 if (ret == 0 && cb.cb_first) {
9876 if (cb.cb_version == SPA_VERSION) {
9877 (void) printf(gettext("All pools are already "
9878 "formatted using feature flags.\n\n"));
9879 (void) printf(gettext("Every feature flags "
9880 "pool already has all supported and "
9881 "requested features enabled.\n"));
9882 } else {
9883 (void) printf(gettext("All pools are already "
9884 "formatted with version %llu or higher.\n"),
9885 (u_longlong_t)cb.cb_version);
9886 }
9887 }
9888 } else if (argc == 0) {
9889 cb.cb_first = B_TRUE;
9890 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
9891 assert(ret == 0);
9892
9893 if (cb.cb_first) {
9894 (void) printf(gettext("All pools are formatted "
9895 "using feature flags.\n\n"));
9896 } else {
9897 (void) printf(gettext("\nUse 'zpool upgrade -v' "
9898 "for a list of available legacy versions.\n"));
9899 }
9900
9901 cb.cb_first = B_TRUE;
9902 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
9903 assert(ret == 0);
9904
9905 if (cb.cb_first) {
9906 (void) printf(gettext("Every feature flags pool has "
9907 "all supported and requested features enabled.\n"));
9908 } else {
9909 (void) printf(gettext("\n"));
9910 }
9911 } else {
9912 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9913 B_FALSE, upgrade_one, &cb);
9914 }
9915
9916 return (ret);
9917 }
9918
9919 typedef struct hist_cbdata {
9920 boolean_t first;
9921 boolean_t longfmt;
9922 boolean_t internal;
9923 } hist_cbdata_t;
9924
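/*
 * Print one batch of history records returned by zpool_get_history().
 * Internal events are shown only with -i, and the trailing user/host/zone
 * information is shown only with -l.
 */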
9925 static void
9926 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
9927 {
9928 nvlist_t **records;
9929 uint_t numrecords;
9930 int i;
9931
9932 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
9933 &records, &numrecords) == 0);
9934 for (i = 0; i < numrecords; i++) {
9935 nvlist_t *rec = records[i];
9936 char tbuf[64] = "";
9937
9938 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
9939 time_t tsec;
9940 struct tm t;
9941
9942 tsec = fnvlist_lookup_uint64(records[i],
9943 ZPOOL_HIST_TIME);
9944 (void) localtime_r(&tsec, &t);
9945 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
9946 }
9947
9948 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
9949 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
9950 ZPOOL_HIST_ELAPSED_NS);
9951 (void) snprintf(tbuf + strlen(tbuf),
9952 sizeof (tbuf) - strlen(tbuf),
9953 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
9954 }
9955
9956 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
9957 (void) printf("%s %s", tbuf,
9958 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
9959 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
9960 int ievent =
9961 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
9962 if (!cb->internal)
9963 continue;
9964 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
9965 (void) printf("%s unrecognized record:\n",
9966 tbuf);
9967 dump_nvlist(rec, 4);
9968 continue;
9969 }
9970 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
9971 zfs_history_event_names[ievent],
9972 (longlong_t)fnvlist_lookup_uint64(
9973 rec, ZPOOL_HIST_TXG),
9974 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
9975 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
9976 if (!cb->internal)
9977 continue;
9978 (void) printf("%s [txg:%lld] %s", tbuf,
9979 (longlong_t)fnvlist_lookup_uint64(
9980 rec, ZPOOL_HIST_TXG),
9981 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
9982 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
9983 (void) printf(" %s (%llu)",
9984 fnvlist_lookup_string(rec,
9985 ZPOOL_HIST_DSNAME),
9986 (u_longlong_t)fnvlist_lookup_uint64(rec,
9987 ZPOOL_HIST_DSID));
9988 }
9989 (void) printf(" %s", fnvlist_lookup_string(rec,
9990 ZPOOL_HIST_INT_STR));
9991 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
9992 if (!cb->internal)
9993 continue;
9994 (void) printf("%s ioctl %s\n", tbuf,
9995 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
9996 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
9997 (void) printf(" input:\n");
9998 dump_nvlist(fnvlist_lookup_nvlist(rec,
9999 ZPOOL_HIST_INPUT_NVL), 8);
10000 }
10001 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
10002 (void) printf(" output:\n");
10003 dump_nvlist(fnvlist_lookup_nvlist(rec,
10004 ZPOOL_HIST_OUTPUT_NVL), 8);
10005 }
10006 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
10007 (void) printf(" output nvlist omitted; "
10008 "original size: %lldKB\n",
10009 (longlong_t)fnvlist_lookup_int64(rec,
10010 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
10011 }
10012 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
10013 (void) printf(" errno: %lld\n",
10014 (longlong_t)fnvlist_lookup_int64(rec,
10015 ZPOOL_HIST_ERRNO));
10016 }
10017 } else {
10018 if (!cb->internal)
10019 continue;
10020 (void) printf("%s unrecognized record:\n", tbuf);
10021 dump_nvlist(rec, 4);
10022 }
10023
10024 if (!cb->longfmt) {
10025 (void) printf("\n");
10026 continue;
10027 }
10028 (void) printf(" [");
10029 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
10030 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
10031 struct passwd *pwd = getpwuid(who);
10032 (void) printf("user %d ", (int)who);
10033 if (pwd != NULL)
10034 (void) printf("(%s) ", pwd->pw_name);
10035 }
10036 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
10037 (void) printf("on %s",
10038 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
10039 }
10040 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
10041 (void) printf(":%s",
10042 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
10043 }
10044
10045 (void) printf("]");
10046 (void) printf("\n");
10047 }
10048 }
10049
10050 /*
10051 * Print out the command history for a specific pool.
10052 */
10053 static int
10054 get_history_one(zpool_handle_t *zhp, void *data)
10055 {
10056 nvlist_t *nvhis;
10057 int ret;
10058 hist_cbdata_t *cb = (hist_cbdata_t *)data;
10059 uint64_t off = 0;
10060 boolean_t eof = B_FALSE;
10061
10062 cb->first = B_FALSE;
10063
10064 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
10065
10066 while (!eof) {
10067 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
10068 return (ret);
10069
10070 print_history_records(nvhis, cb);
10071 nvlist_free(nvhis);
10072 }
10073 (void) printf("\n");
10074
10075 return (ret);
10076 }
10077
10078 /*
10079  * zpool history [-il] [<pool>] ...
10080 *
10081 * Displays the history of commands that modified pools.
10082 */
10083 int
10084 zpool_do_history(int argc, char **argv)
10085 {
10086 hist_cbdata_t cbdata = { 0 };
10087 int ret;
10088 int c;
10089
10090 cbdata.first = B_TRUE;
10091 /* check options */
10092 while ((c = getopt(argc, argv, "li")) != -1) {
10093 switch (c) {
10094 case 'l':
10095 cbdata.longfmt = B_TRUE;
10096 break;
10097 case 'i':
10098 cbdata.internal = B_TRUE;
10099 break;
10100 case '?':
10101 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10102 optopt);
10103 usage(B_FALSE);
10104 }
10105 }
10106 argc -= optind;
10107 argv += optind;
10108
10109 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
10110 B_FALSE, get_history_one, &cbdata);
10111
10112 if (argc == 0 && cbdata.first == B_TRUE) {
10113 (void) fprintf(stderr, gettext("no pools available\n"));
10114 return (0);
10115 }
10116
10117 return (ret);
10118 }
10119
10120 typedef struct ev_opts {
10121 int verbose;
10122 int scripted;
10123 int follow;
10124 int clear;
10125 char poolname[ZFS_MAX_DATASET_NAME_LEN];
10126 } ev_opts_t;
10127
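/*
 * Print the one-line TIME and CLASS summary for a single event.
 */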
10128 static void
10129 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
10130 {
10131 char ctime_str[26], str[32];
10132 const char *ptr;
10133 int64_t *tv;
10134 uint_t n;
10135
10136 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
10137 memset(str, ' ', 32);
10138 (void) ctime_r((const time_t *)&tv[0], ctime_str);
10139 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
10140 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
10141 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
10142 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
10143 if (opts->scripted)
10144 (void) printf(gettext("%s\t"), str);
10145 else
10146 (void) printf(gettext("%s "), str);
10147
10148 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
10149 (void) printf(gettext("%s\n"), ptr);
10150 }
10151
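/*
 * Recursively print an event nvlist as "name = value" pairs, indenting
 * embedded nvlists by an additional eight spaces.
 */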
10152 static void
10153 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
10154 {
10155 nvpair_t *nvp;
10156
10157 for (nvp = nvlist_next_nvpair(nvl, NULL);
10158 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
10159
10160 data_type_t type = nvpair_type(nvp);
10161 const char *name = nvpair_name(nvp);
10162
10163 boolean_t b;
10164 uint8_t i8;
10165 uint16_t i16;
10166 uint32_t i32;
10167 uint64_t i64;
10168 const char *str;
10169 nvlist_t *cnv;
10170
10171 printf(gettext("%*s%s = "), depth, "", name);
10172
10173 switch (type) {
10174 case DATA_TYPE_BOOLEAN:
10175 printf(gettext("%s"), "1");
10176 break;
10177
10178 case DATA_TYPE_BOOLEAN_VALUE:
10179 (void) nvpair_value_boolean_value(nvp, &b);
10180 printf(gettext("%s"), b ? "1" : "0");
10181 break;
10182
10183 case DATA_TYPE_BYTE:
10184 (void) nvpair_value_byte(nvp, &i8);
10185 printf(gettext("0x%x"), i8);
10186 break;
10187
10188 case DATA_TYPE_INT8:
10189 (void) nvpair_value_int8(nvp, (void *)&i8);
10190 printf(gettext("0x%x"), i8);
10191 break;
10192
10193 case DATA_TYPE_UINT8:
10194 (void) nvpair_value_uint8(nvp, &i8);
10195 printf(gettext("0x%x"), i8);
10196 break;
10197
10198 case DATA_TYPE_INT16:
10199 (void) nvpair_value_int16(nvp, (void *)&i16);
10200 printf(gettext("0x%x"), i16);
10201 break;
10202
10203 case DATA_TYPE_UINT16:
10204 (void) nvpair_value_uint16(nvp, &i16);
10205 printf(gettext("0x%x"), i16);
10206 break;
10207
10208 case DATA_TYPE_INT32:
10209 (void) nvpair_value_int32(nvp, (void *)&i32);
10210 printf(gettext("0x%x"), i32);
10211 break;
10212
10213 case DATA_TYPE_UINT32:
10214 (void) nvpair_value_uint32(nvp, &i32);
10215 printf(gettext("0x%x"), i32);
10216 break;
10217
10218 case DATA_TYPE_INT64:
10219 (void) nvpair_value_int64(nvp, (void *)&i64);
10220 printf(gettext("0x%llx"), (u_longlong_t)i64);
10221 break;
10222
10223 case DATA_TYPE_UINT64:
10224 (void) nvpair_value_uint64(nvp, &i64);
10225 /*
10226 * translate vdev state values to readable
10227 * strings to aide zpool events consumers
10228 			 * strings to aid zpool events consumers
10229 if (strcmp(name,
10230 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
10231 strcmp(name,
10232 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
10233 printf(gettext("\"%s\" (0x%llx)"),
10234 zpool_state_to_name(i64, VDEV_AUX_NONE),
10235 (u_longlong_t)i64);
10236 } else {
10237 printf(gettext("0x%llx"), (u_longlong_t)i64);
10238 }
10239 break;
10240
10241 case DATA_TYPE_HRTIME:
10242 (void) nvpair_value_hrtime(nvp, (void *)&i64);
10243 printf(gettext("0x%llx"), (u_longlong_t)i64);
10244 break;
10245
10246 case DATA_TYPE_STRING:
10247 (void) nvpair_value_string(nvp, &str);
10248 printf(gettext("\"%s\""), str ? str : "<NULL>");
10249 break;
10250
10251 case DATA_TYPE_NVLIST:
10252 printf(gettext("(embedded nvlist)\n"));
10253 (void) nvpair_value_nvlist(nvp, &cnv);
10254 zpool_do_events_nvprint(cnv, depth + 8);
10255 printf(gettext("%*s(end %s)"), depth, "", name);
10256 break;
10257
10258 case DATA_TYPE_NVLIST_ARRAY: {
10259 nvlist_t **val;
10260 uint_t i, nelem;
10261
10262 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
10263 printf(gettext("(%d embedded nvlists)\n"), nelem);
10264 for (i = 0; i < nelem; i++) {
10265 printf(gettext("%*s%s[%d] = %s\n"),
10266 depth, "", name, i, "(embedded nvlist)");
10267 zpool_do_events_nvprint(val[i], depth + 8);
10268 printf(gettext("%*s(end %s[%i])\n"),
10269 depth, "", name, i);
10270 }
10271 printf(gettext("%*s(end %s)\n"), depth, "", name);
10272 }
10273 break;
10274
10275 case DATA_TYPE_INT8_ARRAY: {
10276 int8_t *val;
10277 uint_t i, nelem;
10278
10279 (void) nvpair_value_int8_array(nvp, &val, &nelem);
10280 for (i = 0; i < nelem; i++)
10281 printf(gettext("0x%x "), val[i]);
10282
10283 break;
10284 }
10285
10286 case DATA_TYPE_UINT8_ARRAY: {
10287 uint8_t *val;
10288 uint_t i, nelem;
10289
10290 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
10291 for (i = 0; i < nelem; i++)
10292 printf(gettext("0x%x "), val[i]);
10293
10294 break;
10295 }
10296
10297 case DATA_TYPE_INT16_ARRAY: {
10298 int16_t *val;
10299 uint_t i, nelem;
10300
10301 (void) nvpair_value_int16_array(nvp, &val, &nelem);
10302 for (i = 0; i < nelem; i++)
10303 printf(gettext("0x%x "), val[i]);
10304
10305 break;
10306 }
10307
10308 case DATA_TYPE_UINT16_ARRAY: {
10309 uint16_t *val;
10310 uint_t i, nelem;
10311
10312 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
10313 for (i = 0; i < nelem; i++)
10314 printf(gettext("0x%x "), val[i]);
10315
10316 break;
10317 }
10318
10319 case DATA_TYPE_INT32_ARRAY: {
10320 int32_t *val;
10321 uint_t i, nelem;
10322
10323 (void) nvpair_value_int32_array(nvp, &val, &nelem);
10324 for (i = 0; i < nelem; i++)
10325 printf(gettext("0x%x "), val[i]);
10326
10327 break;
10328 }
10329
10330 case DATA_TYPE_UINT32_ARRAY: {
10331 uint32_t *val;
10332 uint_t i, nelem;
10333
10334 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
10335 for (i = 0; i < nelem; i++)
10336 printf(gettext("0x%x "), val[i]);
10337
10338 break;
10339 }
10340
10341 case DATA_TYPE_INT64_ARRAY: {
10342 int64_t *val;
10343 uint_t i, nelem;
10344
10345 (void) nvpair_value_int64_array(nvp, &val, &nelem);
10346 for (i = 0; i < nelem; i++)
10347 printf(gettext("0x%llx "),
10348 (u_longlong_t)val[i]);
10349
10350 break;
10351 }
10352
10353 case DATA_TYPE_UINT64_ARRAY: {
10354 uint64_t *val;
10355 uint_t i, nelem;
10356
10357 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
10358 for (i = 0; i < nelem; i++)
10359 printf(gettext("0x%llx "),
10360 (u_longlong_t)val[i]);
10361
10362 break;
10363 }
10364
10365 case DATA_TYPE_STRING_ARRAY: {
10366 const char **str;
10367 uint_t i, nelem;
10368
10369 (void) nvpair_value_string_array(nvp, &str, &nelem);
10370 for (i = 0; i < nelem; i++)
10371 printf(gettext("\"%s\" "),
10372 str[i] ? str[i] : "<NULL>");
10373
10374 break;
10375 }
10376
10377 case DATA_TYPE_BOOLEAN_ARRAY:
10378 case DATA_TYPE_BYTE_ARRAY:
10379 case DATA_TYPE_DOUBLE:
10380 case DATA_TYPE_DONTCARE:
10381 case DATA_TYPE_UNKNOWN:
10382 printf(gettext("<unknown>"));
10383 break;
10384 }
10385
10386 printf(gettext("\n"));
10387 }
10388 }
10389
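/*
 * Print events from the kernel event queue, optionally blocking for new
 * events (-f) and filtering them by pool name.
 */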
10390 static int
10391 zpool_do_events_next(ev_opts_t *opts)
10392 {
10393 nvlist_t *nvl;
10394 int zevent_fd, ret, dropped;
10395 const char *pool;
10396
10397 zevent_fd = open(ZFS_DEV, O_RDWR);
10398 VERIFY(zevent_fd >= 0);
10399
10400 if (!opts->scripted)
10401 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
10402
10403 while (1) {
10404 ret = zpool_events_next(g_zfs, &nvl, &dropped,
10405 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
10406 if (ret || nvl == NULL)
10407 break;
10408
10409 if (dropped > 0)
10410 (void) printf(gettext("dropped %d events\n"), dropped);
10411
10412 if (strlen(opts->poolname) > 0 &&
10413 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
10414 strcmp(opts->poolname, pool) != 0)
10415 continue;
10416
10417 zpool_do_events_short(nvl, opts);
10418
10419 if (opts->verbose) {
10420 zpool_do_events_nvprint(nvl, 8);
10421 printf(gettext("\n"));
10422 }
10423 (void) fflush(stdout);
10424
10425 nvlist_free(nvl);
10426 }
10427
10428 VERIFY(0 == close(zevent_fd));
10429
10430 return (ret);
10431 }
10432
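/*
 * Clear the kernel event queue and report how many events were cleared.
 */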
10433 static int
10434 zpool_do_events_clear(void)
10435 {
10436 int count, ret;
10437
10438 ret = zpool_events_clear(g_zfs, &count);
10439 if (!ret)
10440 (void) printf(gettext("cleared %d events\n"), count);
10441
10442 return (ret);
10443 }
10444
10445 /*
10446 * zpool events [-vHf [pool] | -c]
10447 *
10448  * Displays events logged by ZFS.
10449 */
10450 int
10451 zpool_do_events(int argc, char **argv)
10452 {
10453 ev_opts_t opts = { 0 };
10454 int ret;
10455 int c;
10456
10457 /* check options */
10458 while ((c = getopt(argc, argv, "vHfc")) != -1) {
10459 switch (c) {
10460 case 'v':
10461 opts.verbose = 1;
10462 break;
10463 case 'H':
10464 opts.scripted = 1;
10465 break;
10466 case 'f':
10467 opts.follow = 1;
10468 break;
10469 case 'c':
10470 opts.clear = 1;
10471 break;
10472 case '?':
10473 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10474 optopt);
10475 usage(B_FALSE);
10476 }
10477 }
10478 argc -= optind;
10479 argv += optind;
10480
10481 if (argc > 1) {
10482 (void) fprintf(stderr, gettext("too many arguments\n"));
10483 usage(B_FALSE);
10484 } else if (argc == 1) {
10485 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
10486 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
10487 (void) fprintf(stderr,
10488 gettext("invalid pool name '%s'\n"), opts.poolname);
10489 usage(B_FALSE);
10490 }
10491 }
10492
10493 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
10494 opts.clear) {
10495 (void) fprintf(stderr,
10496 gettext("invalid options combined with -c\n"));
10497 usage(B_FALSE);
10498 }
10499
10500 if (opts.clear)
10501 ret = zpool_do_events_clear();
10502 else
10503 ret = zpool_do_events_next(&opts);
10504
10505 return (ret);
10506 }
10507
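/*
 * Print the requested properties for a single vdev, skipping the fake
 * pool-name placeholder at the head of the property list.
 */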
10508 static int
10509 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
10510 {
10511 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10512 char value[ZFS_MAXPROPLEN];
10513 zprop_source_t srctype;
10514
10515 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
10516 pl = pl->pl_next) {
10517 char *prop_name;
10518 /*
10519 * If the first property is pool name, it is a special
10520 * placeholder that we can skip. This will also skip
10521 * over the name property when 'all' is specified.
10522 */
10523 if (pl->pl_prop == ZPOOL_PROP_NAME &&
10524 pl == cbp->cb_proplist)
10525 continue;
10526
10527 if (pl->pl_prop == ZPROP_INVAL) {
10528 prop_name = pl->pl_user_prop;
10529 } else {
10530 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
10531 }
10532 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
10533 prop_name, value, sizeof (value), &srctype,
10534 cbp->cb_literal) == 0) {
10535 zprop_print_one_property(vdevname, cbp, prop_name,
10536 value, srctype, NULL, NULL);
10537 }
10538 }
10539
10540 return (0);
10541 }
10542
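/*
 * for_each_vdev() callback: resolve a display name for the vdev, expand
 * the property list for it, and print its properties.
 */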
10543 static int
10544 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
10545 {
10546 zpool_handle_t *zhp = zhp_data;
10547 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10548 char *vdevname;
10549 const char *type;
10550 int ret;
10551
10552 /*
10553 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
10554 	 * pool name for display purposes, which is not desired. Fall back to
10555 * zpool_vdev_name() when not dealing with the root vdev.
10556 */
10557 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
10558 if (zhp != NULL && strcmp(type, "root") == 0)
10559 vdevname = strdup("root-0");
10560 else
10561 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
10562 cbp->cb_vdevs.cb_name_flags);
10563
10564 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
10565
10566 ret = get_callback_vdev(zhp, vdevname, data);
10567
10568 free(vdevname);
10569
10570 return (ret);
10571 }
10572
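/*
 * for_each_pool() callback for 'zpool get': print either vdev properties
 * or pool properties (features and user properties included), depending
 * on the requested type.
 */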
10573 static int
10574 get_callback(zpool_handle_t *zhp, void *data)
10575 {
10576 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10577 char value[ZFS_MAXPROPLEN];
10578 zprop_source_t srctype;
10579 zprop_list_t *pl;
10580 int vid;
10581
10582 if (cbp->cb_type == ZFS_TYPE_VDEV) {
10583 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
10584 for_each_vdev(zhp, get_callback_vdev_cb, data);
10585 } else {
10586 /* Adjust column widths for vdev properties */
10587 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10588 vid++) {
10589 vdev_expand_proplist(zhp,
10590 cbp->cb_vdevs.cb_names[vid],
10591 &cbp->cb_proplist);
10592 }
10593 /* Display the properties */
10594 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10595 vid++) {
10596 get_callback_vdev(zhp,
10597 cbp->cb_vdevs.cb_names[vid], data);
10598 }
10599 }
10600 } else {
10601 assert(cbp->cb_type == ZFS_TYPE_POOL);
10602 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
10603 /*
10604 * Skip the special fake placeholder. This will also
10605 * skip over the name property when 'all' is specified.
10606 */
10607 if (pl->pl_prop == ZPOOL_PROP_NAME &&
10608 pl == cbp->cb_proplist)
10609 continue;
10610
10611 if (pl->pl_prop == ZPROP_INVAL &&
10612 zfs_prop_user(pl->pl_user_prop)) {
10613 srctype = ZPROP_SRC_LOCAL;
10614
10615 if (zpool_get_userprop(zhp, pl->pl_user_prop,
10616 value, sizeof (value), &srctype) != 0)
10617 continue;
10618
10619 zprop_print_one_property(zpool_get_name(zhp),
10620 cbp, pl->pl_user_prop, value, srctype,
10621 NULL, NULL);
10622 } else if (pl->pl_prop == ZPROP_INVAL &&
10623 (zpool_prop_feature(pl->pl_user_prop) ||
10624 zpool_prop_unsupported(pl->pl_user_prop))) {
10625 srctype = ZPROP_SRC_LOCAL;
10626
10627 if (zpool_prop_get_feature(zhp,
10628 pl->pl_user_prop, value,
10629 sizeof (value)) == 0) {
10630 zprop_print_one_property(
10631 zpool_get_name(zhp), cbp,
10632 pl->pl_user_prop, value, srctype,
10633 NULL, NULL);
10634 }
10635 } else {
10636 if (zpool_get_prop(zhp, pl->pl_prop, value,
10637 sizeof (value), &srctype,
10638 cbp->cb_literal) != 0)
10639 continue;
10640
10641 zprop_print_one_property(zpool_get_name(zhp),
10642 cbp, zpool_prop_to_name(pl->pl_prop),
10643 value, srctype, NULL, NULL);
10644 }
10645 }
10646 }
10647
10648 return (0);
10649 }
10650
10651 /*
10652 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
10653 *
10654 * -H Scripted mode. Don't display headers, and separate properties
10655 * by a single tab.
10656 * -o List of columns to display. Defaults to
10657 * "name,property,value,source".
10658 * -p Display values in parsable (exact) format.
10659 *
10660  * Get properties of pools (or of individual vdevs) in the system and
10661  * display the requested columns for each.
10662 */
10663 int
10664 zpool_do_get(int argc, char **argv)
10665 {
10666 zprop_get_cbdata_t cb = { 0 };
10667 zprop_list_t fake_name = { 0 };
10668 int ret;
10669 int c, i;
10670 char *propstr = NULL;
10671 char *vdev = NULL;
10672
10673 cb.cb_first = B_TRUE;
10674
10675 /*
10676 * Set up default columns and sources.
10677 */
10678 cb.cb_sources = ZPROP_SRC_ALL;
10679 cb.cb_columns[0] = GET_COL_NAME;
10680 cb.cb_columns[1] = GET_COL_PROPERTY;
10681 cb.cb_columns[2] = GET_COL_VALUE;
10682 cb.cb_columns[3] = GET_COL_SOURCE;
10683 cb.cb_type = ZFS_TYPE_POOL;
10684 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10685 current_prop_type = cb.cb_type;
10686
10687 /* check options */
10688 while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
10689 switch (c) {
10690 case 'p':
10691 cb.cb_literal = B_TRUE;
10692 break;
10693 case 'H':
10694 cb.cb_scripted = B_TRUE;
10695 break;
10696 case 'o':
10697 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
10698 i = 0;
10699
10700 for (char *tok; (tok = strsep(&optarg, ",")); ) {
10701 static const char *const col_opts[] =
10702 { "name", "property", "value", "source",
10703 "all" };
10704 static const zfs_get_column_t col_cols[] =
10705 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
10706 GET_COL_SOURCE };
10707
10708 if (i == ZFS_GET_NCOLS - 1) {
10709 (void) fprintf(stderr, gettext("too "
10710 "many fields given to -o "
10711 "option\n"));
10712 usage(B_FALSE);
10713 }
10714
10715 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
10716 if (strcmp(tok, col_opts[c]) == 0)
10717 goto found;
10718
10719 (void) fprintf(stderr,
10720 gettext("invalid column name '%s'\n"), tok);
10721 usage(B_FALSE);
10722
10723 found:
10724 if (c >= 4) {
10725 if (i > 0) {
10726 (void) fprintf(stderr,
10727 gettext("\"all\" conflicts "
10728 "with specific fields "
10729 "given to -o option\n"));
10730 usage(B_FALSE);
10731 }
10732
10733 memcpy(cb.cb_columns, col_cols,
10734 sizeof (col_cols));
10735 i = ZFS_GET_NCOLS - 1;
10736 } else
10737 cb.cb_columns[i++] = col_cols[c];
10738 }
10739 break;
10740 case '?':
10741 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10742 optopt);
10743 usage(B_FALSE);
10744 }
10745 }
10746
10747 argc -= optind;
10748 argv += optind;
10749
10750 if (argc < 1) {
10751 (void) fprintf(stderr, gettext("missing property "
10752 "argument\n"));
10753 usage(B_FALSE);
10754 }
10755
10756 /* Properties list is needed later by zprop_get_list() */
10757 propstr = argv[0];
10758
10759 argc--;
10760 argv++;
10761
10762 if (argc == 0) {
10763 /* No args, so just print the defaults. */
10764 } else if (are_all_pools(argc, argv)) {
10765 /* All the args are pool names */
10766 } else if (are_all_pools(1, argv)) {
10767 /* The first arg is a pool name */
10768 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
10769 (argc == 2 && strcmp(argv[1], "root") == 0) ||
10770 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10771 &cb.cb_vdevs)) {
10772
10773 if (strcmp(argv[1], "root") == 0)
10774 vdev = strdup("root-0");
10775 else
10776 vdev = strdup(argv[1]);
10777
10778 /* ... and the rest are vdev names */
10779 cb.cb_vdevs.cb_names = &vdev;
10780 cb.cb_vdevs.cb_names_count = argc - 1;
10781 cb.cb_type = ZFS_TYPE_VDEV;
10782 argc = 1; /* One pool to process */
10783 } else {
10784 fprintf(stderr, gettext("Expected a list of vdevs in"
10785 " \"%s\", but got:\n"), argv[0]);
10786 error_list_unresolved_vdevs(argc - 1, argv + 1,
10787 argv[0], &cb.cb_vdevs);
10788 fprintf(stderr, "\n");
10789 usage(B_FALSE);
10790 return (1);
10791 }
10792 } else {
10793 /*
10794 * The first arg isn't the name of a valid pool.
10795 */
10796 fprintf(stderr, gettext("Cannot get properties of %s: "
10797 "no such pool available.\n"), argv[0]);
10798 return (1);
10799 }
10800
10801 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
10802 cb.cb_type) != 0) {
10803 /* Use correct list of valid properties (pool or vdev) */
10804 current_prop_type = cb.cb_type;
10805 usage(B_FALSE);
10806 }
10807
10808 if (cb.cb_proplist != NULL) {
10809 fake_name.pl_prop = ZPOOL_PROP_NAME;
10810 fake_name.pl_width = strlen(gettext("NAME"));
10811 fake_name.pl_next = cb.cb_proplist;
10812 cb.cb_proplist = &fake_name;
10813 }
10814
10815 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
10816 cb.cb_literal, get_callback, &cb);
10817
10818 if (cb.cb_proplist == &fake_name)
10819 zprop_free_list(fake_name.pl_next);
10820 else
10821 zprop_free_list(cb.cb_proplist);
10822
10823 if (vdev != NULL)
10824 free(vdev);
10825
10826 return (ret);
10827 }
10828
10829 typedef struct set_cbdata {
10830 char *cb_propname;
10831 char *cb_value;
10832 zfs_type_t cb_type;
10833 vdev_cbdata_t cb_vdevs;
10834 boolean_t cb_any_successful;
10835 } set_cbdata_t;
10836
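/*
 * Set a property on a pool, first checking that changes to 'compatibility'
 * and feature enables are consistent with the pool's compatibility set.
 */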
10837 static int
10838 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
10839 {
10840 int error;
10841
10842 /* Check if we have out-of-bounds features */
10843 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
10844 boolean_t features[SPA_FEATURES];
10845 if (zpool_do_load_compat(cb->cb_value, features) !=
10846 ZPOOL_COMPATIBILITY_OK)
10847 return (-1);
10848
10849 nvlist_t *enabled = zpool_get_features(zhp);
10850 spa_feature_t i;
10851 for (i = 0; i < SPA_FEATURES; i++) {
10852 const char *fguid = spa_feature_table[i].fi_guid;
10853 if (nvlist_exists(enabled, fguid) && !features[i])
10854 break;
10855 }
10856 if (i < SPA_FEATURES)
10857 (void) fprintf(stderr, gettext("Warning: one or "
10858 "more features already enabled on pool '%s'\n"
10859 "are not present in this compatibility set.\n"),
10860 zpool_get_name(zhp));
10861 }
10862
10863 /* if we're setting a feature, check it's in compatibility set */
10864 if (zpool_prop_feature(cb->cb_propname) &&
10865 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
10866 char *fname = strchr(cb->cb_propname, '@') + 1;
10867 spa_feature_t f;
10868
10869 if (zfeature_lookup_name(fname, &f) == 0) {
10870 char compat[ZFS_MAXPROPLEN];
10871 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
10872 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
10873 compat[0] = '\0';
10874
10875 boolean_t features[SPA_FEATURES];
10876 if (zpool_do_load_compat(compat, features) !=
10877 ZPOOL_COMPATIBILITY_OK) {
10878 (void) fprintf(stderr, gettext("Error: "
10879 "cannot enable feature '%s' on pool '%s'\n"
10880 "because the pool's 'compatibility' "
10881 "property cannot be parsed.\n"),
10882 fname, zpool_get_name(zhp));
10883 return (-1);
10884 }
10885
10886 if (!features[f]) {
10887 (void) fprintf(stderr, gettext("Error: "
10888 "cannot enable feature '%s' on pool '%s'\n"
10889 "as it is not specified in this pool's "
10890 "current compatibility set.\n"
10891 "Consider setting 'compatibility' to a "
10892 "less restrictive set, or to 'off'.\n"),
10893 fname, zpool_get_name(zhp));
10894 return (-1);
10895 }
10896 }
10897 }
10898
10899 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
10900
10901 return (error);
10902 }
10903
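/*
 * for_each_pool() callback for 'zpool set': dispatch to the vdev or pool
 * property path depending on the requested type.
 */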
10904 static int
10905 set_callback(zpool_handle_t *zhp, void *data)
10906 {
10907 int error;
10908 set_cbdata_t *cb = (set_cbdata_t *)data;
10909
10910 if (cb->cb_type == ZFS_TYPE_VDEV) {
10911 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
10912 cb->cb_propname, cb->cb_value);
10913 } else {
10914 assert(cb->cb_type == ZFS_TYPE_POOL);
10915 error = set_pool_callback(zhp, cb);
10916 }
10917
10918 cb->cb_any_successful = !error;
10919 return (error);
10920 }
10921
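/*
 * zpool set property=value <pool> [vdev]
 *
 * Set the given property on a pool, or on a single vdev within that pool
 * when a vdev name is supplied.
 */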
10922 int
10923 zpool_do_set(int argc, char **argv)
10924 {
10925 set_cbdata_t cb = { 0 };
10926 int error;
10927 char *vdev = NULL;
10928
10929 current_prop_type = ZFS_TYPE_POOL;
10930 if (argc > 1 && argv[1][0] == '-') {
10931 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10932 argv[1][1]);
10933 usage(B_FALSE);
10934 }
10935
10936 if (argc < 2) {
10937 (void) fprintf(stderr, gettext("missing property=value "
10938 "argument\n"));
10939 usage(B_FALSE);
10940 }
10941
10942 if (argc < 3) {
10943 (void) fprintf(stderr, gettext("missing pool name\n"));
10944 usage(B_FALSE);
10945 }
10946
10947 if (argc > 4) {
10948 (void) fprintf(stderr, gettext("too many pool names\n"));
10949 usage(B_FALSE);
10950 }
10951
10952 cb.cb_propname = argv[1];
10953 cb.cb_type = ZFS_TYPE_POOL;
10954 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10955 cb.cb_value = strchr(cb.cb_propname, '=');
10956 if (cb.cb_value == NULL) {
10957 (void) fprintf(stderr, gettext("missing value in "
10958 "property=value argument\n"));
10959 usage(B_FALSE);
10960 }
10961
10962 *(cb.cb_value) = '\0';
10963 cb.cb_value++;
10964 argc -= 2;
10965 argv += 2;
10966
10967 /* argv[0] is pool name */
10968 if (!is_pool(argv[0])) {
10969 (void) fprintf(stderr,
10970 gettext("cannot open '%s': is not a pool\n"), argv[0]);
10971 return (EINVAL);
10972 }
10973
10974 /* argv[1], when supplied, is vdev name */
10975 if (argc == 2) {
10976
10977 if (strcmp(argv[1], "root") == 0)
10978 vdev = strdup("root-0");
10979 else
10980 vdev = strdup(argv[1]);
10981
10982 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
10983 (void) fprintf(stderr, gettext(
10984 "cannot find '%s' in '%s': device not in pool\n"),
10985 vdev, argv[0]);
10986 free(vdev);
10987 return (EINVAL);
10988 }
10989 cb.cb_vdevs.cb_names = &vdev;
10990 cb.cb_vdevs.cb_names_count = 1;
10991 cb.cb_type = ZFS_TYPE_VDEV;
10992 }
10993
10994 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
10995 B_FALSE, set_callback, &cb);
10996
10997 if (vdev != NULL)
10998 free(vdev);
10999
11000 return (error);
11001 }
11002
11003 /* Add up the total number of bytes left to initialize/trim across all vdevs */
11004 static uint64_t
11005 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
11006 {
11007 uint64_t bytes_remaining;
11008 nvlist_t **child;
11009 uint_t c, children;
11010 vdev_stat_t *vs;
11011
11012 assert(activity == ZPOOL_WAIT_INITIALIZE ||
11013 activity == ZPOOL_WAIT_TRIM);
11014
11015 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
11016 (uint64_t **)&vs, &c) == 0);
11017
11018 if (activity == ZPOOL_WAIT_INITIALIZE &&
11019 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
11020 bytes_remaining = vs->vs_initialize_bytes_est -
11021 vs->vs_initialize_bytes_done;
11022 else if (activity == ZPOOL_WAIT_TRIM &&
11023 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
11024 bytes_remaining = vs->vs_trim_bytes_est -
11025 vs->vs_trim_bytes_done;
11026 else
11027 bytes_remaining = 0;
11028
11029 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11030 &child, &children) != 0)
11031 children = 0;
11032
11033 for (c = 0; c < children; c++)
11034 bytes_remaining += vdev_activity_remaining(child[c], activity);
11035
11036 return (bytes_remaining);
11037 }
11038
11039 /* Add up the total number of bytes left to rebuild across top-level vdevs */
11040 static uint64_t
11041 vdev_activity_top_remaining(nvlist_t *nv)
11042 {
11043 uint64_t bytes_remaining = 0;
11044 nvlist_t **child;
11045 uint_t children;
11046 int error;
11047
11048 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11049 &child, &children) != 0)
11050 children = 0;
11051
11052 for (uint_t c = 0; c < children; c++) {
11053 vdev_rebuild_stat_t *vrs;
11054 uint_t i;
11055
11056 error = nvlist_lookup_uint64_array(child[c],
11057 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
11058 if (error == 0) {
11059 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
11060 bytes_remaining += (vrs->vrs_bytes_est -
11061 vrs->vrs_bytes_rebuilt);
11062 }
11063 }
11064 }
11065
11066 return (bytes_remaining);
11067 }
11068
11069 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
11070 static boolean_t
11071 vdev_any_spare_replacing(nvlist_t *nv)
11072 {
11073 nvlist_t **child;
11074 uint_t c, children;
11075 const char *vdev_type;
11076
11077 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
11078
11079 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
11080 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
11081 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
11082 return (B_TRUE);
11083 }
11084
11085 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11086 &child, &children) != 0)
11087 children = 0;
11088
11089 for (c = 0; c < children; c++) {
11090 if (vdev_any_spare_replacing(child[c]))
11091 return (B_TRUE);
11092 }
11093
11094 return (B_FALSE);
11095 }
11096
11097 typedef struct wait_data {
11098 char *wd_poolname;
11099 boolean_t wd_scripted;
11100 boolean_t wd_exact;
11101 boolean_t wd_headers_once;
11102 boolean_t wd_should_exit;
11103 /* Which activities to wait for */
11104 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
11105 float wd_interval;
11106 pthread_cond_t wd_cv;
11107 pthread_mutex_t wd_mutex;
11108 } wait_data_t;
11109
11110 /*
11111 * Print to stdout a single line, containing one column for each activity that
11112  * we are waiting for, specifying how many bytes of work are left for that
11113 * activity.
11114 */
11115 static void
11116 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
11117 {
11118 nvlist_t *config, *nvroot;
11119 uint_t c;
11120 int i;
11121 pool_checkpoint_stat_t *pcs = NULL;
11122 pool_scan_stat_t *pss = NULL;
11123 pool_removal_stat_t *prs = NULL;
11124 pool_raidz_expand_stat_t *pres = NULL;
11125 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
11126 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
11127 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
11128
11129 /* Calculate the width of each column */
11130 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11131 /*
11132 * Make sure we have enough space in the col for pretty-printed
11133 * numbers and for the column header, and then leave a couple
11134 * spaces between cols for readability.
11135 */
11136 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
11137 }
11138
11139 if (timestamp_fmt != NODATE)
11140 print_timestamp(timestamp_fmt);
11141
11142 /* Print header if appropriate */
11143 int term_height = terminal_height();
11144 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
11145 row % (term_height-1) == 0);
11146 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
11147 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11148 if (wd->wd_enabled[i])
11149 (void) printf("%*s", col_widths[i], headers[i]);
11150 }
11151 (void) fputc('\n', stdout);
11152 }
11153
11154 /* Bytes of work remaining in each activity */
11155 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
11156
11157 bytes_rem[ZPOOL_WAIT_FREE] =
11158 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
11159
11160 config = zpool_get_config(zhp, NULL);
11161 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
11162
11163 (void) nvlist_lookup_uint64_array(nvroot,
11164 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
11165 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
11166 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
11167
11168 (void) nvlist_lookup_uint64_array(nvroot,
11169 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
11170 if (prs != NULL && prs->prs_state == DSS_SCANNING)
11171 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
11172 prs->prs_copied;
11173
11174 (void) nvlist_lookup_uint64_array(nvroot,
11175 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
11176 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
11177 pss->pss_pass_scrub_pause == 0) {
11178 int64_t rem = pss->pss_to_examine - pss->pss_issued;
11179 if (pss->pss_func == POOL_SCAN_SCRUB)
11180 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
11181 else
11182 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
11183 } else if (check_rebuilding(nvroot, NULL)) {
11184 bytes_rem[ZPOOL_WAIT_RESILVER] =
11185 vdev_activity_top_remaining(nvroot);
11186 }
11187
11188 (void) nvlist_lookup_uint64_array(nvroot,
11189 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
11190 if (pres != NULL && pres->pres_state == DSS_SCANNING) {
11191 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
11192 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
11193 }
11194
11195 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
11196 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
11197 bytes_rem[ZPOOL_WAIT_TRIM] =
11198 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
11199
11200 /*
11201 * A replace finishes after resilvering finishes, so the amount of work
11202 * left for a replace is the same as for resilvering.
11203 *
11204 * It isn't quite correct to say that if we have any 'spare' or
11205 * 'replacing' vdevs and a resilver is happening, then a replace is in
11206 * progress, like we do here. When a hot spare is used, the faulted vdev
11207 * is not removed after the hot spare is resilvered, so parent 'spare'
11208 	 * is not removed after the hot spare is resilvered, so the parent 'spare'
11209 * resilvering for a different reason. However, we use it as a heuristic
11210 * because we don't have access to the DTLs, which could tell us whether
11211 * or not we have really finished resilvering a hot spare.
11212 */
11213 if (vdev_any_spare_replacing(nvroot))
11214 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
11215
11216 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11217 char buf[64];
11218 if (!wd->wd_enabled[i])
11219 continue;
11220
11221 if (wd->wd_exact) {
11222 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
11223 bytes_rem[i]);
11224 } else {
11225 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
11226 }
11227
11228 if (wd->wd_scripted)
11229 (void) printf(i == 0 ? "%s" : "\t%s", buf);
11230 else
11231 (void) printf(" %*s", col_widths[i] - 1, buf);
11232 }
11233 (void) printf("\n");
11234 (void) fflush(stdout);
11235 }
11236
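/*
 * Worker thread for 'zpool wait' with an interval: periodically refresh
 * the pool's stats and print a row of remaining work until the main
 * thread signals that the wait has completed.
 */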
11237 static void *
11238 wait_status_thread(void *arg)
11239 {
11240 wait_data_t *wd = (wait_data_t *)arg;
11241 zpool_handle_t *zhp;
11242
11243 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
11244 return (void *)(1);
11245
11246 for (int row = 0; ; row++) {
11247 boolean_t missing;
11248 struct timespec timeout;
11249 int ret = 0;
11250 (void) clock_gettime(CLOCK_REALTIME, &timeout);
11251
11252 if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
11253 zpool_props_refresh(zhp) != 0) {
11254 zpool_close(zhp);
11255 return (void *)(uintptr_t)(missing ? 0 : 1);
11256 }
11257
11258 print_wait_status_row(wd, zhp, row);
11259
11260 timeout.tv_sec += floor(wd->wd_interval);
11261 long nanos = timeout.tv_nsec +
11262 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
11263 if (nanos >= NANOSEC) {
11264 timeout.tv_sec++;
11265 timeout.tv_nsec = nanos - NANOSEC;
11266 } else {
11267 timeout.tv_nsec = nanos;
11268 }
11269 pthread_mutex_lock(&wd->wd_mutex);
11270 if (!wd->wd_should_exit)
11271 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
11272 &timeout);
11273 pthread_mutex_unlock(&wd->wd_mutex);
11274 if (ret == 0) {
11275 break; /* signaled by main thread */
11276 } else if (ret != ETIMEDOUT) {
11277 (void) fprintf(stderr, gettext("pthread_cond_timedwait "
11278 "failed: %s\n"), strerror(ret));
11279 zpool_close(zhp);
11280 return (void *)(uintptr_t)(1);
11281 }
11282 }
11283
11284 zpool_close(zhp);
11285 return (void *)(0);
11286 }
11287
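/*
 * zpool wait [-Hp] [-T d|u] [-t activity[,...]] <pool> [interval]
 *
 * Block until none of the requested activities are in progress. With an
 * interval, a status thread periodically prints the bytes of work
 * remaining for each activity.
 */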
11288 int
11289 zpool_do_wait(int argc, char **argv)
11290 {
11291 boolean_t verbose = B_FALSE;
11292 int c, i;
11293 unsigned long count;
11294 pthread_t status_thr;
11295 int error = 0;
11296 zpool_handle_t *zhp;
11297
11298 wait_data_t wd;
11299 wd.wd_scripted = B_FALSE;
11300 wd.wd_exact = B_FALSE;
11301 wd.wd_headers_once = B_FALSE;
11302 wd.wd_should_exit = B_FALSE;
11303
11304 pthread_mutex_init(&wd.wd_mutex, NULL);
11305 pthread_cond_init(&wd.wd_cv, NULL);
11306
11307 /* By default, wait for all types of activity. */
11308 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
11309 wd.wd_enabled[i] = B_TRUE;
11310
11311 while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
11312 switch (c) {
11313 case 'H':
11314 wd.wd_scripted = B_TRUE;
11315 break;
11316 case 'n':
11317 wd.wd_headers_once = B_TRUE;
11318 break;
11319 case 'p':
11320 wd.wd_exact = B_TRUE;
11321 break;
11322 case 'T':
11323 get_timestamp_arg(*optarg);
11324 break;
11325 case 't':
11326 /* Reset activities array */
11327 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
11328
11329 for (char *tok; (tok = strsep(&optarg, ",")); ) {
11330 static const char *const col_opts[] = {
11331 "discard", "free", "initialize", "replace",
11332 "remove", "resilver", "scrub", "trim",
11333 "raidz_expand" };
11334
11335 for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
11336 if (strcmp(tok, col_opts[i]) == 0) {
11337 wd.wd_enabled[i] = B_TRUE;
11338 goto found;
11339 }
11340
11341 (void) fprintf(stderr,
11342 gettext("invalid activity '%s'\n"), tok);
11343 usage(B_FALSE);
11344 found:;
11345 }
11346 break;
11347 case '?':
11348 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11349 optopt);
11350 usage(B_FALSE);
11351 }
11352 }
11353
11354 argc -= optind;
11355 argv += optind;
11356
11357 get_interval_count(&argc, argv, &wd.wd_interval, &count);
11358 if (count != 0) {
11359 /* This subcmd only accepts an interval, not a count */
11360 (void) fprintf(stderr, gettext("too many arguments\n"));
11361 usage(B_FALSE);
11362 }
11363
11364 if (wd.wd_interval != 0)
11365 verbose = B_TRUE;
11366
11367 if (argc < 1) {
11368 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
11369 usage(B_FALSE);
11370 }
11371 if (argc > 1) {
11372 (void) fprintf(stderr, gettext("too many arguments\n"));
11373 usage(B_FALSE);
11374 }
11375
11376 wd.wd_poolname = argv[0];
11377
11378 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
11379 return (1);
11380
11381 if (verbose) {
11382 /*
11383 * We use a separate thread for printing status updates because
11384 * the main thread will call lzc_wait(), which blocks as long
11385 * as an activity is in progress, which can be a long time.
11386 */
11387 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
11388 != 0) {
11389 (void) fprintf(stderr, gettext("failed to create status"
11390 			    " thread: %s\n"), strerror(errno));
11391 zpool_close(zhp);
11392 return (1);
11393 }
11394 }
11395
11396 /*
11397 * Loop over all activities that we are supposed to wait for until none
11398 * of them are in progress. Note that this means we can end up waiting
11399 * for more activities to complete than just those that were in progress
11400 * when we began waiting; if an activity we are interested in begins
11401 * while we are waiting for another activity, we will wait for both to
11402 * complete before exiting.
11403 */
11404 for (;;) {
11405 boolean_t missing = B_FALSE;
11406 boolean_t any_waited = B_FALSE;
11407
11408 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11409 boolean_t waited;
11410
11411 if (!wd.wd_enabled[i])
11412 continue;
11413
11414 error = zpool_wait_status(zhp, i, &missing, &waited);
11415 if (error != 0 || missing)
11416 break;
11417
11418 any_waited = (any_waited || waited);
11419 }
11420
11421 if (error != 0 || missing || !any_waited)
11422 break;
11423 }
11424
11425 zpool_close(zhp);
11426
11427 if (verbose) {
11428 uintptr_t status;
11429 pthread_mutex_lock(&wd.wd_mutex);
11430 wd.wd_should_exit = B_TRUE;
11431 pthread_cond_signal(&wd.wd_cv);
11432 pthread_mutex_unlock(&wd.wd_mutex);
11433 (void) pthread_join(status_thr, (void *)&status);
11434 if (status != 0)
11435 error = status;
11436 }
11437
11438 pthread_mutex_destroy(&wd.wd_mutex);
11439 pthread_cond_destroy(&wd.wd_cv);
11440 return (error);
11441 }
11442
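/*
 * Look up a command name in command_table; on success store its index in
 * *idx and return 0, otherwise return 1.
 */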
11443 static int
11444 find_command_idx(const char *command, int *idx)
11445 {
11446 for (int i = 0; i < NCOMMAND; ++i) {
11447 if (command_table[i].name == NULL)
11448 continue;
11449
11450 if (strcmp(command, command_table[i].name) == 0) {
11451 *idx = i;
11452 return (0);
11453 }
11454 }
11455 return (1);
11456 }
11457
11458 /*
11459 * Display version message
11460 */
11461 static int
11462 zpool_do_version(int argc, char **argv)
11463 {
11464 (void) argc, (void) argv;
11465 return (zfs_version_print() != 0);
11466 }
11467
11468 /* Display documentation */
11469 static int
11470 zpool_do_help(int argc, char **argv)
11471 {
11472 char page[MAXNAMELEN];
11473 if (argc < 3 || strcmp(argv[2], "zpool") == 0)
11474 strcpy(page, "zpool");
11475 else if (strcmp(argv[2], "concepts") == 0 ||
11476 strcmp(argv[2], "props") == 0)
11477 snprintf(page, sizeof (page), "zpool%s", argv[2]);
11478 else
11479 snprintf(page, sizeof (page), "zpool-%s", argv[2]);
11480
11481 execlp("man", "man", page, NULL);
11482
11483 	fprintf(stderr, "couldn't run man program: %s\n", strerror(errno));
11484 return (-1);
11485 }
11486
11487 /*
11488 * Do zpool_load_compat() and print error message on failure
11489 */
11490 static zpool_compat_status_t
11491 zpool_do_load_compat(const char *compat, boolean_t *list)
11492 {
11493 char report[1024];
11494
11495 zpool_compat_status_t ret;
11496
11497 ret = zpool_load_compat(compat, list, report, 1024);
11498 switch (ret) {
11499
11500 case ZPOOL_COMPATIBILITY_OK:
11501 break;
11502
11503 case ZPOOL_COMPATIBILITY_NOFILES:
11504 case ZPOOL_COMPATIBILITY_BADFILE:
11505 case ZPOOL_COMPATIBILITY_BADTOKEN:
11506 (void) fprintf(stderr, "Error: %s\n", report);
11507 break;
11508
11509 case ZPOOL_COMPATIBILITY_WARNTOKEN:
11510 (void) fprintf(stderr, "Warning: %s\n", report);
11511 ret = ZPOOL_COMPATIBILITY_OK;
11512 break;
11513 }
11514 return (ret);
11515 }
11516
11517 int
11518 main(int argc, char **argv)
11519 {
11520 int ret = 0;
11521 int i = 0;
11522 char *cmdname;
11523 char **newargv;
11524
11525 (void) setlocale(LC_ALL, "");
11526 (void) setlocale(LC_NUMERIC, "C");
11527 (void) textdomain(TEXT_DOMAIN);
11528 srand(time(NULL));
11529
11530 opterr = 0;
11531
11532 /*
11533 * Make sure the user has specified some command.
11534 */
11535 if (argc < 2) {
11536 (void) fprintf(stderr, gettext("missing command\n"));
11537 usage(B_FALSE);
11538 }
11539
11540 cmdname = argv[1];
11541
11542 /*
11543 * Special case '-?'
11544 */
11545 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
11546 usage(B_TRUE);
11547
11548 /*
11549 * Special case '-V|--version'
11550 */
11551 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
11552 return (zpool_do_version(argc, argv));
11553
11554 /*
11555 * Special case 'help'
11556 */
11557 if (strcmp(cmdname, "help") == 0)
11558 return (zpool_do_help(argc, argv));
11559
11560 if ((g_zfs = libzfs_init()) == NULL) {
11561 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
11562 return (1);
11563 }
11564
11565 libzfs_print_on_error(g_zfs, B_TRUE);
11566
11567 zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
11568
11569 /*
11570 * Many commands modify input strings for string parsing reasons.
11571 * We create a copy to protect the original argv.
11572 */
11573 newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
11574 for (i = 0; i < argc; i++)
11575 newargv[i] = strdup(argv[i]);
11576 newargv[argc] = NULL;
11577
11578 /*
11579 * Run the appropriate command.
11580 */
11581 if (find_command_idx(cmdname, &i) == 0) {
11582 current_command = &command_table[i];
11583 ret = command_table[i].func(argc - 1, newargv + 1);
11584 } else if (strchr(cmdname, '=')) {
11585 verify(find_command_idx("set", &i) == 0);
11586 current_command = &command_table[i];
11587 ret = command_table[i].func(argc, newargv);
11588 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
11589 /*
11590 * 'freeze' is a vile debugging abomination, so we treat
11591 * it as such.
11592 */
11593 zfs_cmd_t zc = {"\0"};
11594
11595 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
11596 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
11597 if (ret != 0) {
11598 (void) fprintf(stderr,
11599 gettext("failed to freeze pool: %d\n"), errno);
11600 ret = 1;
11601 }
11602
11603 log_history = 0;
11604 } else {
11605 (void) fprintf(stderr, gettext("unrecognized "
11606 "command '%s'\n"), cmdname);
11607 usage(B_FALSE);
11608 ret = 1;
11609 }
11610
11611 for (i = 0; i < argc; i++)
11612 free(newargv[i]);
11613 free(newargv);
11614
11615 if (ret == 0 && log_history)
11616 (void) zpool_log_history(g_zfs, history_str);
11617
11618 libzfs_fini(g_zfs);
11619
11620 /*
11621 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
11622 * for the purposes of running ::findleaks.
11623 */
11624 if (getenv("ZFS_ABORT") != NULL) {
11625 (void) printf("dumping core by request\n");
11626 abort();
11627 }
11628
11629 return (ret);
11630 }